]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blob - test/grsecurity-2.9-2.6.32.57-201203012152.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.9-2.6.32.57-201203012152.patch
1 diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2 index e1efc40..47f0daf 100644
3 --- a/Documentation/dontdiff
4 +++ b/Documentation/dontdiff
5 @@ -1,15 +1,19 @@
6 *.a
7 *.aux
8 *.bin
9 +*.cis
10 *.cpio
11 *.csp
12 +*.dbg
13 *.dsp
14 *.dvi
15 *.elf
16 *.eps
17 *.fw
18 +*.gcno
19 *.gen.S
20 *.gif
21 +*.gmo
22 *.grep
23 *.grp
24 *.gz
25 @@ -38,8 +42,10 @@
26 *.tab.h
27 *.tex
28 *.ver
29 +*.vim
30 *.xml
31 *_MODULES
32 +*_reg_safe.h
33 *_vga16.c
34 *~
35 *.9
36 @@ -49,11 +55,16 @@
37 53c700_d.h
38 CVS
39 ChangeSet
40 +GPATH
41 +GRTAGS
42 +GSYMS
43 +GTAGS
44 Image
45 Kerntypes
46 Module.markers
47 Module.symvers
48 PENDING
49 +PERF*
50 SCCS
51 System.map*
52 TAGS
53 @@ -76,7 +87,11 @@ btfixupprep
54 build
55 bvmlinux
56 bzImage*
57 +capability_names.h
58 +capflags.c
59 classlist.h*
60 +clut_vga16.c
61 +common-cmds.h
62 comp*.log
63 compile.h*
64 conf
65 @@ -84,6 +99,8 @@ config
66 config-*
67 config_data.h*
68 config_data.gz*
69 +config.c
70 +config.tmp
71 conmakehash
72 consolemap_deftbl.c*
73 cpustr.h
74 @@ -97,19 +114,23 @@ elfconfig.h*
75 fixdep
76 fore200e_mkfirm
77 fore200e_pca_fw.c*
78 +gate.lds
79 gconf
80 gen-devlist
81 gen_crc32table
82 gen_init_cpio
83 genksyms
84 *_gray256.c
85 +hash
86 +hid-example
87 ihex2fw
88 ikconfig.h*
89 initramfs_data.cpio
90 +initramfs_data.cpio.bz2
91 initramfs_data.cpio.gz
92 initramfs_list
93 kallsyms
94 -kconfig
95 +kern_constants.h
96 keywords.c
97 ksym.c*
98 ksym.h*
99 @@ -127,13 +148,16 @@ machtypes.h
100 map
101 maui_boot.h
102 mconf
103 +mdp
104 miboot*
105 mk_elfconfig
106 mkboot
107 mkbugboot
108 mkcpustr
109 mkdep
110 +mkpiggy
111 mkprep
112 +mkregtable
113 mktables
114 mktree
115 modpost
116 @@ -149,6 +173,7 @@ patches*
117 pca200e.bin
118 pca200e_ecd.bin2
119 piggy.gz
120 +piggy.S
121 piggyback
122 pnmtologo
123 ppc_defs.h*
124 @@ -157,12 +182,15 @@ qconf
125 raid6altivec*.c
126 raid6int*.c
127 raid6tables.c
128 +regdb.c
129 relocs
130 +rlim_names.h
131 series
132 setup
133 setup.bin
134 setup.elf
135 sImage
136 +slabinfo
137 sm_tbl*
138 split-include
139 syscalltab.h
140 @@ -171,6 +199,7 @@ tftpboot.img
141 timeconst.h
142 times.h*
143 trix_boot.h
144 +user_constants.h
145 utsrelease.h*
146 vdso-syms.lds
147 vdso.lds
148 @@ -186,14 +215,20 @@ version.h*
149 vmlinux
150 vmlinux-*
151 vmlinux.aout
152 +vmlinux.bin.all
153 +vmlinux.bin.bz2
154 vmlinux.lds
155 +vmlinux.relocs
156 +voffset.h
157 vsyscall.lds
158 vsyscall_32.lds
159 wanxlfw.inc
160 uImage
161 unifdef
162 +utsrelease.h
163 wakeup.bin
164 wakeup.elf
165 wakeup.lds
166 zImage*
167 zconf.hash.c
168 +zoffset.h
169 diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
170 index c840e7d..f4c451c 100644
171 --- a/Documentation/kernel-parameters.txt
172 +++ b/Documentation/kernel-parameters.txt
173 @@ -1837,6 +1837,13 @@ and is between 256 and 4096 characters. It is defined in the file
174 the specified number of seconds. This is to be used if
175 your oopses keep scrolling off the screen.
176
177 + pax_nouderef [X86] disables UDEREF. Most likely needed under certain
178 + virtualization environments that don't cope well with the
179 + expand down segment used by UDEREF on X86-32 or the frequent
180 + page table updates on X86-64.
181 +
182 + pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
183 +
184 pcbit= [HW,ISDN]
185
186 pcd. [PARIDE]
187 diff --git a/Makefile b/Makefile
188 index 3377650..095e46d 100644
189 --- a/Makefile
190 +++ b/Makefile
191 @@ -221,8 +221,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
192
193 HOSTCC = gcc
194 HOSTCXX = g++
195 -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
196 -HOSTCXXFLAGS = -O2
197 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
198 +HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
199 +HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
200
201 # Decide whether to build built-in, modular, or both.
202 # Normally, just do built-in.
203 @@ -376,8 +377,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc
204 # Rules shared between *config targets and build targets
205
206 # Basic helpers built in scripts/
207 -PHONY += scripts_basic
208 -scripts_basic:
209 +PHONY += scripts_basic gcc-plugins
210 +scripts_basic: gcc-plugins
211 $(Q)$(MAKE) $(build)=scripts/basic
212
213 # To avoid any implicit rule to kick in, define an empty command.
214 @@ -403,7 +404,7 @@ endif
215 # of make so .config is not included in this case either (for *config).
216
217 no-dot-config-targets := clean mrproper distclean \
218 - cscope TAGS tags help %docs check% \
219 + cscope gtags TAGS tags help %docs check% \
220 include/linux/version.h headers_% \
221 kernelrelease kernelversion
222
223 @@ -526,6 +527,48 @@ else
224 KBUILD_CFLAGS += -O2
225 endif
226
227 +ifndef DISABLE_PAX_PLUGINS
228 +ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
229 +ifndef DISABLE_PAX_CONSTIFY_PLUGIN
230 +CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
231 +endif
232 +ifdef CONFIG_PAX_MEMORY_STACKLEAK
233 +STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
234 +STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
235 +endif
236 +ifdef CONFIG_KALLOCSTAT_PLUGIN
237 +KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
238 +endif
239 +ifdef CONFIG_PAX_KERNEXEC_PLUGIN
240 +KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
241 +KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
242 +KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
243 +endif
244 +ifdef CONFIG_CHECKER_PLUGIN
245 +ifeq ($(call cc-ifversion, -ge, 0406, y), y)
246 +CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
247 +endif
248 +endif
249 +GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS) $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS)
250 +GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
251 +export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN
252 +ifeq ($(KBUILD_EXTMOD),)
253 +gcc-plugins:
254 + $(Q)$(MAKE) $(build)=tools/gcc
255 +else
256 +gcc-plugins: ;
257 +endif
258 +else
259 +gcc-plugins:
260 +ifeq ($(call cc-ifversion, -ge, 0405, y), y)
261 + $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
262 +else
263 + $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
264 +endif
265 + $(Q)echo "PAX_MEMORY_STACKLEAK and constification will be less secure"
266 +endif
267 +endif
268 +
269 include $(srctree)/arch/$(SRCARCH)/Makefile
270
271 ifneq ($(CONFIG_FRAME_WARN),0)
272 @@ -647,7 +690,7 @@ export mod_strip_cmd
273
274
275 ifeq ($(KBUILD_EXTMOD),)
276 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
277 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
278
279 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
280 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
281 @@ -868,6 +911,8 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
282
283 # The actual objects are generated when descending,
284 # make sure no implicit rule kicks in
285 +$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
286 +$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
287 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
288
289 # Handle descending into subdirectories listed in $(vmlinux-dirs)
290 @@ -877,7 +922,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
291 # Error messages still appears in the original language
292
293 PHONY += $(vmlinux-dirs)
294 -$(vmlinux-dirs): prepare scripts
295 +$(vmlinux-dirs): gcc-plugins prepare scripts
296 $(Q)$(MAKE) $(build)=$@
297
298 # Build the kernel release string
299 @@ -986,6 +1031,7 @@ prepare0: archprepare FORCE
300 $(Q)$(MAKE) $(build)=. missing-syscalls
301
302 # All the preparing..
303 +prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
304 prepare: prepare0
305
306 # The asm symlink changes when $(ARCH) changes.
307 @@ -1127,6 +1173,8 @@ all: modules
308 # using awk while concatenating to the final file.
309
310 PHONY += modules
311 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
312 +modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
313 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux)
314 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
315 @$(kecho) ' Building modules, stage 2.';
316 @@ -1136,7 +1184,7 @@ modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux)
317
318 # Target to prepare building external modules
319 PHONY += modules_prepare
320 -modules_prepare: prepare scripts
321 +modules_prepare: gcc-plugins prepare scripts
322
323 # Target to install modules
324 PHONY += modules_install
325 @@ -1201,7 +1249,7 @@ MRPROPER_FILES += .config .config.old include/asm .version .old_version \
326 include/linux/autoconf.h include/linux/version.h \
327 include/linux/utsrelease.h \
328 include/linux/bounds.h include/asm*/asm-offsets.h \
329 - Module.symvers Module.markers tags TAGS cscope*
330 + Module.symvers Module.markers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS
331
332 # clean - Delete most, but leave enough to build external modules
333 #
334 @@ -1245,7 +1293,7 @@ distclean: mrproper
335 @find $(srctree) $(RCS_FIND_IGNORE) \
336 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
337 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
338 - -o -name '.*.rej' -o -size 0 \
339 + -o -name '.*.rej' -o -name '*.so' -o -size 0 \
340 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
341 -type f -print | xargs rm -f
342
343 @@ -1292,6 +1340,7 @@ help:
344 @echo ' modules_prepare - Set up for building external modules'
345 @echo ' tags/TAGS - Generate tags file for editors'
346 @echo ' cscope - Generate cscope index'
347 + @echo ' gtags - Generate GNU GLOBAL index'
348 @echo ' kernelrelease - Output the release version string'
349 @echo ' kernelversion - Output the version stored in Makefile'
350 @echo ' headers_install - Install sanitised kernel headers to INSTALL_HDR_PATH'; \
351 @@ -1393,6 +1442,8 @@ PHONY += $(module-dirs) modules
352 $(module-dirs): crmodverdir $(objtree)/Module.symvers
353 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
354
355 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
356 +modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
357 modules: $(module-dirs)
358 @$(kecho) ' Building modules, stage 2.';
359 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
360 @@ -1448,7 +1499,7 @@ endif # KBUILD_EXTMOD
361 quiet_cmd_tags = GEN $@
362 cmd_tags = $(CONFIG_SHELL) $(srctree)/scripts/tags.sh $@
363
364 -tags TAGS cscope: FORCE
365 +tags TAGS cscope gtags: FORCE
366 $(call cmd,tags)
367
368 # Scripts to check various things for consistency
369 @@ -1513,17 +1564,21 @@ else
370 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
371 endif
372
373 -%.s: %.c prepare scripts FORCE
374 +%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
375 +%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
376 +%.s: %.c gcc-plugins prepare scripts FORCE
377 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
378 %.i: %.c prepare scripts FORCE
379 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
380 -%.o: %.c prepare scripts FORCE
381 +%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
382 +%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
383 +%.o: %.c gcc-plugins prepare scripts FORCE
384 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
385 %.lst: %.c prepare scripts FORCE
386 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
387 -%.s: %.S prepare scripts FORCE
388 +%.s: %.S gcc-plugins prepare scripts FORCE
389 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
390 -%.o: %.S prepare scripts FORCE
391 +%.o: %.S gcc-plugins prepare scripts FORCE
392 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
393 %.symtypes: %.c prepare scripts FORCE
394 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
395 @@ -1533,11 +1588,15 @@ endif
396 $(cmd_crmodverdir)
397 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
398 $(build)=$(build-dir)
399 -%/: prepare scripts FORCE
400 +%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
401 +%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
402 +%/: gcc-plugins prepare scripts FORCE
403 $(cmd_crmodverdir)
404 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
405 $(build)=$(build-dir)
406 -%.ko: prepare scripts FORCE
407 +%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
408 +%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
409 +%.ko: gcc-plugins prepare scripts FORCE
410 $(cmd_crmodverdir)
411 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
412 $(build)=$(build-dir) $(@:.ko=.o)
413 diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
414 index 610dff4..f396854 100644
415 --- a/arch/alpha/include/asm/atomic.h
416 +++ b/arch/alpha/include/asm/atomic.h
417 @@ -251,6 +251,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
418 #define atomic_dec(v) atomic_sub(1,(v))
419 #define atomic64_dec(v) atomic64_sub(1,(v))
420
421 +#define atomic64_read_unchecked(v) atomic64_read(v)
422 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
423 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
424 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
425 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
426 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
427 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
428 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
429 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
430 +
431 #define smp_mb__before_atomic_dec() smp_mb()
432 #define smp_mb__after_atomic_dec() smp_mb()
433 #define smp_mb__before_atomic_inc() smp_mb()
434 diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
435 index 5c75c1b..c82f878 100644
436 --- a/arch/alpha/include/asm/elf.h
437 +++ b/arch/alpha/include/asm/elf.h
438 @@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
439
440 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
441
442 +#ifdef CONFIG_PAX_ASLR
443 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
444 +
445 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
446 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
447 +#endif
448 +
449 /* $0 is set by ld.so to a pointer to a function which might be
450 registered using atexit. This provides a mean for the dynamic
451 linker to call DT_FINI functions for shared libraries that have
452 diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
453 index 3f0c59f..cf1e100 100644
454 --- a/arch/alpha/include/asm/pgtable.h
455 +++ b/arch/alpha/include/asm/pgtable.h
456 @@ -101,6 +101,17 @@ struct vm_area_struct;
457 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
458 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
459 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
460 +
461 +#ifdef CONFIG_PAX_PAGEEXEC
462 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
463 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
464 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
465 +#else
466 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
467 +# define PAGE_COPY_NOEXEC PAGE_COPY
468 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
469 +#endif
470 +
471 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
472
473 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
474 diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
475 index ebc3c89..20cfa63 100644
476 --- a/arch/alpha/kernel/module.c
477 +++ b/arch/alpha/kernel/module.c
478 @@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
479
480 /* The small sections were sorted to the end of the segment.
481 The following should definitely cover them. */
482 - gp = (u64)me->module_core + me->core_size - 0x8000;
483 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
484 got = sechdrs[me->arch.gotsecindex].sh_addr;
485
486 for (i = 0; i < n; i++) {
487 diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
488 index a94e49c..d71dd44 100644
489 --- a/arch/alpha/kernel/osf_sys.c
490 +++ b/arch/alpha/kernel/osf_sys.c
491 @@ -1172,7 +1172,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
492 /* At this point: (!vma || addr < vma->vm_end). */
493 if (limit - len < addr)
494 return -ENOMEM;
495 - if (!vma || addr + len <= vma->vm_start)
496 + if (check_heap_stack_gap(vma, addr, len))
497 return addr;
498 addr = vma->vm_end;
499 vma = vma->vm_next;
500 @@ -1208,6 +1208,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
501 merely specific addresses, but regions of memory -- perhaps
502 this feature should be incorporated into all ports? */
503
504 +#ifdef CONFIG_PAX_RANDMMAP
505 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
506 +#endif
507 +
508 if (addr) {
509 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
510 if (addr != (unsigned long) -ENOMEM)
511 @@ -1215,8 +1219,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
512 }
513
514 /* Next, try allocating at TASK_UNMAPPED_BASE. */
515 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
516 - len, limit);
517 + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
518 +
519 if (addr != (unsigned long) -ENOMEM)
520 return addr;
521
522 diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
523 index 00a31de..2ded0f2 100644
524 --- a/arch/alpha/mm/fault.c
525 +++ b/arch/alpha/mm/fault.c
526 @@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
527 __reload_thread(pcb);
528 }
529
530 +#ifdef CONFIG_PAX_PAGEEXEC
531 +/*
532 + * PaX: decide what to do with offenders (regs->pc = fault address)
533 + *
534 + * returns 1 when task should be killed
535 + * 2 when patched PLT trampoline was detected
536 + * 3 when unpatched PLT trampoline was detected
537 + */
538 +static int pax_handle_fetch_fault(struct pt_regs *regs)
539 +{
540 +
541 +#ifdef CONFIG_PAX_EMUPLT
542 + int err;
543 +
544 + do { /* PaX: patched PLT emulation #1 */
545 + unsigned int ldah, ldq, jmp;
546 +
547 + err = get_user(ldah, (unsigned int *)regs->pc);
548 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
549 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
550 +
551 + if (err)
552 + break;
553 +
554 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
555 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
556 + jmp == 0x6BFB0000U)
557 + {
558 + unsigned long r27, addr;
559 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
560 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
561 +
562 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
563 + err = get_user(r27, (unsigned long *)addr);
564 + if (err)
565 + break;
566 +
567 + regs->r27 = r27;
568 + regs->pc = r27;
569 + return 2;
570 + }
571 + } while (0);
572 +
573 + do { /* PaX: patched PLT emulation #2 */
574 + unsigned int ldah, lda, br;
575 +
576 + err = get_user(ldah, (unsigned int *)regs->pc);
577 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
578 + err |= get_user(br, (unsigned int *)(regs->pc+8));
579 +
580 + if (err)
581 + break;
582 +
583 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
584 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
585 + (br & 0xFFE00000U) == 0xC3E00000U)
586 + {
587 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
588 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
589 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
590 +
591 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
592 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
593 + return 2;
594 + }
595 + } while (0);
596 +
597 + do { /* PaX: unpatched PLT emulation */
598 + unsigned int br;
599 +
600 + err = get_user(br, (unsigned int *)regs->pc);
601 +
602 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
603 + unsigned int br2, ldq, nop, jmp;
604 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
605 +
606 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
607 + err = get_user(br2, (unsigned int *)addr);
608 + err |= get_user(ldq, (unsigned int *)(addr+4));
609 + err |= get_user(nop, (unsigned int *)(addr+8));
610 + err |= get_user(jmp, (unsigned int *)(addr+12));
611 + err |= get_user(resolver, (unsigned long *)(addr+16));
612 +
613 + if (err)
614 + break;
615 +
616 + if (br2 == 0xC3600000U &&
617 + ldq == 0xA77B000CU &&
618 + nop == 0x47FF041FU &&
619 + jmp == 0x6B7B0000U)
620 + {
621 + regs->r28 = regs->pc+4;
622 + regs->r27 = addr+16;
623 + regs->pc = resolver;
624 + return 3;
625 + }
626 + }
627 + } while (0);
628 +#endif
629 +
630 + return 1;
631 +}
632 +
633 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
634 +{
635 + unsigned long i;
636 +
637 + printk(KERN_ERR "PAX: bytes at PC: ");
638 + for (i = 0; i < 5; i++) {
639 + unsigned int c;
640 + if (get_user(c, (unsigned int *)pc+i))
641 + printk(KERN_CONT "???????? ");
642 + else
643 + printk(KERN_CONT "%08x ", c);
644 + }
645 + printk("\n");
646 +}
647 +#endif
648
649 /*
650 * This routine handles page faults. It determines the address,
651 @@ -131,8 +249,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
652 good_area:
653 si_code = SEGV_ACCERR;
654 if (cause < 0) {
655 - if (!(vma->vm_flags & VM_EXEC))
656 + if (!(vma->vm_flags & VM_EXEC)) {
657 +
658 +#ifdef CONFIG_PAX_PAGEEXEC
659 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
660 + goto bad_area;
661 +
662 + up_read(&mm->mmap_sem);
663 + switch (pax_handle_fetch_fault(regs)) {
664 +
665 +#ifdef CONFIG_PAX_EMUPLT
666 + case 2:
667 + case 3:
668 + return;
669 +#endif
670 +
671 + }
672 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
673 + do_group_exit(SIGKILL);
674 +#else
675 goto bad_area;
676 +#endif
677 +
678 + }
679 } else if (!cause) {
680 /* Allow reads even for write-only mappings */
681 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
682 diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
683 index b68faef..6dd1496 100644
684 --- a/arch/arm/Kconfig
685 +++ b/arch/arm/Kconfig
686 @@ -14,6 +14,7 @@ config ARM
687 select SYS_SUPPORTS_APM_EMULATION
688 select HAVE_OPROFILE
689 select HAVE_ARCH_KGDB
690 + select GENERIC_ATOMIC64
691 select HAVE_KPROBES if (!XIP_KERNEL)
692 select HAVE_KRETPROBES if (HAVE_KPROBES)
693 select HAVE_FUNCTION_TRACER if (!XIP_KERNEL)
694 diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
695 index d0daeab..ff286a8 100644
696 --- a/arch/arm/include/asm/atomic.h
697 +++ b/arch/arm/include/asm/atomic.h
698 @@ -15,6 +15,10 @@
699 #include <linux/types.h>
700 #include <asm/system.h>
701
702 +#ifdef CONFIG_GENERIC_ATOMIC64
703 +#include <asm-generic/atomic64.h>
704 +#endif
705 +
706 #define ATOMIC_INIT(i) { (i) }
707
708 #ifdef __KERNEL__
709 diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
710 index 6aac3f5..265536b 100644
711 --- a/arch/arm/include/asm/elf.h
712 +++ b/arch/arm/include/asm/elf.h
713 @@ -109,7 +109,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
714 the loader. We need to make sure that it is out of the way of the program
715 that it will "exec", and that there is sufficient room for the brk. */
716
717 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
718 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
719 +
720 +#ifdef CONFIG_PAX_ASLR
721 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
722 +
723 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
724 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
725 +#endif
726
727 /* When the program starts, a1 contains a pointer to a function to be
728 registered with atexit, as per the SVR4 ABI. A value of 0 means we
729 diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
730 index c019949..388fdd1 100644
731 --- a/arch/arm/include/asm/kmap_types.h
732 +++ b/arch/arm/include/asm/kmap_types.h
733 @@ -19,6 +19,7 @@ enum km_type {
734 KM_SOFTIRQ0,
735 KM_SOFTIRQ1,
736 KM_L2_CACHE,
737 + KM_CLEARPAGE,
738 KM_TYPE_NR
739 };
740
741 diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
742 index 1d6bd40..fba0cb9 100644
743 --- a/arch/arm/include/asm/uaccess.h
744 +++ b/arch/arm/include/asm/uaccess.h
745 @@ -22,6 +22,8 @@
746 #define VERIFY_READ 0
747 #define VERIFY_WRITE 1
748
749 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
750 +
751 /*
752 * The exception table consists of pairs of addresses: the first is the
753 * address of an instruction that is allowed to fault, and the second is
754 @@ -387,8 +389,23 @@ do { \
755
756
757 #ifdef CONFIG_MMU
758 -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
759 -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
760 +extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
761 +extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
762 +
763 +static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
764 +{
765 + if (!__builtin_constant_p(n))
766 + check_object_size(to, n, false);
767 + return ___copy_from_user(to, from, n);
768 +}
769 +
770 +static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
771 +{
772 + if (!__builtin_constant_p(n))
773 + check_object_size(from, n, true);
774 + return ___copy_to_user(to, from, n);
775 +}
776 +
777 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
778 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
779 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
780 @@ -403,6 +420,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
781
782 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
783 {
784 + if ((long)n < 0)
785 + return n;
786 +
787 if (access_ok(VERIFY_READ, from, n))
788 n = __copy_from_user(to, from, n);
789 else /* security hole - plug it */
790 @@ -412,6 +432,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
791
792 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
793 {
794 + if ((long)n < 0)
795 + return n;
796 +
797 if (access_ok(VERIFY_WRITE, to, n))
798 n = __copy_to_user(to, from, n);
799 return n;
800 diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
801 index 0e62770..e2c2cd6 100644
802 --- a/arch/arm/kernel/armksyms.c
803 +++ b/arch/arm/kernel/armksyms.c
804 @@ -118,8 +118,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
805 #ifdef CONFIG_MMU
806 EXPORT_SYMBOL(copy_page);
807
808 -EXPORT_SYMBOL(__copy_from_user);
809 -EXPORT_SYMBOL(__copy_to_user);
810 +EXPORT_SYMBOL(___copy_from_user);
811 +EXPORT_SYMBOL(___copy_to_user);
812 EXPORT_SYMBOL(__clear_user);
813
814 EXPORT_SYMBOL(__get_user_1);
815 diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
816 index ba8ccfe..2dc34dc 100644
817 --- a/arch/arm/kernel/kgdb.c
818 +++ b/arch/arm/kernel/kgdb.c
819 @@ -190,7 +190,7 @@ void kgdb_arch_exit(void)
820 * and we handle the normal undef case within the do_undefinstr
821 * handler.
822 */
823 -struct kgdb_arch arch_kgdb_ops = {
824 +const struct kgdb_arch arch_kgdb_ops = {
825 #ifndef __ARMEB__
826 .gdb_bpt_instr = {0xfe, 0xde, 0xff, 0xe7}
827 #else /* ! __ARMEB__ */
828 diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
829 index 3f361a7..6e806e1 100644
830 --- a/arch/arm/kernel/traps.c
831 +++ b/arch/arm/kernel/traps.c
832 @@ -247,6 +247,8 @@ static void __die(const char *str, int err, struct thread_info *thread, struct p
833
834 DEFINE_SPINLOCK(die_lock);
835
836 +extern void gr_handle_kernel_exploit(void);
837 +
838 /*
839 * This function is protected against re-entrancy.
840 */
841 @@ -271,6 +273,8 @@ NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
842 if (panic_on_oops)
843 panic("Fatal exception");
844
845 + gr_handle_kernel_exploit();
846 +
847 do_exit(SIGSEGV);
848 }
849
850 diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
851 index e4fe124..0fc246b 100644
852 --- a/arch/arm/lib/copy_from_user.S
853 +++ b/arch/arm/lib/copy_from_user.S
854 @@ -16,7 +16,7 @@
855 /*
856 * Prototype:
857 *
858 - * size_t __copy_from_user(void *to, const void *from, size_t n)
859 + * size_t ___copy_from_user(void *to, const void *from, size_t n)
860 *
861 * Purpose:
862 *
863 @@ -84,11 +84,11 @@
864
865 .text
866
867 -ENTRY(__copy_from_user)
868 +ENTRY(___copy_from_user)
869
870 #include "copy_template.S"
871
872 -ENDPROC(__copy_from_user)
873 +ENDPROC(___copy_from_user)
874
875 .section .fixup,"ax"
876 .align 0
877 diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
878 index 1a71e15..ac7b258 100644
879 --- a/arch/arm/lib/copy_to_user.S
880 +++ b/arch/arm/lib/copy_to_user.S
881 @@ -16,7 +16,7 @@
882 /*
883 * Prototype:
884 *
885 - * size_t __copy_to_user(void *to, const void *from, size_t n)
886 + * size_t ___copy_to_user(void *to, const void *from, size_t n)
887 *
888 * Purpose:
889 *
890 @@ -88,11 +88,11 @@
891 .text
892
893 ENTRY(__copy_to_user_std)
894 -WEAK(__copy_to_user)
895 +WEAK(___copy_to_user)
896
897 #include "copy_template.S"
898
899 -ENDPROC(__copy_to_user)
900 +ENDPROC(___copy_to_user)
901
902 .section .fixup,"ax"
903 .align 0
904 diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
905 index ffdd274..91017b6 100644
906 --- a/arch/arm/lib/uaccess.S
907 +++ b/arch/arm/lib/uaccess.S
908 @@ -19,7 +19,7 @@
909
910 #define PAGE_SHIFT 12
911
912 -/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
913 +/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
914 * Purpose : copy a block to user memory from kernel memory
915 * Params : to - user memory
916 * : from - kernel memory
917 @@ -39,7 +39,7 @@ USER( strgtbt r3, [r0], #1) @ May fault
918 sub r2, r2, ip
919 b .Lc2u_dest_aligned
920
921 -ENTRY(__copy_to_user)
922 +ENTRY(___copy_to_user)
923 stmfd sp!, {r2, r4 - r7, lr}
924 cmp r2, #4
925 blt .Lc2u_not_enough
926 @@ -277,14 +277,14 @@ USER( strgebt r3, [r0], #1) @ May fault
927 ldrgtb r3, [r1], #0
928 USER( strgtbt r3, [r0], #1) @ May fault
929 b .Lc2u_finished
930 -ENDPROC(__copy_to_user)
931 +ENDPROC(___copy_to_user)
932
933 .section .fixup,"ax"
934 .align 0
935 9001: ldmfd sp!, {r0, r4 - r7, pc}
936 .previous
937
938 -/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
939 +/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
940 * Purpose : copy a block from user memory to kernel memory
941 * Params : to - kernel memory
942 * : from - user memory
943 @@ -303,7 +303,7 @@ USER( ldrgtbt r3, [r1], #1) @ May fault
944 sub r2, r2, ip
945 b .Lcfu_dest_aligned
946
947 -ENTRY(__copy_from_user)
948 +ENTRY(___copy_from_user)
949 stmfd sp!, {r0, r2, r4 - r7, lr}
950 cmp r2, #4
951 blt .Lcfu_not_enough
952 @@ -543,7 +543,7 @@ USER( ldrgebt r3, [r1], #1) @ May fault
953 USER( ldrgtbt r3, [r1], #1) @ May fault
954 strgtb r3, [r0], #1
955 b .Lcfu_finished
956 -ENDPROC(__copy_from_user)
957 +ENDPROC(___copy_from_user)
958
959 .section .fixup,"ax"
960 .align 0
961 diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
962 index 6b967ff..67d5b2b 100644
963 --- a/arch/arm/lib/uaccess_with_memcpy.c
964 +++ b/arch/arm/lib/uaccess_with_memcpy.c
965 @@ -97,7 +97,7 @@ out:
966 }
967
968 unsigned long
969 -__copy_to_user(void __user *to, const void *from, unsigned long n)
970 +___copy_to_user(void __user *to, const void *from, unsigned long n)
971 {
972 /*
973 * This test is stubbed out of the main function above to keep
974 diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
975 index 4028724..beec230 100644
976 --- a/arch/arm/mach-at91/pm.c
977 +++ b/arch/arm/mach-at91/pm.c
978 @@ -348,7 +348,7 @@ static void at91_pm_end(void)
979 }
980
981
982 -static struct platform_suspend_ops at91_pm_ops ={
983 +static const struct platform_suspend_ops at91_pm_ops ={
984 .valid = at91_pm_valid_state,
985 .begin = at91_pm_begin,
986 .enter = at91_pm_enter,
987 diff --git a/arch/arm/mach-omap1/pm.c b/arch/arm/mach-omap1/pm.c
988 index 5218943..0a34552 100644
989 --- a/arch/arm/mach-omap1/pm.c
990 +++ b/arch/arm/mach-omap1/pm.c
991 @@ -647,7 +647,7 @@ static struct irqaction omap_wakeup_irq = {
992
993
994
995 -static struct platform_suspend_ops omap_pm_ops ={
996 +static const struct platform_suspend_ops omap_pm_ops ={
997 .prepare = omap_pm_prepare,
998 .enter = omap_pm_enter,
999 .finish = omap_pm_finish,
1000 diff --git a/arch/arm/mach-omap2/pm24xx.c b/arch/arm/mach-omap2/pm24xx.c
1001 index bff5c4e..d4c649b 100644
1002 --- a/arch/arm/mach-omap2/pm24xx.c
1003 +++ b/arch/arm/mach-omap2/pm24xx.c
1004 @@ -326,7 +326,7 @@ static void omap2_pm_finish(void)
1005 enable_hlt();
1006 }
1007
1008 -static struct platform_suspend_ops omap_pm_ops = {
1009 +static const struct platform_suspend_ops omap_pm_ops = {
1010 .prepare = omap2_pm_prepare,
1011 .enter = omap2_pm_enter,
1012 .finish = omap2_pm_finish,
1013 diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
1014 index 8946319..7d3e661 100644
1015 --- a/arch/arm/mach-omap2/pm34xx.c
1016 +++ b/arch/arm/mach-omap2/pm34xx.c
1017 @@ -401,7 +401,7 @@ static void omap3_pm_end(void)
1018 return;
1019 }
1020
1021 -static struct platform_suspend_ops omap_pm_ops = {
1022 +static const struct platform_suspend_ops omap_pm_ops = {
1023 .begin = omap3_pm_begin,
1024 .end = omap3_pm_end,
1025 .prepare = omap3_pm_prepare,
1026 diff --git a/arch/arm/mach-pnx4008/pm.c b/arch/arm/mach-pnx4008/pm.c
1027 index b3d8d53..6e68ebc 100644
1028 --- a/arch/arm/mach-pnx4008/pm.c
1029 +++ b/arch/arm/mach-pnx4008/pm.c
1030 @@ -116,7 +116,7 @@ static int pnx4008_pm_valid(suspend_state_t state)
1031 (state == PM_SUSPEND_MEM);
1032 }
1033
1034 -static struct platform_suspend_ops pnx4008_pm_ops = {
1035 +static const struct platform_suspend_ops pnx4008_pm_ops = {
1036 .enter = pnx4008_pm_enter,
1037 .valid = pnx4008_pm_valid,
1038 };
1039 diff --git a/arch/arm/mach-pxa/pm.c b/arch/arm/mach-pxa/pm.c
1040 index 7693355..9beb00a 100644
1041 --- a/arch/arm/mach-pxa/pm.c
1042 +++ b/arch/arm/mach-pxa/pm.c
1043 @@ -95,7 +95,7 @@ void pxa_pm_finish(void)
1044 pxa_cpu_pm_fns->finish();
1045 }
1046
1047 -static struct platform_suspend_ops pxa_pm_ops = {
1048 +static const struct platform_suspend_ops pxa_pm_ops = {
1049 .valid = pxa_pm_valid,
1050 .enter = pxa_pm_enter,
1051 .prepare = pxa_pm_prepare,
1052 diff --git a/arch/arm/mach-pxa/sharpsl_pm.c b/arch/arm/mach-pxa/sharpsl_pm.c
1053 index 629e05d..06be589 100644
1054 --- a/arch/arm/mach-pxa/sharpsl_pm.c
1055 +++ b/arch/arm/mach-pxa/sharpsl_pm.c
1056 @@ -891,7 +891,7 @@ static void sharpsl_apm_get_power_status(struct apm_power_info *info)
1057 }
1058
1059 #ifdef CONFIG_PM
1060 -static struct platform_suspend_ops sharpsl_pm_ops = {
1061 +static const struct platform_suspend_ops sharpsl_pm_ops = {
1062 .prepare = pxa_pm_prepare,
1063 .finish = pxa_pm_finish,
1064 .enter = corgi_pxa_pm_enter,
1065 diff --git a/arch/arm/mach-sa1100/pm.c b/arch/arm/mach-sa1100/pm.c
1066 index c83fdc8..ab9fc44 100644
1067 --- a/arch/arm/mach-sa1100/pm.c
1068 +++ b/arch/arm/mach-sa1100/pm.c
1069 @@ -120,7 +120,7 @@ unsigned long sleep_phys_sp(void *sp)
1070 return virt_to_phys(sp);
1071 }
1072
1073 -static struct platform_suspend_ops sa11x0_pm_ops = {
1074 +static const struct platform_suspend_ops sa11x0_pm_ops = {
1075 .enter = sa11x0_pm_enter,
1076 .valid = suspend_valid_only_mem,
1077 };
1078 diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
1079 index 3191cd6..c0739db 100644
1080 --- a/arch/arm/mm/fault.c
1081 +++ b/arch/arm/mm/fault.c
1082 @@ -166,6 +166,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
1083 }
1084 #endif
1085
1086 +#ifdef CONFIG_PAX_PAGEEXEC
1087 + if (fsr & FSR_LNX_PF) {
1088 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
1089 + do_group_exit(SIGKILL);
1090 + }
1091 +#endif
1092 +
1093 tsk->thread.address = addr;
1094 tsk->thread.error_code = fsr;
1095 tsk->thread.trap_no = 14;
1096 @@ -357,6 +364,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
1097 }
1098 #endif /* CONFIG_MMU */
1099
1100 +#ifdef CONFIG_PAX_PAGEEXEC
1101 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1102 +{
1103 + long i;
1104 +
1105 + printk(KERN_ERR "PAX: bytes at PC: ");
1106 + for (i = 0; i < 20; i++) {
1107 + unsigned char c;
1108 + if (get_user(c, (__force unsigned char __user *)pc+i))
1109 + printk(KERN_CONT "?? ");
1110 + else
1111 + printk(KERN_CONT "%02x ", c);
1112 + }
1113 + printk("\n");
1114 +
1115 + printk(KERN_ERR "PAX: bytes at SP-4: ");
1116 + for (i = -1; i < 20; i++) {
1117 + unsigned long c;
1118 + if (get_user(c, (__force unsigned long __user *)sp+i))
1119 + printk(KERN_CONT "???????? ");
1120 + else
1121 + printk(KERN_CONT "%08lx ", c);
1122 + }
1123 + printk("\n");
1124 +}
1125 +#endif
1126 +
1127 /*
1128 * First Level Translation Fault Handler
1129 *
1130 diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
1131 index f5abc51..7ec524c 100644
1132 --- a/arch/arm/mm/mmap.c
1133 +++ b/arch/arm/mm/mmap.c
1134 @@ -63,6 +63,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1135 if (len > TASK_SIZE)
1136 return -ENOMEM;
1137
1138 +#ifdef CONFIG_PAX_RANDMMAP
1139 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
1140 +#endif
1141 +
1142 if (addr) {
1143 if (do_align)
1144 addr = COLOUR_ALIGN(addr, pgoff);
1145 @@ -70,15 +74,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1146 addr = PAGE_ALIGN(addr);
1147
1148 vma = find_vma(mm, addr);
1149 - if (TASK_SIZE - len >= addr &&
1150 - (!vma || addr + len <= vma->vm_start))
1151 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1152 return addr;
1153 }
1154 if (len > mm->cached_hole_size) {
1155 - start_addr = addr = mm->free_area_cache;
1156 + start_addr = addr = mm->free_area_cache;
1157 } else {
1158 - start_addr = addr = TASK_UNMAPPED_BASE;
1159 - mm->cached_hole_size = 0;
1160 + start_addr = addr = mm->mmap_base;
1161 + mm->cached_hole_size = 0;
1162 }
1163
1164 full_search:
1165 @@ -94,14 +97,14 @@ full_search:
1166 * Start a new search - just in case we missed
1167 * some holes.
1168 */
1169 - if (start_addr != TASK_UNMAPPED_BASE) {
1170 - start_addr = addr = TASK_UNMAPPED_BASE;
1171 + if (start_addr != mm->mmap_base) {
1172 + start_addr = addr = mm->mmap_base;
1173 mm->cached_hole_size = 0;
1174 goto full_search;
1175 }
1176 return -ENOMEM;
1177 }
1178 - if (!vma || addr + len <= vma->vm_start) {
1179 + if (check_heap_stack_gap(vma, addr, len)) {
1180 /*
1181 * Remember the place where we stopped the search:
1182 */
1183 diff --git a/arch/arm/plat-s3c/pm.c b/arch/arm/plat-s3c/pm.c
1184 index 8d97db2..b66cfa5 100644
1185 --- a/arch/arm/plat-s3c/pm.c
1186 +++ b/arch/arm/plat-s3c/pm.c
1187 @@ -355,7 +355,7 @@ static void s3c_pm_finish(void)
1188 s3c_pm_check_cleanup();
1189 }
1190
1191 -static struct platform_suspend_ops s3c_pm_ops = {
1192 +static const struct platform_suspend_ops s3c_pm_ops = {
1193 .enter = s3c_pm_enter,
1194 .prepare = s3c_pm_prepare,
1195 .finish = s3c_pm_finish,
1196 diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
1197 index d5d1d41..856e2ed 100644
1198 --- a/arch/avr32/include/asm/elf.h
1199 +++ b/arch/avr32/include/asm/elf.h
1200 @@ -85,8 +85,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
1201 the loader. We need to make sure that it is out of the way of the program
1202 that it will "exec", and that there is sufficient room for the brk. */
1203
1204 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1205 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1206
1207 +#ifdef CONFIG_PAX_ASLR
1208 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
1209 +
1210 +#define PAX_DELTA_MMAP_LEN 15
1211 +#define PAX_DELTA_STACK_LEN 15
1212 +#endif
1213
1214 /* This yields a mask that user programs can use to figure out what
1215 instruction set this CPU supports. This could be done in user space,
1216 diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
1217 index b7f5c68..556135c 100644
1218 --- a/arch/avr32/include/asm/kmap_types.h
1219 +++ b/arch/avr32/include/asm/kmap_types.h
1220 @@ -22,7 +22,8 @@ D(10) KM_IRQ0,
1221 D(11) KM_IRQ1,
1222 D(12) KM_SOFTIRQ0,
1223 D(13) KM_SOFTIRQ1,
1224 -D(14) KM_TYPE_NR
1225 +D(14) KM_CLEARPAGE,
1226 +D(15) KM_TYPE_NR
1227 };
1228
1229 #undef D
1230 diff --git a/arch/avr32/mach-at32ap/pm.c b/arch/avr32/mach-at32ap/pm.c
1231 index f021edf..32d680e 100644
1232 --- a/arch/avr32/mach-at32ap/pm.c
1233 +++ b/arch/avr32/mach-at32ap/pm.c
1234 @@ -176,7 +176,7 @@ out:
1235 return 0;
1236 }
1237
1238 -static struct platform_suspend_ops avr32_pm_ops = {
1239 +static const struct platform_suspend_ops avr32_pm_ops = {
1240 .valid = avr32_pm_valid_state,
1241 .enter = avr32_pm_enter,
1242 };
1243 diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
1244 index b61d86d..e292c7f 100644
1245 --- a/arch/avr32/mm/fault.c
1246 +++ b/arch/avr32/mm/fault.c
1247 @@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
1248
1249 int exception_trace = 1;
1250
1251 +#ifdef CONFIG_PAX_PAGEEXEC
1252 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1253 +{
1254 + unsigned long i;
1255 +
1256 + printk(KERN_ERR "PAX: bytes at PC: ");
1257 + for (i = 0; i < 20; i++) {
1258 + unsigned char c;
1259 + if (get_user(c, (unsigned char *)pc+i))
1260 + printk(KERN_CONT "???????? ");
1261 + else
1262 + printk(KERN_CONT "%02x ", c);
1263 + }
1264 + printk("\n");
1265 +}
1266 +#endif
1267 +
1268 /*
1269 * This routine handles page faults. It determines the address and the
1270 * problem, and then passes it off to one of the appropriate routines.
1271 @@ -157,6 +174,16 @@ bad_area:
1272 up_read(&mm->mmap_sem);
1273
1274 if (user_mode(regs)) {
1275 +
1276 +#ifdef CONFIG_PAX_PAGEEXEC
1277 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
1278 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
1279 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
1280 + do_group_exit(SIGKILL);
1281 + }
1282 + }
1283 +#endif
1284 +
1285 if (exception_trace && printk_ratelimit())
1286 printk("%s%s[%d]: segfault at %08lx pc %08lx "
1287 "sp %08lx ecr %lu\n",
1288 diff --git a/arch/blackfin/kernel/kgdb.c b/arch/blackfin/kernel/kgdb.c
1289 index cce79d0..c406c85 100644
1290 --- a/arch/blackfin/kernel/kgdb.c
1291 +++ b/arch/blackfin/kernel/kgdb.c
1292 @@ -428,7 +428,7 @@ int kgdb_arch_handle_exception(int vector, int signo,
1293 return -1; /* this means that we do not want to exit from the handler */
1294 }
1295
1296 -struct kgdb_arch arch_kgdb_ops = {
1297 +const struct kgdb_arch arch_kgdb_ops = {
1298 .gdb_bpt_instr = {0xa1},
1299 #ifdef CONFIG_SMP
1300 .flags = KGDB_HW_BREAKPOINT|KGDB_THR_PROC_SWAP,
1301 diff --git a/arch/blackfin/mach-common/pm.c b/arch/blackfin/mach-common/pm.c
1302 index 8837be4..b2fb413 100644
1303 --- a/arch/blackfin/mach-common/pm.c
1304 +++ b/arch/blackfin/mach-common/pm.c
1305 @@ -255,7 +255,7 @@ static int bfin_pm_enter(suspend_state_t state)
1306 return 0;
1307 }
1308
1309 -struct platform_suspend_ops bfin_pm_ops = {
1310 +const struct platform_suspend_ops bfin_pm_ops = {
1311 .enter = bfin_pm_enter,
1312 .valid = bfin_pm_valid,
1313 };
1314 diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
1315 index 00a57af..c3ef0cd 100644
1316 --- a/arch/frv/include/asm/atomic.h
1317 +++ b/arch/frv/include/asm/atomic.h
1318 @@ -241,6 +241,16 @@ extern uint32_t __xchg_32(uint32_t i, volatile void *v);
1319 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
1320 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
1321
1322 +#define atomic64_read_unchecked(v) atomic64_read(v)
1323 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
1324 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
1325 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
1326 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
1327 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
1328 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
1329 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
1330 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
1331 +
1332 static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
1333 {
1334 int c, old;
1335 diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
1336 index f8e16b2..c73ff79 100644
1337 --- a/arch/frv/include/asm/kmap_types.h
1338 +++ b/arch/frv/include/asm/kmap_types.h
1339 @@ -23,6 +23,7 @@ enum km_type {
1340 KM_IRQ1,
1341 KM_SOFTIRQ0,
1342 KM_SOFTIRQ1,
1343 + KM_CLEARPAGE,
1344 KM_TYPE_NR
1345 };
1346
1347 diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
1348 index 385fd30..6c3d97e 100644
1349 --- a/arch/frv/mm/elf-fdpic.c
1350 +++ b/arch/frv/mm/elf-fdpic.c
1351 @@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1352 if (addr) {
1353 addr = PAGE_ALIGN(addr);
1354 vma = find_vma(current->mm, addr);
1355 - if (TASK_SIZE - len >= addr &&
1356 - (!vma || addr + len <= vma->vm_start))
1357 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1358 goto success;
1359 }
1360
1361 @@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1362 for (; vma; vma = vma->vm_next) {
1363 if (addr > limit)
1364 break;
1365 - if (addr + len <= vma->vm_start)
1366 + if (check_heap_stack_gap(vma, addr, len))
1367 goto success;
1368 addr = vma->vm_end;
1369 }
1370 @@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1371 for (; vma; vma = vma->vm_next) {
1372 if (addr > limit)
1373 break;
1374 - if (addr + len <= vma->vm_start)
1375 + if (check_heap_stack_gap(vma, addr, len))
1376 goto success;
1377 addr = vma->vm_end;
1378 }
1379 diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c
1380 index e4a80d8..11a7ea1 100644
1381 --- a/arch/ia64/hp/common/hwsw_iommu.c
1382 +++ b/arch/ia64/hp/common/hwsw_iommu.c
1383 @@ -17,7 +17,7 @@
1384 #include <linux/swiotlb.h>
1385 #include <asm/machvec.h>
1386
1387 -extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
1388 +extern const struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
1389
1390 /* swiotlb declarations & definitions: */
1391 extern int swiotlb_late_init_with_default_size (size_t size);
1392 @@ -33,7 +33,7 @@ static inline int use_swiotlb(struct device *dev)
1393 !sba_dma_ops.dma_supported(dev, *dev->dma_mask);
1394 }
1395
1396 -struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
1397 +const struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
1398 {
1399 if (use_swiotlb(dev))
1400 return &swiotlb_dma_ops;
1401 diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
1402 index 01ae69b..35752fd 100644
1403 --- a/arch/ia64/hp/common/sba_iommu.c
1404 +++ b/arch/ia64/hp/common/sba_iommu.c
1405 @@ -2097,7 +2097,7 @@ static struct acpi_driver acpi_sba_ioc_driver = {
1406 },
1407 };
1408
1409 -extern struct dma_map_ops swiotlb_dma_ops;
1410 +extern const struct dma_map_ops swiotlb_dma_ops;
1411
1412 static int __init
1413 sba_init(void)
1414 @@ -2211,7 +2211,7 @@ sba_page_override(char *str)
1415
1416 __setup("sbapagesize=",sba_page_override);
1417
1418 -struct dma_map_ops sba_dma_ops = {
1419 +const struct dma_map_ops sba_dma_ops = {
1420 .alloc_coherent = sba_alloc_coherent,
1421 .free_coherent = sba_free_coherent,
1422 .map_page = sba_map_page,
1423 diff --git a/arch/ia64/ia32/binfmt_elf32.c b/arch/ia64/ia32/binfmt_elf32.c
1424 index c69552b..c7122f4 100644
1425 --- a/arch/ia64/ia32/binfmt_elf32.c
1426 +++ b/arch/ia64/ia32/binfmt_elf32.c
1427 @@ -45,6 +45,13 @@ randomize_stack_top(unsigned long stack_top);
1428
1429 #define elf_read_implies_exec(ex, have_pt_gnu_stack) (!(have_pt_gnu_stack))
1430
1431 +#ifdef CONFIG_PAX_ASLR
1432 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
1433 +
1434 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1435 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1436 +#endif
1437 +
1438 /* Ugly but avoids duplication */
1439 #include "../../../fs/binfmt_elf.c"
1440
1441 diff --git a/arch/ia64/ia32/ia32priv.h b/arch/ia64/ia32/ia32priv.h
1442 index 0f15349..26b3429 100644
1443 --- a/arch/ia64/ia32/ia32priv.h
1444 +++ b/arch/ia64/ia32/ia32priv.h
1445 @@ -296,7 +296,14 @@ typedef struct compat_siginfo {
1446 #define ELF_DATA ELFDATA2LSB
1447 #define ELF_ARCH EM_386
1448
1449 -#define IA32_STACK_TOP IA32_PAGE_OFFSET
1450 +#ifdef CONFIG_PAX_RANDUSTACK
1451 +#define __IA32_DELTA_STACK (current->mm->delta_stack)
1452 +#else
1453 +#define __IA32_DELTA_STACK 0UL
1454 +#endif
1455 +
1456 +#define IA32_STACK_TOP (IA32_PAGE_OFFSET - __IA32_DELTA_STACK)
1457 +
1458 #define IA32_GATE_OFFSET IA32_PAGE_OFFSET
1459 #define IA32_GATE_END IA32_PAGE_OFFSET + PAGE_SIZE
1460
1461 diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
1462 index 88405cb..de5ca5d 100644
1463 --- a/arch/ia64/include/asm/atomic.h
1464 +++ b/arch/ia64/include/asm/atomic.h
1465 @@ -210,6 +210,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
1466 #define atomic64_inc(v) atomic64_add(1, (v))
1467 #define atomic64_dec(v) atomic64_sub(1, (v))
1468
1469 +#define atomic64_read_unchecked(v) atomic64_read(v)
1470 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
1471 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
1472 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
1473 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
1474 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
1475 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
1476 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
1477 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
1478 +
1479 /* Atomic operations are already serializing */
1480 #define smp_mb__before_atomic_dec() barrier()
1481 #define smp_mb__after_atomic_dec() barrier()
1482 diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
1483 index 8d3c79c..71b3af6 100644
1484 --- a/arch/ia64/include/asm/dma-mapping.h
1485 +++ b/arch/ia64/include/asm/dma-mapping.h
1486 @@ -12,7 +12,7 @@
1487
1488 #define ARCH_HAS_DMA_GET_REQUIRED_MASK
1489
1490 -extern struct dma_map_ops *dma_ops;
1491 +extern const struct dma_map_ops *dma_ops;
1492 extern struct ia64_machine_vector ia64_mv;
1493 extern void set_iommu_machvec(void);
1494
1495 @@ -24,7 +24,7 @@ extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
1496 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
1497 dma_addr_t *daddr, gfp_t gfp)
1498 {
1499 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
1500 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1501 void *caddr;
1502
1503 caddr = ops->alloc_coherent(dev, size, daddr, gfp);
1504 @@ -35,7 +35,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
1505 static inline void dma_free_coherent(struct device *dev, size_t size,
1506 void *caddr, dma_addr_t daddr)
1507 {
1508 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
1509 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1510 debug_dma_free_coherent(dev, size, caddr, daddr);
1511 ops->free_coherent(dev, size, caddr, daddr);
1512 }
1513 @@ -49,13 +49,13 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
1514
1515 static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
1516 {
1517 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
1518 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1519 return ops->mapping_error(dev, daddr);
1520 }
1521
1522 static inline int dma_supported(struct device *dev, u64 mask)
1523 {
1524 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
1525 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1526 return ops->dma_supported(dev, mask);
1527 }
1528
1529 diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
1530 index 86eddee..b116bb4 100644
1531 --- a/arch/ia64/include/asm/elf.h
1532 +++ b/arch/ia64/include/asm/elf.h
1533 @@ -43,6 +43,13 @@
1534 */
1535 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
1536
1537 +#ifdef CONFIG_PAX_ASLR
1538 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
1539 +
1540 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1541 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1542 +#endif
1543 +
1544 #define PT_IA_64_UNWIND 0x70000001
1545
1546 /* IA-64 relocations: */
1547 diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h
1548 index 367d299..9ad4279 100644
1549 --- a/arch/ia64/include/asm/machvec.h
1550 +++ b/arch/ia64/include/asm/machvec.h
1551 @@ -45,7 +45,7 @@ typedef void ia64_mv_kernel_launch_event_t(void);
1552 /* DMA-mapping interface: */
1553 typedef void ia64_mv_dma_init (void);
1554 typedef u64 ia64_mv_dma_get_required_mask (struct device *);
1555 -typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
1556 +typedef const struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
1557
1558 /*
1559 * WARNING: The legacy I/O space is _architected_. Platforms are
1560 @@ -251,7 +251,7 @@ extern void machvec_init_from_cmdline(const char *cmdline);
1561 # endif /* CONFIG_IA64_GENERIC */
1562
1563 extern void swiotlb_dma_init(void);
1564 -extern struct dma_map_ops *dma_get_ops(struct device *);
1565 +extern const struct dma_map_ops *dma_get_ops(struct device *);
1566
1567 /*
1568 * Define default versions so we can extend machvec for new platforms without having
1569 diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
1570 index 8840a69..cdb63d9 100644
1571 --- a/arch/ia64/include/asm/pgtable.h
1572 +++ b/arch/ia64/include/asm/pgtable.h
1573 @@ -12,7 +12,7 @@
1574 * David Mosberger-Tang <davidm@hpl.hp.com>
1575 */
1576
1577 -
1578 +#include <linux/const.h>
1579 #include <asm/mman.h>
1580 #include <asm/page.h>
1581 #include <asm/processor.h>
1582 @@ -143,6 +143,17 @@
1583 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1584 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1585 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
1586 +
1587 +#ifdef CONFIG_PAX_PAGEEXEC
1588 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
1589 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1590 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1591 +#else
1592 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
1593 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
1594 +# define PAGE_COPY_NOEXEC PAGE_COPY
1595 +#endif
1596 +
1597 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
1598 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
1599 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
1600 diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
1601 index 239ecdc..f94170e 100644
1602 --- a/arch/ia64/include/asm/spinlock.h
1603 +++ b/arch/ia64/include/asm/spinlock.h
1604 @@ -72,7 +72,7 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
1605 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
1606
1607 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
1608 - ACCESS_ONCE(*p) = (tmp + 2) & ~1;
1609 + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
1610 }
1611
1612 static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
1613 diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
1614 index 449c8c0..432a3d2 100644
1615 --- a/arch/ia64/include/asm/uaccess.h
1616 +++ b/arch/ia64/include/asm/uaccess.h
1617 @@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
1618 const void *__cu_from = (from); \
1619 long __cu_len = (n); \
1620 \
1621 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
1622 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
1623 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
1624 __cu_len; \
1625 })
1626 @@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
1627 long __cu_len = (n); \
1628 \
1629 __chk_user_ptr(__cu_from); \
1630 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
1631 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
1632 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
1633 __cu_len; \
1634 })
1635 diff --git a/arch/ia64/kernel/dma-mapping.c b/arch/ia64/kernel/dma-mapping.c
1636 index f2c1600..969398a 100644
1637 --- a/arch/ia64/kernel/dma-mapping.c
1638 +++ b/arch/ia64/kernel/dma-mapping.c
1639 @@ -3,7 +3,7 @@
1640 /* Set this to 1 if there is a HW IOMMU in the system */
1641 int iommu_detected __read_mostly;
1642
1643 -struct dma_map_ops *dma_ops;
1644 +const struct dma_map_ops *dma_ops;
1645 EXPORT_SYMBOL(dma_ops);
1646
1647 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
1648 @@ -16,7 +16,7 @@ static int __init dma_init(void)
1649 }
1650 fs_initcall(dma_init);
1651
1652 -struct dma_map_ops *dma_get_ops(struct device *dev)
1653 +const struct dma_map_ops *dma_get_ops(struct device *dev)
1654 {
1655 return dma_ops;
1656 }
1657 diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
1658 index 1481b0a..e7d38ff 100644
1659 --- a/arch/ia64/kernel/module.c
1660 +++ b/arch/ia64/kernel/module.c
1661 @@ -315,8 +315,7 @@ module_alloc (unsigned long size)
1662 void
1663 module_free (struct module *mod, void *module_region)
1664 {
1665 - if (mod && mod->arch.init_unw_table &&
1666 - module_region == mod->module_init) {
1667 + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
1668 unw_remove_unwind_table(mod->arch.init_unw_table);
1669 mod->arch.init_unw_table = NULL;
1670 }
1671 @@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
1672 }
1673
1674 static inline int
1675 +in_init_rx (const struct module *mod, uint64_t addr)
1676 +{
1677 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
1678 +}
1679 +
1680 +static inline int
1681 +in_init_rw (const struct module *mod, uint64_t addr)
1682 +{
1683 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
1684 +}
1685 +
1686 +static inline int
1687 in_init (const struct module *mod, uint64_t addr)
1688 {
1689 - return addr - (uint64_t) mod->module_init < mod->init_size;
1690 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
1691 +}
1692 +
1693 +static inline int
1694 +in_core_rx (const struct module *mod, uint64_t addr)
1695 +{
1696 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
1697 +}
1698 +
1699 +static inline int
1700 +in_core_rw (const struct module *mod, uint64_t addr)
1701 +{
1702 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
1703 }
1704
1705 static inline int
1706 in_core (const struct module *mod, uint64_t addr)
1707 {
1708 - return addr - (uint64_t) mod->module_core < mod->core_size;
1709 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
1710 }
1711
1712 static inline int
1713 @@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
1714 break;
1715
1716 case RV_BDREL:
1717 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
1718 + if (in_init_rx(mod, val))
1719 + val -= (uint64_t) mod->module_init_rx;
1720 + else if (in_init_rw(mod, val))
1721 + val -= (uint64_t) mod->module_init_rw;
1722 + else if (in_core_rx(mod, val))
1723 + val -= (uint64_t) mod->module_core_rx;
1724 + else if (in_core_rw(mod, val))
1725 + val -= (uint64_t) mod->module_core_rw;
1726 break;
1727
1728 case RV_LTV:
1729 @@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
1730 * addresses have been selected...
1731 */
1732 uint64_t gp;
1733 - if (mod->core_size > MAX_LTOFF)
1734 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
1735 /*
1736 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
1737 * at the end of the module.
1738 */
1739 - gp = mod->core_size - MAX_LTOFF / 2;
1740 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
1741 else
1742 - gp = mod->core_size / 2;
1743 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
1744 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
1745 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
1746 mod->arch.gp = gp;
1747 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
1748 }
1749 diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c
1750 index f6b1ff0..de773fb 100644
1751 --- a/arch/ia64/kernel/pci-dma.c
1752 +++ b/arch/ia64/kernel/pci-dma.c
1753 @@ -43,7 +43,7 @@ struct device fallback_dev = {
1754 .dma_mask = &fallback_dev.coherent_dma_mask,
1755 };
1756
1757 -extern struct dma_map_ops intel_dma_ops;
1758 +extern const struct dma_map_ops intel_dma_ops;
1759
1760 static int __init pci_iommu_init(void)
1761 {
1762 @@ -96,15 +96,34 @@ int iommu_dma_supported(struct device *dev, u64 mask)
1763 }
1764 EXPORT_SYMBOL(iommu_dma_supported);
1765
1766 +extern void *intel_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags);
1767 +extern void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);
1768 +extern int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1769 +extern void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1770 +extern dma_addr_t intel_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1771 +extern void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1772 +extern int intel_mapping_error(struct device *dev, dma_addr_t dma_addr);
1773 +
1774 +static const struct dma_map_ops intel_iommu_dma_ops = {
1775 + /* from drivers/pci/intel-iommu.c:intel_dma_ops */
1776 + .alloc_coherent = intel_alloc_coherent,
1777 + .free_coherent = intel_free_coherent,
1778 + .map_sg = intel_map_sg,
1779 + .unmap_sg = intel_unmap_sg,
1780 + .map_page = intel_map_page,
1781 + .unmap_page = intel_unmap_page,
1782 + .mapping_error = intel_mapping_error,
1783 +
1784 + .sync_single_for_cpu = machvec_dma_sync_single,
1785 + .sync_sg_for_cpu = machvec_dma_sync_sg,
1786 + .sync_single_for_device = machvec_dma_sync_single,
1787 + .sync_sg_for_device = machvec_dma_sync_sg,
1788 + .dma_supported = iommu_dma_supported,
1789 +};
1790 +
1791 void __init pci_iommu_alloc(void)
1792 {
1793 - dma_ops = &intel_dma_ops;
1794 -
1795 - dma_ops->sync_single_for_cpu = machvec_dma_sync_single;
1796 - dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg;
1797 - dma_ops->sync_single_for_device = machvec_dma_sync_single;
1798 - dma_ops->sync_sg_for_device = machvec_dma_sync_sg;
1799 - dma_ops->dma_supported = iommu_dma_supported;
1800 + dma_ops = &intel_iommu_dma_ops;
1801
1802 /*
1803 * The order of these functions is important for
1804 diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c
1805 index 285aae8..61dbab6 100644
1806 --- a/arch/ia64/kernel/pci-swiotlb.c
1807 +++ b/arch/ia64/kernel/pci-swiotlb.c
1808 @@ -21,7 +21,7 @@ static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size,
1809 return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
1810 }
1811
1812 -struct dma_map_ops swiotlb_dma_ops = {
1813 +const struct dma_map_ops swiotlb_dma_ops = {
1814 .alloc_coherent = ia64_swiotlb_alloc_coherent,
1815 .free_coherent = swiotlb_free_coherent,
1816 .map_page = swiotlb_map_page,
1817 diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
1818 index 609d500..7dde2a8 100644
1819 --- a/arch/ia64/kernel/sys_ia64.c
1820 +++ b/arch/ia64/kernel/sys_ia64.c
1821 @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
1822 if (REGION_NUMBER(addr) == RGN_HPAGE)
1823 addr = 0;
1824 #endif
1825 +
1826 +#ifdef CONFIG_PAX_RANDMMAP
1827 + if (mm->pax_flags & MF_PAX_RANDMMAP)
1828 + addr = mm->free_area_cache;
1829 + else
1830 +#endif
1831 +
1832 if (!addr)
1833 addr = mm->free_area_cache;
1834
1835 @@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
1836 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
1837 /* At this point: (!vma || addr < vma->vm_end). */
1838 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
1839 - if (start_addr != TASK_UNMAPPED_BASE) {
1840 + if (start_addr != mm->mmap_base) {
1841 /* Start a new search --- just in case we missed some holes. */
1842 - addr = TASK_UNMAPPED_BASE;
1843 + addr = mm->mmap_base;
1844 goto full_search;
1845 }
1846 return -ENOMEM;
1847 }
1848 - if (!vma || addr + len <= vma->vm_start) {
1849 + if (check_heap_stack_gap(vma, addr, len)) {
1850 /* Remember the address where we stopped this search: */
1851 mm->free_area_cache = addr + len;
1852 return addr;
1853 diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
1854 index 8f06035..b3a5818 100644
1855 --- a/arch/ia64/kernel/topology.c
1856 +++ b/arch/ia64/kernel/topology.c
1857 @@ -282,7 +282,7 @@ static ssize_t cache_show(struct kobject * kobj, struct attribute * attr, char *
1858 return ret;
1859 }
1860
1861 -static struct sysfs_ops cache_sysfs_ops = {
1862 +static const struct sysfs_ops cache_sysfs_ops = {
1863 .show = cache_show
1864 };
1865
1866 diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
1867 index 0a0c77b..8e55a81 100644
1868 --- a/arch/ia64/kernel/vmlinux.lds.S
1869 +++ b/arch/ia64/kernel/vmlinux.lds.S
1870 @@ -190,7 +190,7 @@ SECTIONS
1871 /* Per-cpu data: */
1872 . = ALIGN(PERCPU_PAGE_SIZE);
1873 PERCPU_VADDR(PERCPU_ADDR, :percpu)
1874 - __phys_per_cpu_start = __per_cpu_load;
1875 + __phys_per_cpu_start = per_cpu_load;
1876 . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits
1877 * into percpu page size
1878 */
1879 diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
1880 index 19261a9..1611b7a 100644
1881 --- a/arch/ia64/mm/fault.c
1882 +++ b/arch/ia64/mm/fault.c
1883 @@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
1884 return pte_present(pte);
1885 }
1886
1887 +#ifdef CONFIG_PAX_PAGEEXEC
1888 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1889 +{
1890 + unsigned long i;
1891 +
1892 + printk(KERN_ERR "PAX: bytes at PC: ");
1893 + for (i = 0; i < 8; i++) {
1894 + unsigned int c;
1895 + if (get_user(c, (unsigned int *)pc+i))
1896 + printk(KERN_CONT "???????? ");
1897 + else
1898 + printk(KERN_CONT "%08x ", c);
1899 + }
1900 + printk("\n");
1901 +}
1902 +#endif
1903 +
1904 void __kprobes
1905 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1906 {
1907 @@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
1908 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1909 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1910
1911 - if ((vma->vm_flags & mask) != mask)
1912 + if ((vma->vm_flags & mask) != mask) {
1913 +
1914 +#ifdef CONFIG_PAX_PAGEEXEC
1915 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1916 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1917 + goto bad_area;
1918 +
1919 + up_read(&mm->mmap_sem);
1920 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1921 + do_group_exit(SIGKILL);
1922 + }
1923 +#endif
1924 +
1925 goto bad_area;
1926
1927 + }
1928 +
1929 survive:
1930 /*
1931 * If for any reason at all we couldn't handle the fault, make
1932 diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
1933 index b0f6157..a082bbc 100644
1934 --- a/arch/ia64/mm/hugetlbpage.c
1935 +++ b/arch/ia64/mm/hugetlbpage.c
1936 @@ -172,7 +172,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
1937 /* At this point: (!vmm || addr < vmm->vm_end). */
1938 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1939 return -ENOMEM;
1940 - if (!vmm || (addr + len) <= vmm->vm_start)
1941 + if (check_heap_stack_gap(vmm, addr, len))
1942 return addr;
1943 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1944 }
1945 diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
1946 index 1857766..05cc6a3 100644
1947 --- a/arch/ia64/mm/init.c
1948 +++ b/arch/ia64/mm/init.c
1949 @@ -122,6 +122,19 @@ ia64_init_addr_space (void)
1950 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1951 vma->vm_end = vma->vm_start + PAGE_SIZE;
1952 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1953 +
1954 +#ifdef CONFIG_PAX_PAGEEXEC
1955 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1956 + vma->vm_flags &= ~VM_EXEC;
1957 +
1958 +#ifdef CONFIG_PAX_MPROTECT
1959 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
1960 + vma->vm_flags &= ~VM_MAYEXEC;
1961 +#endif
1962 +
1963 + }
1964 +#endif
1965 +
1966 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1967 down_write(&current->mm->mmap_sem);
1968 if (insert_vm_struct(current->mm, vma)) {
1969 diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
1970 index 98b6849..8046766 100644
1971 --- a/arch/ia64/sn/pci/pci_dma.c
1972 +++ b/arch/ia64/sn/pci/pci_dma.c
1973 @@ -464,7 +464,7 @@ int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
1974 return ret;
1975 }
1976
1977 -static struct dma_map_ops sn_dma_ops = {
1978 +static const struct dma_map_ops sn_dma_ops = {
1979 .alloc_coherent = sn_dma_alloc_coherent,
1980 .free_coherent = sn_dma_free_coherent,
1981 .map_page = sn_dma_map_page,
1982 diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
1983 index 82abd15..d95ae5d 100644
1984 --- a/arch/m32r/lib/usercopy.c
1985 +++ b/arch/m32r/lib/usercopy.c
1986 @@ -14,6 +14,9 @@
1987 unsigned long
1988 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1989 {
1990 + if ((long)n < 0)
1991 + return n;
1992 +
1993 prefetch(from);
1994 if (access_ok(VERIFY_WRITE, to, n))
1995 __copy_user(to,from,n);
1996 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1997 unsigned long
1998 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1999 {
2000 + if ((long)n < 0)
2001 + return n;
2002 +
2003 prefetchw(to);
2004 if (access_ok(VERIFY_READ, from, n))
2005 __copy_user_zeroing(to,from,n);
2006 diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
2007 index fd7620f..63d73a6 100644
2008 --- a/arch/mips/Kconfig
2009 +++ b/arch/mips/Kconfig
2010 @@ -5,6 +5,7 @@ config MIPS
2011 select HAVE_IDE
2012 select HAVE_OPROFILE
2013 select HAVE_ARCH_KGDB
2014 + select GENERIC_ATOMIC64 if !64BIT
2015 # Horrible source of confusion. Die, die, die ...
2016 select EMBEDDED
2017 select RTC_LIB if !LEMOTE_FULOONG2E
2018 diff --git a/arch/mips/Makefile b/arch/mips/Makefile
2019 index 77f5021..2b1db8a 100644
2020 --- a/arch/mips/Makefile
2021 +++ b/arch/mips/Makefile
2022 @@ -51,6 +51,8 @@ endif
2023 cflags-y := -ffunction-sections
2024 cflags-y += $(call cc-option, -mno-check-zero-division)
2025
2026 +cflags-y += -Wno-sign-compare -Wno-extra
2027 +
2028 ifdef CONFIG_32BIT
2029 ld-emul = $(32bit-emul)
2030 vmlinux-32 = vmlinux
2031 diff --git a/arch/mips/alchemy/devboards/pm.c b/arch/mips/alchemy/devboards/pm.c
2032 index 632f986..fd0378d 100644
2033 --- a/arch/mips/alchemy/devboards/pm.c
2034 +++ b/arch/mips/alchemy/devboards/pm.c
2035 @@ -78,7 +78,7 @@ static void db1x_pm_end(void)
2036
2037 }
2038
2039 -static struct platform_suspend_ops db1x_pm_ops = {
2040 +static const struct platform_suspend_ops db1x_pm_ops = {
2041 .valid = suspend_valid_only_mem,
2042 .begin = db1x_pm_begin,
2043 .enter = db1x_pm_enter,
2044 diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
2045 index 09e7128..111035b 100644
2046 --- a/arch/mips/include/asm/atomic.h
2047 +++ b/arch/mips/include/asm/atomic.h
2048 @@ -21,6 +21,10 @@
2049 #include <asm/war.h>
2050 #include <asm/system.h>
2051
2052 +#ifdef CONFIG_GENERIC_ATOMIC64
2053 +#include <asm-generic/atomic64.h>
2054 +#endif
2055 +
2056 #define ATOMIC_INIT(i) { (i) }
2057
2058 /*
2059 @@ -782,6 +786,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
2060 */
2061 #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
2062
2063 +#define atomic64_read_unchecked(v) atomic64_read(v)
2064 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2065 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2066 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2067 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2068 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
2069 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2070 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
2071 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2072 +
2073 #endif /* CONFIG_64BIT */
2074
2075 /*
2076 diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
2077 index 7990694..4e93acf 100644
2078 --- a/arch/mips/include/asm/elf.h
2079 +++ b/arch/mips/include/asm/elf.h
2080 @@ -368,4 +368,11 @@ extern int dump_task_fpu(struct task_struct *, elf_fpregset_t *);
2081 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
2082 #endif
2083
2084 +#ifdef CONFIG_PAX_ASLR
2085 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
2086 +
2087 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2088 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2089 +#endif
2090 +
2091 #endif /* _ASM_ELF_H */
2092 diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
2093 index f266295..627cfff 100644
2094 --- a/arch/mips/include/asm/page.h
2095 +++ b/arch/mips/include/asm/page.h
2096 @@ -93,7 +93,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
2097 #ifdef CONFIG_CPU_MIPS32
2098 typedef struct { unsigned long pte_low, pte_high; } pte_t;
2099 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
2100 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
2101 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
2102 #else
2103 typedef struct { unsigned long long pte; } pte_t;
2104 #define pte_val(x) ((x).pte)
2105 diff --git a/arch/mips/include/asm/reboot.h b/arch/mips/include/asm/reboot.h
2106 index e48c0bf..f3acf65 100644
2107 --- a/arch/mips/include/asm/reboot.h
2108 +++ b/arch/mips/include/asm/reboot.h
2109 @@ -9,7 +9,7 @@
2110 #ifndef _ASM_REBOOT_H
2111 #define _ASM_REBOOT_H
2112
2113 -extern void (*_machine_restart)(char *command);
2114 -extern void (*_machine_halt)(void);
2115 +extern void (*__noreturn _machine_restart)(char *command);
2116 +extern void (*__noreturn _machine_halt)(void);
2117
2118 #endif /* _ASM_REBOOT_H */
2119 diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
2120 index 83b5509..9fa24a23 100644
2121 --- a/arch/mips/include/asm/system.h
2122 +++ b/arch/mips/include/asm/system.h
2123 @@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
2124 */
2125 #define __ARCH_WANT_UNLOCKED_CTXSW
2126
2127 -extern unsigned long arch_align_stack(unsigned long sp);
2128 +#define arch_align_stack(x) ((x) & ~0xfUL)
2129
2130 #endif /* _ASM_SYSTEM_H */
2131 diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
2132 index 9fdd8bc..fcf9d68 100644
2133 --- a/arch/mips/kernel/binfmt_elfn32.c
2134 +++ b/arch/mips/kernel/binfmt_elfn32.c
2135 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2136 #undef ELF_ET_DYN_BASE
2137 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2138
2139 +#ifdef CONFIG_PAX_ASLR
2140 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
2141 +
2142 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2143 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2144 +#endif
2145 +
2146 #include <asm/processor.h>
2147 #include <linux/module.h>
2148 #include <linux/elfcore.h>
2149 diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
2150 index ff44823..cf0b48a 100644
2151 --- a/arch/mips/kernel/binfmt_elfo32.c
2152 +++ b/arch/mips/kernel/binfmt_elfo32.c
2153 @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2154 #undef ELF_ET_DYN_BASE
2155 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2156
2157 +#ifdef CONFIG_PAX_ASLR
2158 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
2159 +
2160 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2161 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2162 +#endif
2163 +
2164 #include <asm/processor.h>
2165
2166 /*
2167 diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
2168 index 50c9bb8..efdd5f8 100644
2169 --- a/arch/mips/kernel/kgdb.c
2170 +++ b/arch/mips/kernel/kgdb.c
2171 @@ -245,6 +245,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
2172 return -1;
2173 }
2174
2175 +/* cannot be const */
2176 struct kgdb_arch arch_kgdb_ops;
2177
2178 /*
2179 diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
2180 index f3d73e1..bb3f57a 100644
2181 --- a/arch/mips/kernel/process.c
2182 +++ b/arch/mips/kernel/process.c
2183 @@ -470,15 +470,3 @@ unsigned long get_wchan(struct task_struct *task)
2184 out:
2185 return pc;
2186 }
2187 -
2188 -/*
2189 - * Don't forget that the stack pointer must be aligned on a 8 bytes
2190 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
2191 - */
2192 -unsigned long arch_align_stack(unsigned long sp)
2193 -{
2194 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2195 - sp -= get_random_int() & ~PAGE_MASK;
2196 -
2197 - return sp & ALMASK;
2198 -}
2199 diff --git a/arch/mips/kernel/reset.c b/arch/mips/kernel/reset.c
2200 index 060563a..7fbf310 100644
2201 --- a/arch/mips/kernel/reset.c
2202 +++ b/arch/mips/kernel/reset.c
2203 @@ -19,8 +19,8 @@
2204 * So handle all using function pointers to machine specific
2205 * functions.
2206 */
2207 -void (*_machine_restart)(char *command);
2208 -void (*_machine_halt)(void);
2209 +void (*__noreturn _machine_restart)(char *command);
2210 +void (*__noreturn _machine_halt)(void);
2211 void (*pm_power_off)(void);
2212
2213 EXPORT_SYMBOL(pm_power_off);
2214 @@ -29,16 +29,19 @@ void machine_restart(char *command)
2215 {
2216 if (_machine_restart)
2217 _machine_restart(command);
2218 + BUG();
2219 }
2220
2221 void machine_halt(void)
2222 {
2223 if (_machine_halt)
2224 _machine_halt();
2225 + BUG();
2226 }
2227
2228 void machine_power_off(void)
2229 {
2230 if (pm_power_off)
2231 pm_power_off();
2232 + BUG();
2233 }
2234 diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
2235 index 3f7f466..3abe0b5 100644
2236 --- a/arch/mips/kernel/syscall.c
2237 +++ b/arch/mips/kernel/syscall.c
2238 @@ -102,17 +102,21 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
2239 do_color_align = 0;
2240 if (filp || (flags & MAP_SHARED))
2241 do_color_align = 1;
2242 +
2243 +#ifdef CONFIG_PAX_RANDMMAP
2244 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
2245 +#endif
2246 +
2247 if (addr) {
2248 if (do_color_align)
2249 addr = COLOUR_ALIGN(addr, pgoff);
2250 else
2251 addr = PAGE_ALIGN(addr);
2252 vmm = find_vma(current->mm, addr);
2253 - if (task_size - len >= addr &&
2254 - (!vmm || addr + len <= vmm->vm_start))
2255 + if (task_size - len >= addr && check_heap_stack_gap(vmm, addr, len))
2256 return addr;
2257 }
2258 - addr = TASK_UNMAPPED_BASE;
2259 + addr = current->mm->mmap_base;
2260 if (do_color_align)
2261 addr = COLOUR_ALIGN(addr, pgoff);
2262 else
2263 @@ -122,7 +126,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
2264 /* At this point: (!vmm || addr < vmm->vm_end). */
2265 if (task_size - len < addr)
2266 return -ENOMEM;
2267 - if (!vmm || addr + len <= vmm->vm_start)
2268 + if (check_heap_stack_gap(vmm, addr, len))
2269 return addr;
2270 addr = vmm->vm_end;
2271 if (do_color_align)
2272 diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
2273 index e97a7a2..f18f5b0 100644
2274 --- a/arch/mips/mm/fault.c
2275 +++ b/arch/mips/mm/fault.c
2276 @@ -26,6 +26,23 @@
2277 #include <asm/ptrace.h>
2278 #include <asm/highmem.h> /* For VMALLOC_END */
2279
2280 +#ifdef CONFIG_PAX_PAGEEXEC
2281 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2282 +{
2283 + unsigned long i;
2284 +
2285 + printk(KERN_ERR "PAX: bytes at PC: ");
2286 + for (i = 0; i < 5; i++) {
2287 + unsigned int c;
2288 + if (get_user(c, (unsigned int *)pc+i))
2289 + printk(KERN_CONT "???????? ");
2290 + else
2291 + printk(KERN_CONT "%08x ", c);
2292 + }
2293 + printk("\n");
2294 +}
2295 +#endif
2296 +
2297 /*
2298 * This routine handles page faults. It determines the address,
2299 * and the problem, and then passes it off to one of the appropriate
2300 diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
2301 index 8bc9e96..26554f8 100644
2302 --- a/arch/parisc/include/asm/atomic.h
2303 +++ b/arch/parisc/include/asm/atomic.h
2304 @@ -336,6 +336,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
2305
2306 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
2307
2308 +#define atomic64_read_unchecked(v) atomic64_read(v)
2309 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2310 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2311 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2312 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2313 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
2314 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2315 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
2316 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2317 +
2318 #else /* CONFIG_64BIT */
2319
2320 #include <asm-generic/atomic64.h>
2321 diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
2322 index 9c802eb..0592e41 100644
2323 --- a/arch/parisc/include/asm/elf.h
2324 +++ b/arch/parisc/include/asm/elf.h
2325 @@ -343,6 +343,13 @@ struct pt_regs; /* forward declaration... */
2326
2327 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
2328
2329 +#ifdef CONFIG_PAX_ASLR
2330 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
2331 +
2332 +#define PAX_DELTA_MMAP_LEN 16
2333 +#define PAX_DELTA_STACK_LEN 16
2334 +#endif
2335 +
2336 /* This yields a mask that user programs can use to figure out what
2337 instruction set this CPU supports. This could be done in user space,
2338 but it's not easy, and we've already done it here. */
2339 diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
2340 index a27d2e2..18fd845 100644
2341 --- a/arch/parisc/include/asm/pgtable.h
2342 +++ b/arch/parisc/include/asm/pgtable.h
2343 @@ -207,6 +207,17 @@
2344 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
2345 #define PAGE_COPY PAGE_EXECREAD
2346 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
2347 +
2348 +#ifdef CONFIG_PAX_PAGEEXEC
2349 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
2350 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
2351 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
2352 +#else
2353 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
2354 +# define PAGE_COPY_NOEXEC PAGE_COPY
2355 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
2356 +#endif
2357 +
2358 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
2359 #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
2360 #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
2361 diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
2362 index 2120746..8d70a5e 100644
2363 --- a/arch/parisc/kernel/module.c
2364 +++ b/arch/parisc/kernel/module.c
2365 @@ -95,16 +95,38 @@
2366
2367 /* three functions to determine where in the module core
2368 * or init pieces the location is */
2369 +static inline int in_init_rx(struct module *me, void *loc)
2370 +{
2371 + return (loc >= me->module_init_rx &&
2372 + loc < (me->module_init_rx + me->init_size_rx));
2373 +}
2374 +
2375 +static inline int in_init_rw(struct module *me, void *loc)
2376 +{
2377 + return (loc >= me->module_init_rw &&
2378 + loc < (me->module_init_rw + me->init_size_rw));
2379 +}
2380 +
2381 static inline int in_init(struct module *me, void *loc)
2382 {
2383 - return (loc >= me->module_init &&
2384 - loc <= (me->module_init + me->init_size));
2385 + return in_init_rx(me, loc) || in_init_rw(me, loc);
2386 +}
2387 +
2388 +static inline int in_core_rx(struct module *me, void *loc)
2389 +{
2390 + return (loc >= me->module_core_rx &&
2391 + loc < (me->module_core_rx + me->core_size_rx));
2392 +}
2393 +
2394 +static inline int in_core_rw(struct module *me, void *loc)
2395 +{
2396 + return (loc >= me->module_core_rw &&
2397 + loc < (me->module_core_rw + me->core_size_rw));
2398 }
2399
2400 static inline int in_core(struct module *me, void *loc)
2401 {
2402 - return (loc >= me->module_core &&
2403 - loc <= (me->module_core + me->core_size));
2404 + return in_core_rx(me, loc) || in_core_rw(me, loc);
2405 }
2406
2407 static inline int in_local(struct module *me, void *loc)
2408 @@ -364,13 +386,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
2409 }
2410
2411 /* align things a bit */
2412 - me->core_size = ALIGN(me->core_size, 16);
2413 - me->arch.got_offset = me->core_size;
2414 - me->core_size += gots * sizeof(struct got_entry);
2415 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
2416 + me->arch.got_offset = me->core_size_rw;
2417 + me->core_size_rw += gots * sizeof(struct got_entry);
2418
2419 - me->core_size = ALIGN(me->core_size, 16);
2420 - me->arch.fdesc_offset = me->core_size;
2421 - me->core_size += fdescs * sizeof(Elf_Fdesc);
2422 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
2423 + me->arch.fdesc_offset = me->core_size_rw;
2424 + me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
2425
2426 me->arch.got_max = gots;
2427 me->arch.fdesc_max = fdescs;
2428 @@ -388,7 +410,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
2429
2430 BUG_ON(value == 0);
2431
2432 - got = me->module_core + me->arch.got_offset;
2433 + got = me->module_core_rw + me->arch.got_offset;
2434 for (i = 0; got[i].addr; i++)
2435 if (got[i].addr == value)
2436 goto out;
2437 @@ -406,7 +428,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
2438 #ifdef CONFIG_64BIT
2439 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
2440 {
2441 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
2442 + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
2443
2444 if (!value) {
2445 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
2446 @@ -424,7 +446,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
2447
2448 /* Create new one */
2449 fdesc->addr = value;
2450 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
2451 + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
2452 return (Elf_Addr)fdesc;
2453 }
2454 #endif /* CONFIG_64BIT */
2455 @@ -848,7 +870,7 @@ register_unwind_table(struct module *me,
2456
2457 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
2458 end = table + sechdrs[me->arch.unwind_section].sh_size;
2459 - gp = (Elf_Addr)me->module_core + me->arch.got_offset;
2460 + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
2461
2462 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
2463 me->arch.unwind_section, table, end, gp);
2464 diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
2465 index 9147391..f3d949a 100644
2466 --- a/arch/parisc/kernel/sys_parisc.c
2467 +++ b/arch/parisc/kernel/sys_parisc.c
2468 @@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
2469 /* At this point: (!vma || addr < vma->vm_end). */
2470 if (TASK_SIZE - len < addr)
2471 return -ENOMEM;
2472 - if (!vma || addr + len <= vma->vm_start)
2473 + if (check_heap_stack_gap(vma, addr, len))
2474 return addr;
2475 addr = vma->vm_end;
2476 }
2477 @@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
2478 /* At this point: (!vma || addr < vma->vm_end). */
2479 if (TASK_SIZE - len < addr)
2480 return -ENOMEM;
2481 - if (!vma || addr + len <= vma->vm_start)
2482 + if (check_heap_stack_gap(vma, addr, len))
2483 return addr;
2484 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
2485 if (addr < vma->vm_end) /* handle wraparound */
2486 @@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
2487 if (flags & MAP_FIXED)
2488 return addr;
2489 if (!addr)
2490 - addr = TASK_UNMAPPED_BASE;
2491 + addr = current->mm->mmap_base;
2492
2493 if (filp) {
2494 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
2495 diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
2496 index 8b58bf0..7afff03 100644
2497 --- a/arch/parisc/kernel/traps.c
2498 +++ b/arch/parisc/kernel/traps.c
2499 @@ -733,9 +733,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
2500
2501 down_read(&current->mm->mmap_sem);
2502 vma = find_vma(current->mm,regs->iaoq[0]);
2503 - if (vma && (regs->iaoq[0] >= vma->vm_start)
2504 - && (vma->vm_flags & VM_EXEC)) {
2505 -
2506 + if (vma && (regs->iaoq[0] >= vma->vm_start)) {
2507 fault_address = regs->iaoq[0];
2508 fault_space = regs->iasq[0];
2509
2510 diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
2511 index c6afbfc..c5839f6 100644
2512 --- a/arch/parisc/mm/fault.c
2513 +++ b/arch/parisc/mm/fault.c
2514 @@ -15,6 +15,7 @@
2515 #include <linux/sched.h>
2516 #include <linux/interrupt.h>
2517 #include <linux/module.h>
2518 +#include <linux/unistd.h>
2519
2520 #include <asm/uaccess.h>
2521 #include <asm/traps.h>
2522 @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
2523 static unsigned long
2524 parisc_acctyp(unsigned long code, unsigned int inst)
2525 {
2526 - if (code == 6 || code == 16)
2527 + if (code == 6 || code == 7 || code == 16)
2528 return VM_EXEC;
2529
2530 switch (inst & 0xf0000000) {
2531 @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
2532 }
2533 #endif
2534
2535 +#ifdef CONFIG_PAX_PAGEEXEC
2536 +/*
2537 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
2538 + *
2539 + * returns 1 when task should be killed
2540 + * 2 when rt_sigreturn trampoline was detected
2541 + * 3 when unpatched PLT trampoline was detected
2542 + */
2543 +static int pax_handle_fetch_fault(struct pt_regs *regs)
2544 +{
2545 +
2546 +#ifdef CONFIG_PAX_EMUPLT
2547 + int err;
2548 +
2549 + do { /* PaX: unpatched PLT emulation */
2550 + unsigned int bl, depwi;
2551 +
2552 + err = get_user(bl, (unsigned int *)instruction_pointer(regs));
2553 + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
2554 +
2555 + if (err)
2556 + break;
2557 +
2558 + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
2559 + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
2560 +
2561 + err = get_user(ldw, (unsigned int *)addr);
2562 + err |= get_user(bv, (unsigned int *)(addr+4));
2563 + err |= get_user(ldw2, (unsigned int *)(addr+8));
2564 +
2565 + if (err)
2566 + break;
2567 +
2568 + if (ldw == 0x0E801096U &&
2569 + bv == 0xEAC0C000U &&
2570 + ldw2 == 0x0E881095U)
2571 + {
2572 + unsigned int resolver, map;
2573 +
2574 + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
2575 + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
2576 + if (err)
2577 + break;
2578 +
2579 + regs->gr[20] = instruction_pointer(regs)+8;
2580 + regs->gr[21] = map;
2581 + regs->gr[22] = resolver;
2582 + regs->iaoq[0] = resolver | 3UL;
2583 + regs->iaoq[1] = regs->iaoq[0] + 4;
2584 + return 3;
2585 + }
2586 + }
2587 + } while (0);
2588 +#endif
2589 +
2590 +#ifdef CONFIG_PAX_EMUTRAMP
2591 +
2592 +#ifndef CONFIG_PAX_EMUSIGRT
2593 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
2594 + return 1;
2595 +#endif
2596 +
2597 + do { /* PaX: rt_sigreturn emulation */
2598 + unsigned int ldi1, ldi2, bel, nop;
2599 +
2600 + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
2601 + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
2602 + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
2603 + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
2604 +
2605 + if (err)
2606 + break;
2607 +
2608 + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
2609 + ldi2 == 0x3414015AU &&
2610 + bel == 0xE4008200U &&
2611 + nop == 0x08000240U)
2612 + {
2613 + regs->gr[25] = (ldi1 & 2) >> 1;
2614 + regs->gr[20] = __NR_rt_sigreturn;
2615 + regs->gr[31] = regs->iaoq[1] + 16;
2616 + regs->sr[0] = regs->iasq[1];
2617 + regs->iaoq[0] = 0x100UL;
2618 + regs->iaoq[1] = regs->iaoq[0] + 4;
2619 + regs->iasq[0] = regs->sr[2];
2620 + regs->iasq[1] = regs->sr[2];
2621 + return 2;
2622 + }
2623 + } while (0);
2624 +#endif
2625 +
2626 + return 1;
2627 +}
2628 +
2629 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2630 +{
2631 + unsigned long i;
2632 +
2633 + printk(KERN_ERR "PAX: bytes at PC: ");
2634 + for (i = 0; i < 5; i++) {
2635 + unsigned int c;
2636 + if (get_user(c, (unsigned int *)pc+i))
2637 + printk(KERN_CONT "???????? ");
2638 + else
2639 + printk(KERN_CONT "%08x ", c);
2640 + }
2641 + printk("\n");
2642 +}
2643 +#endif
2644 +
2645 int fixup_exception(struct pt_regs *regs)
2646 {
2647 const struct exception_table_entry *fix;
2648 @@ -192,8 +303,33 @@ good_area:
2649
2650 acc_type = parisc_acctyp(code,regs->iir);
2651
2652 - if ((vma->vm_flags & acc_type) != acc_type)
2653 + if ((vma->vm_flags & acc_type) != acc_type) {
2654 +
2655 +#ifdef CONFIG_PAX_PAGEEXEC
2656 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
2657 + (address & ~3UL) == instruction_pointer(regs))
2658 + {
2659 + up_read(&mm->mmap_sem);
2660 + switch (pax_handle_fetch_fault(regs)) {
2661 +
2662 +#ifdef CONFIG_PAX_EMUPLT
2663 + case 3:
2664 + return;
2665 +#endif
2666 +
2667 +#ifdef CONFIG_PAX_EMUTRAMP
2668 + case 2:
2669 + return;
2670 +#endif
2671 +
2672 + }
2673 + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
2674 + do_group_exit(SIGKILL);
2675 + }
2676 +#endif
2677 +
2678 goto bad_area;
2679 + }
2680
2681 /*
2682 * If for any reason at all we couldn't handle the fault, make
2683 diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
2684 index c107b74..409dc0f 100644
2685 --- a/arch/powerpc/Makefile
2686 +++ b/arch/powerpc/Makefile
2687 @@ -74,6 +74,8 @@ KBUILD_AFLAGS += -Iarch/$(ARCH)
2688 KBUILD_CFLAGS += -msoft-float -pipe -Iarch/$(ARCH) $(CFLAGS-y)
2689 CPP = $(CC) -E $(KBUILD_CFLAGS)
2690
2691 +cflags-y += -Wno-sign-compare -Wno-extra
2692 +
2693 CHECKFLAGS += -m$(CONFIG_WORD_SIZE) -D__powerpc__ -D__powerpc$(CONFIG_WORD_SIZE)__
2694
2695 ifeq ($(CONFIG_PPC64),y)
2696 diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h
2697 index 6d94d27..50d4cad 100644
2698 --- a/arch/powerpc/include/asm/device.h
2699 +++ b/arch/powerpc/include/asm/device.h
2700 @@ -14,7 +14,7 @@ struct dev_archdata {
2701 struct device_node *of_node;
2702
2703 /* DMA operations on that device */
2704 - struct dma_map_ops *dma_ops;
2705 + const struct dma_map_ops *dma_ops;
2706
2707 /*
2708 * When an iommu is in use, dma_data is used as a ptr to the base of the
2709 diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
2710 index e281dae..2b8a784 100644
2711 --- a/arch/powerpc/include/asm/dma-mapping.h
2712 +++ b/arch/powerpc/include/asm/dma-mapping.h
2713 @@ -69,9 +69,9 @@ static inline unsigned long device_to_mask(struct device *dev)
2714 #ifdef CONFIG_PPC64
2715 extern struct dma_map_ops dma_iommu_ops;
2716 #endif
2717 -extern struct dma_map_ops dma_direct_ops;
2718 +extern const struct dma_map_ops dma_direct_ops;
2719
2720 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
2721 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
2722 {
2723 /* We don't handle the NULL dev case for ISA for now. We could
2724 * do it via an out of line call but it is not needed for now. The
2725 @@ -84,7 +84,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
2726 return dev->archdata.dma_ops;
2727 }
2728
2729 -static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
2730 +static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
2731 {
2732 dev->archdata.dma_ops = ops;
2733 }
2734 @@ -118,7 +118,7 @@ static inline void set_dma_offset(struct device *dev, dma_addr_t off)
2735
2736 static inline int dma_supported(struct device *dev, u64 mask)
2737 {
2738 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2739 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2740
2741 if (unlikely(dma_ops == NULL))
2742 return 0;
2743 @@ -132,7 +132,7 @@ static inline int dma_supported(struct device *dev, u64 mask)
2744
2745 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
2746 {
2747 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2748 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2749
2750 if (unlikely(dma_ops == NULL))
2751 return -EIO;
2752 @@ -147,7 +147,7 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask)
2753 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
2754 dma_addr_t *dma_handle, gfp_t flag)
2755 {
2756 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2757 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2758 void *cpu_addr;
2759
2760 BUG_ON(!dma_ops);
2761 @@ -162,7 +162,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
2762 static inline void dma_free_coherent(struct device *dev, size_t size,
2763 void *cpu_addr, dma_addr_t dma_handle)
2764 {
2765 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2766 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2767
2768 BUG_ON(!dma_ops);
2769
2770 @@ -173,7 +173,7 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
2771
2772 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
2773 {
2774 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2775 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2776
2777 if (dma_ops->mapping_error)
2778 return dma_ops->mapping_error(dev, dma_addr);
2779 diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
2780 index 5698502..5db093c 100644
2781 --- a/arch/powerpc/include/asm/elf.h
2782 +++ b/arch/powerpc/include/asm/elf.h
2783 @@ -179,8 +179,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
2784 the loader. We need to make sure that it is out of the way of the program
2785 that it will "exec", and that there is sufficient room for the brk. */
2786
2787 -extern unsigned long randomize_et_dyn(unsigned long base);
2788 -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
2789 +#define ELF_ET_DYN_BASE (0x20000000)
2790 +
2791 +#ifdef CONFIG_PAX_ASLR
2792 +#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
2793 +
2794 +#ifdef __powerpc64__
2795 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2796 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2797 +#else
2798 +#define PAX_DELTA_MMAP_LEN 15
2799 +#define PAX_DELTA_STACK_LEN 15
2800 +#endif
2801 +#endif
2802
2803 /*
2804 * Our registers are always unsigned longs, whether we're a 32 bit
2805 @@ -275,9 +286,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
2806 (0x7ff >> (PAGE_SHIFT - 12)) : \
2807 (0x3ffff >> (PAGE_SHIFT - 12)))
2808
2809 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2810 -#define arch_randomize_brk arch_randomize_brk
2811 -
2812 #endif /* __KERNEL__ */
2813
2814 /*
2815 diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
2816 index edfc980..1766f59 100644
2817 --- a/arch/powerpc/include/asm/iommu.h
2818 +++ b/arch/powerpc/include/asm/iommu.h
2819 @@ -116,6 +116,9 @@ extern void iommu_init_early_iSeries(void);
2820 extern void iommu_init_early_dart(void);
2821 extern void iommu_init_early_pasemi(void);
2822
2823 +/* dma-iommu.c */
2824 +extern int dma_iommu_dma_supported(struct device *dev, u64 mask);
2825 +
2826 #ifdef CONFIG_PCI
2827 extern void pci_iommu_init(void);
2828 extern void pci_direct_iommu_init(void);
2829 diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
2830 index 9163695..5a00112 100644
2831 --- a/arch/powerpc/include/asm/kmap_types.h
2832 +++ b/arch/powerpc/include/asm/kmap_types.h
2833 @@ -26,6 +26,7 @@ enum km_type {
2834 KM_SOFTIRQ1,
2835 KM_PPC_SYNC_PAGE,
2836 KM_PPC_SYNC_ICACHE,
2837 + KM_CLEARPAGE,
2838 KM_TYPE_NR
2839 };
2840
2841 diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
2842 index ff24254..fe45b21 100644
2843 --- a/arch/powerpc/include/asm/page.h
2844 +++ b/arch/powerpc/include/asm/page.h
2845 @@ -116,8 +116,9 @@ extern phys_addr_t kernstart_addr;
2846 * and needs to be executable. This means the whole heap ends
2847 * up being executable.
2848 */
2849 -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2850 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2851 +#define VM_DATA_DEFAULT_FLAGS32 \
2852 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2853 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2854
2855 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2856 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2857 @@ -145,6 +146,9 @@ extern phys_addr_t kernstart_addr;
2858 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
2859 #endif
2860
2861 +#define ktla_ktva(addr) (addr)
2862 +#define ktva_ktla(addr) (addr)
2863 +
2864 #ifndef __ASSEMBLY__
2865
2866 #undef STRICT_MM_TYPECHECKS
2867 diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
2868 index 3f17b83..1f9e766 100644
2869 --- a/arch/powerpc/include/asm/page_64.h
2870 +++ b/arch/powerpc/include/asm/page_64.h
2871 @@ -180,15 +180,18 @@ do { \
2872 * stack by default, so in the absense of a PT_GNU_STACK program header
2873 * we turn execute permission off.
2874 */
2875 -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2876 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2877 +#define VM_STACK_DEFAULT_FLAGS32 \
2878 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2879 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2880
2881 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2882 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2883
2884 +#ifndef CONFIG_PAX_PAGEEXEC
2885 #define VM_STACK_DEFAULT_FLAGS \
2886 (test_thread_flag(TIF_32BIT) ? \
2887 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
2888 +#endif
2889
2890 #include <asm-generic/getorder.h>
2891
2892 diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
2893 index b5ea626..40308222 100644
2894 --- a/arch/powerpc/include/asm/pci.h
2895 +++ b/arch/powerpc/include/asm/pci.h
2896 @@ -65,8 +65,8 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
2897 }
2898
2899 #ifdef CONFIG_PCI
2900 -extern void set_pci_dma_ops(struct dma_map_ops *dma_ops);
2901 -extern struct dma_map_ops *get_pci_dma_ops(void);
2902 +extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops);
2903 +extern const struct dma_map_ops *get_pci_dma_ops(void);
2904 #else /* CONFIG_PCI */
2905 #define set_pci_dma_ops(d)
2906 #define get_pci_dma_ops() NULL
2907 diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
2908 index 2a5da06..d65bea2 100644
2909 --- a/arch/powerpc/include/asm/pgtable.h
2910 +++ b/arch/powerpc/include/asm/pgtable.h
2911 @@ -2,6 +2,7 @@
2912 #define _ASM_POWERPC_PGTABLE_H
2913 #ifdef __KERNEL__
2914
2915 +#include <linux/const.h>
2916 #ifndef __ASSEMBLY__
2917 #include <asm/processor.h> /* For TASK_SIZE */
2918 #include <asm/mmu.h>
2919 diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
2920 index 4aad413..85d86bf 100644
2921 --- a/arch/powerpc/include/asm/pte-hash32.h
2922 +++ b/arch/powerpc/include/asm/pte-hash32.h
2923 @@ -21,6 +21,7 @@
2924 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
2925 #define _PAGE_USER 0x004 /* usermode access allowed */
2926 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
2927 +#define _PAGE_EXEC _PAGE_GUARDED
2928 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
2929 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
2930 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
2931 diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
2932 index 8c34149..78f425a 100644
2933 --- a/arch/powerpc/include/asm/ptrace.h
2934 +++ b/arch/powerpc/include/asm/ptrace.h
2935 @@ -103,7 +103,7 @@ extern unsigned long profile_pc(struct pt_regs *regs);
2936 } while(0)
2937
2938 struct task_struct;
2939 -extern unsigned long ptrace_get_reg(struct task_struct *task, int regno);
2940 +extern unsigned long ptrace_get_reg(struct task_struct *task, unsigned int regno);
2941 extern int ptrace_put_reg(struct task_struct *task, int regno,
2942 unsigned long data);
2943
2944 diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
2945 index 32a7c30..be3a8bb 100644
2946 --- a/arch/powerpc/include/asm/reg.h
2947 +++ b/arch/powerpc/include/asm/reg.h
2948 @@ -191,6 +191,7 @@
2949 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
2950 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
2951 #define DSISR_NOHPTE 0x40000000 /* no translation found */
2952 +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
2953 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
2954 #define DSISR_ISSTORE 0x02000000 /* access was a store */
2955 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
2956 diff --git a/arch/powerpc/include/asm/swiotlb.h b/arch/powerpc/include/asm/swiotlb.h
2957 index 8979d4c..d2fd0d3 100644
2958 --- a/arch/powerpc/include/asm/swiotlb.h
2959 +++ b/arch/powerpc/include/asm/swiotlb.h
2960 @@ -13,7 +13,7 @@
2961
2962 #include <linux/swiotlb.h>
2963
2964 -extern struct dma_map_ops swiotlb_dma_ops;
2965 +extern const struct dma_map_ops swiotlb_dma_ops;
2966
2967 static inline void dma_mark_clean(void *addr, size_t size) {}
2968
2969 diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
2970 index 094a12a..877a60a 100644
2971 --- a/arch/powerpc/include/asm/system.h
2972 +++ b/arch/powerpc/include/asm/system.h
2973 @@ -531,7 +531,7 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
2974 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
2975 #endif
2976
2977 -extern unsigned long arch_align_stack(unsigned long sp);
2978 +#define arch_align_stack(x) ((x) & ~0xfUL)
2979
2980 /* Used in very early kernel initialization. */
2981 extern unsigned long reloc_offset(void);
2982 diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
2983 index bd0fb84..a42a14b 100644
2984 --- a/arch/powerpc/include/asm/uaccess.h
2985 +++ b/arch/powerpc/include/asm/uaccess.h
2986 @@ -13,6 +13,8 @@
2987 #define VERIFY_READ 0
2988 #define VERIFY_WRITE 1
2989
2990 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
2991 +
2992 /*
2993 * The fs value determines whether argument validity checking should be
2994 * performed or not. If get_fs() == USER_DS, checking is performed, with
2995 @@ -327,52 +329,6 @@ do { \
2996 extern unsigned long __copy_tofrom_user(void __user *to,
2997 const void __user *from, unsigned long size);
2998
2999 -#ifndef __powerpc64__
3000 -
3001 -static inline unsigned long copy_from_user(void *to,
3002 - const void __user *from, unsigned long n)
3003 -{
3004 - unsigned long over;
3005 -
3006 - if (access_ok(VERIFY_READ, from, n))
3007 - return __copy_tofrom_user((__force void __user *)to, from, n);
3008 - if ((unsigned long)from < TASK_SIZE) {
3009 - over = (unsigned long)from + n - TASK_SIZE;
3010 - return __copy_tofrom_user((__force void __user *)to, from,
3011 - n - over) + over;
3012 - }
3013 - return n;
3014 -}
3015 -
3016 -static inline unsigned long copy_to_user(void __user *to,
3017 - const void *from, unsigned long n)
3018 -{
3019 - unsigned long over;
3020 -
3021 - if (access_ok(VERIFY_WRITE, to, n))
3022 - return __copy_tofrom_user(to, (__force void __user *)from, n);
3023 - if ((unsigned long)to < TASK_SIZE) {
3024 - over = (unsigned long)to + n - TASK_SIZE;
3025 - return __copy_tofrom_user(to, (__force void __user *)from,
3026 - n - over) + over;
3027 - }
3028 - return n;
3029 -}
3030 -
3031 -#else /* __powerpc64__ */
3032 -
3033 -#define __copy_in_user(to, from, size) \
3034 - __copy_tofrom_user((to), (from), (size))
3035 -
3036 -extern unsigned long copy_from_user(void *to, const void __user *from,
3037 - unsigned long n);
3038 -extern unsigned long copy_to_user(void __user *to, const void *from,
3039 - unsigned long n);
3040 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
3041 - unsigned long n);
3042 -
3043 -#endif /* __powerpc64__ */
3044 -
3045 static inline unsigned long __copy_from_user_inatomic(void *to,
3046 const void __user *from, unsigned long n)
3047 {
3048 @@ -396,6 +352,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
3049 if (ret == 0)
3050 return 0;
3051 }
3052 +
3053 + if (!__builtin_constant_p(n))
3054 + check_object_size(to, n, false);
3055 +
3056 return __copy_tofrom_user((__force void __user *)to, from, n);
3057 }
3058
3059 @@ -422,6 +382,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
3060 if (ret == 0)
3061 return 0;
3062 }
3063 +
3064 + if (!__builtin_constant_p(n))
3065 + check_object_size(from, n, true);
3066 +
3067 return __copy_tofrom_user(to, (__force const void __user *)from, n);
3068 }
3069
3070 @@ -439,6 +403,92 @@ static inline unsigned long __copy_to_user(void __user *to,
3071 return __copy_to_user_inatomic(to, from, size);
3072 }
3073
3074 +#ifndef __powerpc64__
3075 +
3076 +static inline unsigned long __must_check copy_from_user(void *to,
3077 + const void __user *from, unsigned long n)
3078 +{
3079 + unsigned long over;
3080 +
3081 + if ((long)n < 0)
3082 + return n;
3083 +
3084 + if (access_ok(VERIFY_READ, from, n)) {
3085 + if (!__builtin_constant_p(n))
3086 + check_object_size(to, n, false);
3087 + return __copy_tofrom_user((__force void __user *)to, from, n);
3088 + }
3089 + if ((unsigned long)from < TASK_SIZE) {
3090 + over = (unsigned long)from + n - TASK_SIZE;
3091 + if (!__builtin_constant_p(n - over))
3092 + check_object_size(to, n - over, false);
3093 + return __copy_tofrom_user((__force void __user *)to, from,
3094 + n - over) + over;
3095 + }
3096 + return n;
3097 +}
3098 +
3099 +static inline unsigned long __must_check copy_to_user(void __user *to,
3100 + const void *from, unsigned long n)
3101 +{
3102 + unsigned long over;
3103 +
3104 + if ((long)n < 0)
3105 + return n;
3106 +
3107 + if (access_ok(VERIFY_WRITE, to, n)) {
3108 + if (!__builtin_constant_p(n))
3109 + check_object_size(from, n, true);
3110 + return __copy_tofrom_user(to, (__force void __user *)from, n);
3111 + }
3112 + if ((unsigned long)to < TASK_SIZE) {
3113 + over = (unsigned long)to + n - TASK_SIZE;
3114 + if (!__builtin_constant_p(n))
3115 + check_object_size(from, n - over, true);
3116 + return __copy_tofrom_user(to, (__force void __user *)from,
3117 + n - over) + over;
3118 + }
3119 + return n;
3120 +}
3121 +
3122 +#else /* __powerpc64__ */
3123 +
3124 +#define __copy_in_user(to, from, size) \
3125 + __copy_tofrom_user((to), (from), (size))
3126 +
3127 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
3128 +{
3129 + if ((long)n < 0 || n > INT_MAX)
3130 + return n;
3131 +
3132 + if (!__builtin_constant_p(n))
3133 + check_object_size(to, n, false);
3134 +
3135 + if (likely(access_ok(VERIFY_READ, from, n)))
3136 + n = __copy_from_user(to, from, n);
3137 + else
3138 + memset(to, 0, n);
3139 + return n;
3140 +}
3141 +
3142 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
3143 +{
3144 + if ((long)n < 0 || n > INT_MAX)
3145 + return n;
3146 +
3147 + if (likely(access_ok(VERIFY_WRITE, to, n))) {
3148 + if (!__builtin_constant_p(n))
3149 + check_object_size(from, n, true);
3150 + n = __copy_to_user(to, from, n);
3151 + }
3152 + return n;
3153 +}
3154 +
3155 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
3156 + unsigned long n);
3157 +
3158 +#endif /* __powerpc64__ */
3159 +
3160 extern unsigned long __clear_user(void __user *addr, unsigned long size);
3161
3162 static inline unsigned long clear_user(void __user *addr, unsigned long size)
3163 diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
3164 index bb37b1d..01fe9ce 100644
3165 --- a/arch/powerpc/kernel/cacheinfo.c
3166 +++ b/arch/powerpc/kernel/cacheinfo.c
3167 @@ -642,7 +642,7 @@ static struct kobj_attribute *cache_index_opt_attrs[] = {
3168 &cache_assoc_attr,
3169 };
3170
3171 -static struct sysfs_ops cache_index_ops = {
3172 +static const struct sysfs_ops cache_index_ops = {
3173 .show = cache_index_show,
3174 };
3175
3176 diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
3177 index 37771a5..648530c 100644
3178 --- a/arch/powerpc/kernel/dma-iommu.c
3179 +++ b/arch/powerpc/kernel/dma-iommu.c
3180 @@ -70,7 +70,7 @@ static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
3181 }
3182
3183 /* We support DMA to/from any memory page via the iommu */
3184 -static int dma_iommu_dma_supported(struct device *dev, u64 mask)
3185 +int dma_iommu_dma_supported(struct device *dev, u64 mask)
3186 {
3187 struct iommu_table *tbl = get_iommu_table_base(dev);
3188
3189 diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
3190 index e96cbbd..bdd6d41 100644
3191 --- a/arch/powerpc/kernel/dma-swiotlb.c
3192 +++ b/arch/powerpc/kernel/dma-swiotlb.c
3193 @@ -31,7 +31,7 @@ unsigned int ppc_swiotlb_enable;
3194 * map_page, and unmap_page on highmem, use normal dma_ops
3195 * for everything else.
3196 */
3197 -struct dma_map_ops swiotlb_dma_ops = {
3198 +const struct dma_map_ops swiotlb_dma_ops = {
3199 .alloc_coherent = dma_direct_alloc_coherent,
3200 .free_coherent = dma_direct_free_coherent,
3201 .map_sg = swiotlb_map_sg_attrs,
3202 diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
3203 index 6215062..ebea59c 100644
3204 --- a/arch/powerpc/kernel/dma.c
3205 +++ b/arch/powerpc/kernel/dma.c
3206 @@ -134,7 +134,7 @@ static inline void dma_direct_sync_single_range(struct device *dev,
3207 }
3208 #endif
3209
3210 -struct dma_map_ops dma_direct_ops = {
3211 +const struct dma_map_ops dma_direct_ops = {
3212 .alloc_coherent = dma_direct_alloc_coherent,
3213 .free_coherent = dma_direct_free_coherent,
3214 .map_sg = dma_direct_map_sg,
3215 diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
3216 index 24dcc0e..a300455 100644
3217 --- a/arch/powerpc/kernel/exceptions-64e.S
3218 +++ b/arch/powerpc/kernel/exceptions-64e.S
3219 @@ -455,6 +455,7 @@ storage_fault_common:
3220 std r14,_DAR(r1)
3221 std r15,_DSISR(r1)
3222 addi r3,r1,STACK_FRAME_OVERHEAD
3223 + bl .save_nvgprs
3224 mr r4,r14
3225 mr r5,r15
3226 ld r14,PACA_EXGEN+EX_R14(r13)
3227 @@ -464,8 +465,7 @@ storage_fault_common:
3228 cmpdi r3,0
3229 bne- 1f
3230 b .ret_from_except_lite
3231 -1: bl .save_nvgprs
3232 - mr r5,r3
3233 +1: mr r5,r3
3234 addi r3,r1,STACK_FRAME_OVERHEAD
3235 ld r4,_DAR(r1)
3236 bl .bad_page_fault
3237 diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
3238 index 1808876..9fd206a 100644
3239 --- a/arch/powerpc/kernel/exceptions-64s.S
3240 +++ b/arch/powerpc/kernel/exceptions-64s.S
3241 @@ -818,10 +818,10 @@ handle_page_fault:
3242 11: ld r4,_DAR(r1)
3243 ld r5,_DSISR(r1)
3244 addi r3,r1,STACK_FRAME_OVERHEAD
3245 + bl .save_nvgprs
3246 bl .do_page_fault
3247 cmpdi r3,0
3248 beq+ 13f
3249 - bl .save_nvgprs
3250 mr r5,r3
3251 addi r3,r1,STACK_FRAME_OVERHEAD
3252 lwz r4,_DAR(r1)
3253 diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
3254 index a4c8b38..1b09ad9 100644
3255 --- a/arch/powerpc/kernel/ibmebus.c
3256 +++ b/arch/powerpc/kernel/ibmebus.c
3257 @@ -127,7 +127,7 @@ static int ibmebus_dma_supported(struct device *dev, u64 mask)
3258 return 1;
3259 }
3260
3261 -static struct dma_map_ops ibmebus_dma_ops = {
3262 +static const struct dma_map_ops ibmebus_dma_ops = {
3263 .alloc_coherent = ibmebus_alloc_coherent,
3264 .free_coherent = ibmebus_free_coherent,
3265 .map_sg = ibmebus_map_sg,
3266 diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
3267 index 641c74b..8339ad7 100644
3268 --- a/arch/powerpc/kernel/kgdb.c
3269 +++ b/arch/powerpc/kernel/kgdb.c
3270 @@ -126,7 +126,7 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs)
3271 if (kgdb_handle_exception(0, SIGTRAP, 0, regs) != 0)
3272 return 0;
3273
3274 - if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
3275 + if (*(u32 *) (regs->nip) == *(const u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
3276 regs->nip += 4;
3277
3278 return 1;
3279 @@ -353,7 +353,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
3280 /*
3281 * Global data
3282 */
3283 -struct kgdb_arch arch_kgdb_ops = {
3284 +const struct kgdb_arch arch_kgdb_ops = {
3285 .gdb_bpt_instr = {0x7d, 0x82, 0x10, 0x08},
3286 };
3287
3288 diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c
3289 index 477c663..4f50234 100644
3290 --- a/arch/powerpc/kernel/module.c
3291 +++ b/arch/powerpc/kernel/module.c
3292 @@ -31,11 +31,24 @@
3293
3294 LIST_HEAD(module_bug_list);
3295
3296 +#ifdef CONFIG_PAX_KERNEXEC
3297 void *module_alloc(unsigned long size)
3298 {
3299 if (size == 0)
3300 return NULL;
3301
3302 + return vmalloc(size);
3303 +}
3304 +
3305 +void *module_alloc_exec(unsigned long size)
3306 +#else
3307 +void *module_alloc(unsigned long size)
3308 +#endif
3309 +
3310 +{
3311 + if (size == 0)
3312 + return NULL;
3313 +
3314 return vmalloc_exec(size);
3315 }
3316
3317 @@ -45,6 +58,13 @@ void module_free(struct module *mod, void *module_region)
3318 vfree(module_region);
3319 }
3320
3321 +#ifdef CONFIG_PAX_KERNEXEC
3322 +void module_free_exec(struct module *mod, void *module_region)
3323 +{
3324 + module_free(mod, module_region);
3325 +}
3326 +#endif
3327 +
3328 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
3329 const Elf_Shdr *sechdrs,
3330 const char *name)
3331 diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
3332 index f832773..0507238 100644
3333 --- a/arch/powerpc/kernel/module_32.c
3334 +++ b/arch/powerpc/kernel/module_32.c
3335 @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
3336 me->arch.core_plt_section = i;
3337 }
3338 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
3339 - printk("Module doesn't contain .plt or .init.plt sections.\n");
3340 + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
3341 return -ENOEXEC;
3342 }
3343
3344 @@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *location,
3345
3346 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
3347 /* Init, or core PLT? */
3348 - if (location >= mod->module_core
3349 - && location < mod->module_core + mod->core_size)
3350 + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
3351 + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
3352 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
3353 - else
3354 + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
3355 + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
3356 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
3357 + else {
3358 + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
3359 + return ~0UL;
3360 + }
3361
3362 /* Find this entry, or if that fails, the next avail. entry */
3363 while (entry->jump[0]) {
3364 diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
3365 index cadbed6..b9bbb00 100644
3366 --- a/arch/powerpc/kernel/pci-common.c
3367 +++ b/arch/powerpc/kernel/pci-common.c
3368 @@ -50,14 +50,14 @@ resource_size_t isa_mem_base;
3369 unsigned int ppc_pci_flags = 0;
3370
3371
3372 -static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
3373 +static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
3374
3375 -void set_pci_dma_ops(struct dma_map_ops *dma_ops)
3376 +void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
3377 {
3378 pci_dma_ops = dma_ops;
3379 }
3380
3381 -struct dma_map_ops *get_pci_dma_ops(void)
3382 +const struct dma_map_ops *get_pci_dma_ops(void)
3383 {
3384 return pci_dma_ops;
3385 }
3386 diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
3387 index 7b816da..8d5c277 100644
3388 --- a/arch/powerpc/kernel/process.c
3389 +++ b/arch/powerpc/kernel/process.c
3390 @@ -539,8 +539,8 @@ void show_regs(struct pt_regs * regs)
3391 * Lookup NIP late so we have the best change of getting the
3392 * above info out without failing
3393 */
3394 - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
3395 - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
3396 + printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
3397 + printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
3398 #endif
3399 show_stack(current, (unsigned long *) regs->gpr[1]);
3400 if (!user_mode(regs))
3401 @@ -1034,10 +1034,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
3402 newsp = stack[0];
3403 ip = stack[STACK_FRAME_LR_SAVE];
3404 if (!firstframe || ip != lr) {
3405 - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
3406 + printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
3407 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3408 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
3409 - printk(" (%pS)",
3410 + printk(" (%pA)",
3411 (void *)current->ret_stack[curr_frame].ret);
3412 curr_frame--;
3413 }
3414 @@ -1057,7 +1057,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
3415 struct pt_regs *regs = (struct pt_regs *)
3416 (sp + STACK_FRAME_OVERHEAD);
3417 lr = regs->link;
3418 - printk("--- Exception: %lx at %pS\n LR = %pS\n",
3419 + printk("--- Exception: %lx at %pA\n LR = %pA\n",
3420 regs->trap, (void *)regs->nip, (void *)lr);
3421 firstframe = 1;
3422 }
3423 @@ -1134,58 +1134,3 @@ void thread_info_cache_init(void)
3424 }
3425
3426 #endif /* THREAD_SHIFT < PAGE_SHIFT */
3427 -
3428 -unsigned long arch_align_stack(unsigned long sp)
3429 -{
3430 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
3431 - sp -= get_random_int() & ~PAGE_MASK;
3432 - return sp & ~0xf;
3433 -}
3434 -
3435 -static inline unsigned long brk_rnd(void)
3436 -{
3437 - unsigned long rnd = 0;
3438 -
3439 - /* 8MB for 32bit, 1GB for 64bit */
3440 - if (is_32bit_task())
3441 - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
3442 - else
3443 - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
3444 -
3445 - return rnd << PAGE_SHIFT;
3446 -}
3447 -
3448 -unsigned long arch_randomize_brk(struct mm_struct *mm)
3449 -{
3450 - unsigned long base = mm->brk;
3451 - unsigned long ret;
3452 -
3453 -#ifdef CONFIG_PPC_STD_MMU_64
3454 - /*
3455 - * If we are using 1TB segments and we are allowed to randomise
3456 - * the heap, we can put it above 1TB so it is backed by a 1TB
3457 - * segment. Otherwise the heap will be in the bottom 1TB
3458 - * which always uses 256MB segments and this may result in a
3459 - * performance penalty.
3460 - */
3461 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
3462 - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
3463 -#endif
3464 -
3465 - ret = PAGE_ALIGN(base + brk_rnd());
3466 -
3467 - if (ret < mm->brk)
3468 - return mm->brk;
3469 -
3470 - return ret;
3471 -}
3472 -
3473 -unsigned long randomize_et_dyn(unsigned long base)
3474 -{
3475 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
3476 -
3477 - if (ret < base)
3478 - return base;
3479 -
3480 - return ret;
3481 -}
3482 diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
3483 index ef14988..856c4bc 100644
3484 --- a/arch/powerpc/kernel/ptrace.c
3485 +++ b/arch/powerpc/kernel/ptrace.c
3486 @@ -86,7 +86,7 @@ static int set_user_trap(struct task_struct *task, unsigned long trap)
3487 /*
3488 * Get contents of register REGNO in task TASK.
3489 */
3490 -unsigned long ptrace_get_reg(struct task_struct *task, int regno)
3491 +unsigned long ptrace_get_reg(struct task_struct *task, unsigned int regno)
3492 {
3493 if (task->thread.regs == NULL)
3494 return -EIO;
3495 @@ -894,7 +894,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
3496
3497 CHECK_FULL_REGS(child->thread.regs);
3498 if (index < PT_FPR0) {
3499 - tmp = ptrace_get_reg(child, (int) index);
3500 + tmp = ptrace_get_reg(child, index);
3501 } else {
3502 flush_fp_to_thread(child);
3503 tmp = ((unsigned long *)child->thread.fpr)
3504 diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
3505 index d670429..2bc59b2 100644
3506 --- a/arch/powerpc/kernel/signal_32.c
3507 +++ b/arch/powerpc/kernel/signal_32.c
3508 @@ -857,7 +857,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
3509 /* Save user registers on the stack */
3510 frame = &rt_sf->uc.uc_mcontext;
3511 addr = frame;
3512 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
3513 + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
3514 if (save_user_regs(regs, frame, 0, 1))
3515 goto badframe;
3516 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
3517 diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
3518 index 2fe6fc6..ada0d96 100644
3519 --- a/arch/powerpc/kernel/signal_64.c
3520 +++ b/arch/powerpc/kernel/signal_64.c
3521 @@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
3522 current->thread.fpscr.val = 0;
3523
3524 /* Set up to return from userspace. */
3525 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
3526 + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
3527 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
3528 } else {
3529 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
3530 diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c
3531 index b97c2d6..dd01a6a 100644
3532 --- a/arch/powerpc/kernel/sys_ppc32.c
3533 +++ b/arch/powerpc/kernel/sys_ppc32.c
3534 @@ -563,10 +563,10 @@ asmlinkage long compat_sys_sysctl(struct __sysctl_args32 __user *args)
3535 if (oldlenp) {
3536 if (!error) {
3537 if (get_user(oldlen, oldlenp) ||
3538 - put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)))
3539 + put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)) ||
3540 + copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused)))
3541 error = -EFAULT;
3542 }
3543 - copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused));
3544 }
3545 return error;
3546 }
3547 diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
3548 index 6f0ae1a..e4b6a56 100644
3549 --- a/arch/powerpc/kernel/traps.c
3550 +++ b/arch/powerpc/kernel/traps.c
3551 @@ -99,6 +99,8 @@ static void pmac_backlight_unblank(void)
3552 static inline void pmac_backlight_unblank(void) { }
3553 #endif
3554
3555 +extern void gr_handle_kernel_exploit(void);
3556 +
3557 int die(const char *str, struct pt_regs *regs, long err)
3558 {
3559 static struct {
3560 @@ -168,6 +170,8 @@ int die(const char *str, struct pt_regs *regs, long err)
3561 if (panic_on_oops)
3562 panic("Fatal exception");
3563
3564 + gr_handle_kernel_exploit();
3565 +
3566 oops_exit();
3567 do_exit(err);
3568
3569 diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
3570 index 137dc22..fe57a79 100644
3571 --- a/arch/powerpc/kernel/vdso.c
3572 +++ b/arch/powerpc/kernel/vdso.c
3573 @@ -36,6 +36,7 @@
3574 #include <asm/firmware.h>
3575 #include <asm/vdso.h>
3576 #include <asm/vdso_datapage.h>
3577 +#include <asm/mman.h>
3578
3579 #include "setup.h"
3580
3581 @@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
3582 vdso_base = VDSO32_MBASE;
3583 #endif
3584
3585 - current->mm->context.vdso_base = 0;
3586 + current->mm->context.vdso_base = ~0UL;
3587
3588 /* vDSO has a problem and was disabled, just don't "enable" it for the
3589 * process
3590 @@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
3591 vdso_base = get_unmapped_area(NULL, vdso_base,
3592 (vdso_pages << PAGE_SHIFT) +
3593 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
3594 - 0, 0);
3595 + 0, MAP_PRIVATE | MAP_EXECUTABLE);
3596 if (IS_ERR_VALUE(vdso_base)) {
3597 rc = vdso_base;
3598 goto fail_mmapsem;
3599 diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
3600 index 77f6421..829564a 100644
3601 --- a/arch/powerpc/kernel/vio.c
3602 +++ b/arch/powerpc/kernel/vio.c
3603 @@ -601,11 +601,12 @@ static void vio_dma_iommu_unmap_sg(struct device *dev,
3604 vio_cmo_dealloc(viodev, alloc_size);
3605 }
3606
3607 -struct dma_map_ops vio_dma_mapping_ops = {
3608 +static const struct dma_map_ops vio_dma_mapping_ops = {
3609 .alloc_coherent = vio_dma_iommu_alloc_coherent,
3610 .free_coherent = vio_dma_iommu_free_coherent,
3611 .map_sg = vio_dma_iommu_map_sg,
3612 .unmap_sg = vio_dma_iommu_unmap_sg,
3613 + .dma_supported = dma_iommu_dma_supported,
3614 .map_page = vio_dma_iommu_map_page,
3615 .unmap_page = vio_dma_iommu_unmap_page,
3616
3617 @@ -857,7 +858,6 @@ static void vio_cmo_bus_remove(struct vio_dev *viodev)
3618
3619 static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
3620 {
3621 - vio_dma_mapping_ops.dma_supported = dma_iommu_ops.dma_supported;
3622 viodev->dev.archdata.dma_ops = &vio_dma_mapping_ops;
3623 }
3624
3625 diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
3626 index 5eea6f3..5d10396 100644
3627 --- a/arch/powerpc/lib/usercopy_64.c
3628 +++ b/arch/powerpc/lib/usercopy_64.c
3629 @@ -9,22 +9,6 @@
3630 #include <linux/module.h>
3631 #include <asm/uaccess.h>
3632
3633 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3634 -{
3635 - if (likely(access_ok(VERIFY_READ, from, n)))
3636 - n = __copy_from_user(to, from, n);
3637 - else
3638 - memset(to, 0, n);
3639 - return n;
3640 -}
3641 -
3642 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3643 -{
3644 - if (likely(access_ok(VERIFY_WRITE, to, n)))
3645 - n = __copy_to_user(to, from, n);
3646 - return n;
3647 -}
3648 -
3649 unsigned long copy_in_user(void __user *to, const void __user *from,
3650 unsigned long n)
3651 {
3652 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
3653 return n;
3654 }
3655
3656 -EXPORT_SYMBOL(copy_from_user);
3657 -EXPORT_SYMBOL(copy_to_user);
3658 EXPORT_SYMBOL(copy_in_user);
3659
3660 diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
3661 index e7dae82..877ce0d 100644
3662 --- a/arch/powerpc/mm/fault.c
3663 +++ b/arch/powerpc/mm/fault.c
3664 @@ -30,6 +30,10 @@
3665 #include <linux/kprobes.h>
3666 #include <linux/kdebug.h>
3667 #include <linux/perf_event.h>
3668 +#include <linux/slab.h>
3669 +#include <linux/pagemap.h>
3670 +#include <linux/compiler.h>
3671 +#include <linux/unistd.h>
3672
3673 #include <asm/firmware.h>
3674 #include <asm/page.h>
3675 @@ -40,6 +44,7 @@
3676 #include <asm/uaccess.h>
3677 #include <asm/tlbflush.h>
3678 #include <asm/siginfo.h>
3679 +#include <asm/ptrace.h>
3680
3681
3682 #ifdef CONFIG_KPROBES
3683 @@ -64,6 +69,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
3684 }
3685 #endif
3686
3687 +#ifdef CONFIG_PAX_PAGEEXEC
3688 +/*
3689 + * PaX: decide what to do with offenders (regs->nip = fault address)
3690 + *
3691 + * returns 1 when task should be killed
3692 + */
3693 +static int pax_handle_fetch_fault(struct pt_regs *regs)
3694 +{
3695 + return 1;
3696 +}
3697 +
3698 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3699 +{
3700 + unsigned long i;
3701 +
3702 + printk(KERN_ERR "PAX: bytes at PC: ");
3703 + for (i = 0; i < 5; i++) {
3704 + unsigned int c;
3705 + if (get_user(c, (unsigned int __user *)pc+i))
3706 + printk(KERN_CONT "???????? ");
3707 + else
3708 + printk(KERN_CONT "%08x ", c);
3709 + }
3710 + printk("\n");
3711 +}
3712 +#endif
3713 +
3714 /*
3715 * Check whether the instruction at regs->nip is a store using
3716 * an update addressing form which will update r1.
3717 @@ -134,7 +166,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
3718 * indicate errors in DSISR but can validly be set in SRR1.
3719 */
3720 if (trap == 0x400)
3721 - error_code &= 0x48200000;
3722 + error_code &= 0x58200000;
3723 else
3724 is_write = error_code & DSISR_ISSTORE;
3725 #else
3726 @@ -250,7 +282,7 @@ good_area:
3727 * "undefined". Of those that can be set, this is the only
3728 * one which seems bad.
3729 */
3730 - if (error_code & 0x10000000)
3731 + if (error_code & DSISR_GUARDED)
3732 /* Guarded storage error. */
3733 goto bad_area;
3734 #endif /* CONFIG_8xx */
3735 @@ -265,7 +297,7 @@ good_area:
3736 * processors use the same I/D cache coherency mechanism
3737 * as embedded.
3738 */
3739 - if (error_code & DSISR_PROTFAULT)
3740 + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
3741 goto bad_area;
3742 #endif /* CONFIG_PPC_STD_MMU */
3743
3744 @@ -335,6 +367,23 @@ bad_area:
3745 bad_area_nosemaphore:
3746 /* User mode accesses cause a SIGSEGV */
3747 if (user_mode(regs)) {
3748 +
3749 +#ifdef CONFIG_PAX_PAGEEXEC
3750 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
3751 +#ifdef CONFIG_PPC_STD_MMU
3752 + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
3753 +#else
3754 + if (is_exec && regs->nip == address) {
3755 +#endif
3756 + switch (pax_handle_fetch_fault(regs)) {
3757 + }
3758 +
3759 + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
3760 + do_group_exit(SIGKILL);
3761 + }
3762 + }
3763 +#endif
3764 +
3765 _exception(SIGSEGV, regs, code, address);
3766 return 0;
3767 }
3768 diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
3769 index 5973631..ad617af 100644
3770 --- a/arch/powerpc/mm/mem.c
3771 +++ b/arch/powerpc/mm/mem.c
3772 @@ -250,7 +250,7 @@ static int __init mark_nonram_nosave(void)
3773 {
3774 unsigned long lmb_next_region_start_pfn,
3775 lmb_region_max_pfn;
3776 - int i;
3777 + unsigned int i;
3778
3779 for (i = 0; i < lmb.memory.cnt - 1; i++) {
3780 lmb_region_max_pfn =
3781 diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
3782 index 0d957a4..26d968f 100644
3783 --- a/arch/powerpc/mm/mmap_64.c
3784 +++ b/arch/powerpc/mm/mmap_64.c
3785 @@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3786 */
3787 if (mmap_is_legacy()) {
3788 mm->mmap_base = TASK_UNMAPPED_BASE;
3789 +
3790 +#ifdef CONFIG_PAX_RANDMMAP
3791 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3792 + mm->mmap_base += mm->delta_mmap;
3793 +#endif
3794 +
3795 mm->get_unmapped_area = arch_get_unmapped_area;
3796 mm->unmap_area = arch_unmap_area;
3797 } else {
3798 mm->mmap_base = mmap_base();
3799 +
3800 +#ifdef CONFIG_PAX_RANDMMAP
3801 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3802 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3803 +#endif
3804 +
3805 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3806 mm->unmap_area = arch_unmap_area_topdown;
3807 }
3808 diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
3809 index ba51948..23009d9 100644
3810 --- a/arch/powerpc/mm/slice.c
3811 +++ b/arch/powerpc/mm/slice.c
3812 @@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
3813 if ((mm->task_size - len) < addr)
3814 return 0;
3815 vma = find_vma(mm, addr);
3816 - return (!vma || (addr + len) <= vma->vm_start);
3817 + return check_heap_stack_gap(vma, addr, len);
3818 }
3819
3820 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
3821 @@ -256,7 +256,7 @@ full_search:
3822 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
3823 continue;
3824 }
3825 - if (!vma || addr + len <= vma->vm_start) {
3826 + if (check_heap_stack_gap(vma, addr, len)) {
3827 /*
3828 * Remember the place where we stopped the search:
3829 */
3830 @@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
3831 }
3832 }
3833
3834 - addr = mm->mmap_base;
3835 - while (addr > len) {
3836 + if (mm->mmap_base < len)
3837 + addr = -ENOMEM;
3838 + else
3839 + addr = mm->mmap_base - len;
3840 +
3841 + while (!IS_ERR_VALUE(addr)) {
3842 /* Go down by chunk size */
3843 - addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
3844 + addr = _ALIGN_DOWN(addr, 1ul << pshift);
3845
3846 /* Check for hit with different page size */
3847 mask = slice_range_to_mask(addr, len);
3848 @@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
3849 * return with success:
3850 */
3851 vma = find_vma(mm, addr);
3852 - if (!vma || (addr + len) <= vma->vm_start) {
3853 + if (check_heap_stack_gap(vma, addr, len)) {
3854 /* remember the address as a hint for next time */
3855 if (use_cache)
3856 mm->free_area_cache = addr;
3857 @@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
3858 mm->cached_hole_size = vma->vm_start - addr;
3859
3860 /* try just below the current vma->vm_start */
3861 - addr = vma->vm_start;
3862 + addr = skip_heap_stack_gap(vma, len);
3863 }
3864
3865 /*
3866 @@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
3867 if (fixed && addr > (mm->task_size - len))
3868 return -EINVAL;
3869
3870 +#ifdef CONFIG_PAX_RANDMMAP
3871 + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
3872 + addr = 0;
3873 +#endif
3874 +
3875 /* If hint, make sure it matches our alignment restrictions */
3876 if (!fixed && addr) {
3877 addr = _ALIGN_UP(addr, 1ul << pshift);
3878 diff --git a/arch/powerpc/platforms/52xx/lite5200_pm.c b/arch/powerpc/platforms/52xx/lite5200_pm.c
3879 index b5c753d..8f01abe 100644
3880 --- a/arch/powerpc/platforms/52xx/lite5200_pm.c
3881 +++ b/arch/powerpc/platforms/52xx/lite5200_pm.c
3882 @@ -235,7 +235,7 @@ static void lite5200_pm_end(void)
3883 lite5200_pm_target_state = PM_SUSPEND_ON;
3884 }
3885
3886 -static struct platform_suspend_ops lite5200_pm_ops = {
3887 +static const struct platform_suspend_ops lite5200_pm_ops = {
3888 .valid = lite5200_pm_valid,
3889 .begin = lite5200_pm_begin,
3890 .prepare = lite5200_pm_prepare,
3891 diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pm.c b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
3892 index a55b0b6..478c18e 100644
3893 --- a/arch/powerpc/platforms/52xx/mpc52xx_pm.c
3894 +++ b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
3895 @@ -180,7 +180,7 @@ void mpc52xx_pm_finish(void)
3896 iounmap(mbar);
3897 }
3898
3899 -static struct platform_suspend_ops mpc52xx_pm_ops = {
3900 +static const struct platform_suspend_ops mpc52xx_pm_ops = {
3901 .valid = mpc52xx_pm_valid,
3902 .prepare = mpc52xx_pm_prepare,
3903 .enter = mpc52xx_pm_enter,
3904 diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c
3905 index 08e65fc..643d3ac 100644
3906 --- a/arch/powerpc/platforms/83xx/suspend.c
3907 +++ b/arch/powerpc/platforms/83xx/suspend.c
3908 @@ -273,7 +273,7 @@ static int mpc83xx_is_pci_agent(void)
3909 return ret;
3910 }
3911
3912 -static struct platform_suspend_ops mpc83xx_suspend_ops = {
3913 +static const struct platform_suspend_ops mpc83xx_suspend_ops = {
3914 .valid = mpc83xx_suspend_valid,
3915 .begin = mpc83xx_suspend_begin,
3916 .enter = mpc83xx_suspend_enter,
3917 diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
3918 index ca5bfdf..1602e09 100644
3919 --- a/arch/powerpc/platforms/cell/iommu.c
3920 +++ b/arch/powerpc/platforms/cell/iommu.c
3921 @@ -642,7 +642,7 @@ static int dma_fixed_dma_supported(struct device *dev, u64 mask)
3922
3923 static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
3924
3925 -struct dma_map_ops dma_iommu_fixed_ops = {
3926 +const struct dma_map_ops dma_iommu_fixed_ops = {
3927 .alloc_coherent = dma_fixed_alloc_coherent,
3928 .free_coherent = dma_fixed_free_coherent,
3929 .map_sg = dma_fixed_map_sg,
3930 diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c
3931 index e34b305..20e48ec 100644
3932 --- a/arch/powerpc/platforms/ps3/system-bus.c
3933 +++ b/arch/powerpc/platforms/ps3/system-bus.c
3934 @@ -694,7 +694,7 @@ static int ps3_dma_supported(struct device *_dev, u64 mask)
3935 return mask >= DMA_BIT_MASK(32);
3936 }
3937
3938 -static struct dma_map_ops ps3_sb_dma_ops = {
3939 +static const struct dma_map_ops ps3_sb_dma_ops = {
3940 .alloc_coherent = ps3_alloc_coherent,
3941 .free_coherent = ps3_free_coherent,
3942 .map_sg = ps3_sb_map_sg,
3943 @@ -704,7 +704,7 @@ static struct dma_map_ops ps3_sb_dma_ops = {
3944 .unmap_page = ps3_unmap_page,
3945 };
3946
3947 -static struct dma_map_ops ps3_ioc0_dma_ops = {
3948 +static const struct dma_map_ops ps3_ioc0_dma_ops = {
3949 .alloc_coherent = ps3_alloc_coherent,
3950 .free_coherent = ps3_free_coherent,
3951 .map_sg = ps3_ioc0_map_sg,
3952 diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
3953 index f0e6f28..60d53ed 100644
3954 --- a/arch/powerpc/platforms/pseries/Kconfig
3955 +++ b/arch/powerpc/platforms/pseries/Kconfig
3956 @@ -2,6 +2,8 @@ config PPC_PSERIES
3957 depends on PPC64 && PPC_BOOK3S
3958 bool "IBM pSeries & new (POWER5-based) iSeries"
3959 select MPIC
3960 + select PCI_MSI
3961 + select XICS
3962 select PPC_I8259
3963 select PPC_RTAS
3964 select RTAS_ERROR_LOGGING
3965 diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
3966 index 43c0aca..42c045b 100644
3967 --- a/arch/s390/Kconfig
3968 +++ b/arch/s390/Kconfig
3969 @@ -194,28 +194,26 @@ config AUDIT_ARCH
3970
3971 config S390_SWITCH_AMODE
3972 bool "Switch kernel/user addressing modes"
3973 + default y
3974 help
3975 This option allows to switch the addressing modes of kernel and user
3976 - space. The kernel parameter switch_amode=on will enable this feature,
3977 - default is disabled. Enabling this (via kernel parameter) on machines
3978 - earlier than IBM System z9-109 EC/BC will reduce system performance.
3979 + space. Enabling this on machines earlier than IBM System z9-109 EC/BC
3980 + will reduce system performance.
3981
3982 Note that this option will also be selected by selecting the execute
3983 - protection option below. Enabling the execute protection via the
3984 - noexec kernel parameter will also switch the addressing modes,
3985 - independent of the switch_amode kernel parameter.
3986 + protection option below. Enabling the execute protection will also
3987 + switch the addressing modes, independent of this option.
3988
3989
3990 config S390_EXEC_PROTECT
3991 bool "Data execute protection"
3992 + default y
3993 select S390_SWITCH_AMODE
3994 help
3995 This option allows to enable a buffer overflow protection for user
3996 space programs and it also selects the addressing mode option above.
3997 - The kernel parameter noexec=on will enable this feature and also
3998 - switch the addressing modes, default is disabled. Enabling this (via
3999 - kernel parameter) on machines earlier than IBM System z9-109 EC/BC
4000 - will reduce system performance.
4001 + Enabling this on machines earlier than IBM System z9-109 EC/BC will
4002 + reduce system performance.
4003
4004 comment "Code generation options"
4005
4006 diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
4007 index ae7c8f9..3f01a0c 100644
4008 --- a/arch/s390/include/asm/atomic.h
4009 +++ b/arch/s390/include/asm/atomic.h
4010 @@ -362,6 +362,16 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
4011 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
4012 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
4013
4014 +#define atomic64_read_unchecked(v) atomic64_read(v)
4015 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4016 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4017 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4018 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4019 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
4020 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4021 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
4022 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4023 +
4024 #define smp_mb__before_atomic_dec() smp_mb()
4025 #define smp_mb__after_atomic_dec() smp_mb()
4026 #define smp_mb__before_atomic_inc() smp_mb()
4027 diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
4028 index e885442..e3a2817 100644
4029 --- a/arch/s390/include/asm/elf.h
4030 +++ b/arch/s390/include/asm/elf.h
4031 @@ -164,6 +164,13 @@ extern unsigned int vdso_enabled;
4032 that it will "exec", and that there is sufficient room for the brk. */
4033 #define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
4034
4035 +#ifdef CONFIG_PAX_ASLR
4036 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
4037 +
4038 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
4039 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
4040 +#endif
4041 +
4042 /* This yields a mask that user programs can use to figure out what
4043 instruction set this CPU supports. */
4044
4045 diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
4046 index e37478e..9ce0e9f 100644
4047 --- a/arch/s390/include/asm/setup.h
4048 +++ b/arch/s390/include/asm/setup.h
4049 @@ -50,13 +50,13 @@ extern unsigned long memory_end;
4050 void detect_memory_layout(struct mem_chunk chunk[]);
4051
4052 #ifdef CONFIG_S390_SWITCH_AMODE
4053 -extern unsigned int switch_amode;
4054 +#define switch_amode (1)
4055 #else
4056 #define switch_amode (0)
4057 #endif
4058
4059 #ifdef CONFIG_S390_EXEC_PROTECT
4060 -extern unsigned int s390_noexec;
4061 +#define s390_noexec (1)
4062 #else
4063 #define s390_noexec (0)
4064 #endif
4065 diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
4066 index 8377e91..e28e6f1 100644
4067 --- a/arch/s390/include/asm/uaccess.h
4068 +++ b/arch/s390/include/asm/uaccess.h
4069 @@ -232,6 +232,10 @@ static inline unsigned long __must_check
4070 copy_to_user(void __user *to, const void *from, unsigned long n)
4071 {
4072 might_fault();
4073 +
4074 + if ((long)n < 0)
4075 + return n;
4076 +
4077 if (access_ok(VERIFY_WRITE, to, n))
4078 n = __copy_to_user(to, from, n);
4079 return n;
4080 @@ -257,6 +261,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
4081 static inline unsigned long __must_check
4082 __copy_from_user(void *to, const void __user *from, unsigned long n)
4083 {
4084 + if ((long)n < 0)
4085 + return n;
4086 +
4087 if (__builtin_constant_p(n) && (n <= 256))
4088 return uaccess.copy_from_user_small(n, from, to);
4089 else
4090 @@ -283,6 +290,10 @@ static inline unsigned long __must_check
4091 copy_from_user(void *to, const void __user *from, unsigned long n)
4092 {
4093 might_fault();
4094 +
4095 + if ((long)n < 0)
4096 + return n;
4097 +
4098 if (access_ok(VERIFY_READ, from, n))
4099 n = __copy_from_user(to, from, n);
4100 else
4101 diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
4102 index 639380a..72e3c02 100644
4103 --- a/arch/s390/kernel/module.c
4104 +++ b/arch/s390/kernel/module.c
4105 @@ -166,11 +166,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
4106
4107 /* Increase core size by size of got & plt and set start
4108 offsets for got and plt. */
4109 - me->core_size = ALIGN(me->core_size, 4);
4110 - me->arch.got_offset = me->core_size;
4111 - me->core_size += me->arch.got_size;
4112 - me->arch.plt_offset = me->core_size;
4113 - me->core_size += me->arch.plt_size;
4114 + me->core_size_rw = ALIGN(me->core_size_rw, 4);
4115 + me->arch.got_offset = me->core_size_rw;
4116 + me->core_size_rw += me->arch.got_size;
4117 + me->arch.plt_offset = me->core_size_rx;
4118 + me->core_size_rx += me->arch.plt_size;
4119 return 0;
4120 }
4121
4122 @@ -256,7 +256,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4123 if (info->got_initialized == 0) {
4124 Elf_Addr *gotent;
4125
4126 - gotent = me->module_core + me->arch.got_offset +
4127 + gotent = me->module_core_rw + me->arch.got_offset +
4128 info->got_offset;
4129 *gotent = val;
4130 info->got_initialized = 1;
4131 @@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4132 else if (r_type == R_390_GOTENT ||
4133 r_type == R_390_GOTPLTENT)
4134 *(unsigned int *) loc =
4135 - (val + (Elf_Addr) me->module_core - loc) >> 1;
4136 + (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
4137 else if (r_type == R_390_GOT64 ||
4138 r_type == R_390_GOTPLT64)
4139 *(unsigned long *) loc = val;
4140 @@ -294,7 +294,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4141 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
4142 if (info->plt_initialized == 0) {
4143 unsigned int *ip;
4144 - ip = me->module_core + me->arch.plt_offset +
4145 + ip = me->module_core_rx + me->arch.plt_offset +
4146 info->plt_offset;
4147 #ifndef CONFIG_64BIT
4148 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
4149 @@ -319,7 +319,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4150 val - loc + 0xffffUL < 0x1ffffeUL) ||
4151 (r_type == R_390_PLT32DBL &&
4152 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
4153 - val = (Elf_Addr) me->module_core +
4154 + val = (Elf_Addr) me->module_core_rx +
4155 me->arch.plt_offset +
4156 info->plt_offset;
4157 val += rela->r_addend - loc;
4158 @@ -341,7 +341,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4159 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
4160 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
4161 val = val + rela->r_addend -
4162 - ((Elf_Addr) me->module_core + me->arch.got_offset);
4163 + ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
4164 if (r_type == R_390_GOTOFF16)
4165 *(unsigned short *) loc = val;
4166 else if (r_type == R_390_GOTOFF32)
4167 @@ -351,7 +351,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4168 break;
4169 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
4170 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
4171 - val = (Elf_Addr) me->module_core + me->arch.got_offset +
4172 + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
4173 rela->r_addend - loc;
4174 if (r_type == R_390_GOTPC)
4175 *(unsigned int *) loc = val;
4176 diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
4177 index 061479f..dbfb08c 100644
4178 --- a/arch/s390/kernel/setup.c
4179 +++ b/arch/s390/kernel/setup.c
4180 @@ -306,9 +306,6 @@ static int __init early_parse_mem(char *p)
4181 early_param("mem", early_parse_mem);
4182
4183 #ifdef CONFIG_S390_SWITCH_AMODE
4184 -unsigned int switch_amode = 0;
4185 -EXPORT_SYMBOL_GPL(switch_amode);
4186 -
4187 static int set_amode_and_uaccess(unsigned long user_amode,
4188 unsigned long user32_amode)
4189 {
4190 @@ -334,17 +331,6 @@ static int set_amode_and_uaccess(unsigned long user_amode,
4191 return 0;
4192 }
4193 }
4194 -
4195 -/*
4196 - * Switch kernel/user addressing modes?
4197 - */
4198 -static int __init early_parse_switch_amode(char *p)
4199 -{
4200 - switch_amode = 1;
4201 - return 0;
4202 -}
4203 -early_param("switch_amode", early_parse_switch_amode);
4204 -
4205 #else /* CONFIG_S390_SWITCH_AMODE */
4206 static inline int set_amode_and_uaccess(unsigned long user_amode,
4207 unsigned long user32_amode)
4208 @@ -353,24 +339,6 @@ static inline int set_amode_and_uaccess(unsigned long user_amode,
4209 }
4210 #endif /* CONFIG_S390_SWITCH_AMODE */
4211
4212 -#ifdef CONFIG_S390_EXEC_PROTECT
4213 -unsigned int s390_noexec = 0;
4214 -EXPORT_SYMBOL_GPL(s390_noexec);
4215 -
4216 -/*
4217 - * Enable execute protection?
4218 - */
4219 -static int __init early_parse_noexec(char *p)
4220 -{
4221 - if (!strncmp(p, "off", 3))
4222 - return 0;
4223 - switch_amode = 1;
4224 - s390_noexec = 1;
4225 - return 0;
4226 -}
4227 -early_param("noexec", early_parse_noexec);
4228 -#endif /* CONFIG_S390_EXEC_PROTECT */
4229 -
4230 static void setup_addressing_mode(void)
4231 {
4232 if (s390_noexec) {
4233 diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
4234 index f4558cc..e461f37 100644
4235 --- a/arch/s390/mm/mmap.c
4236 +++ b/arch/s390/mm/mmap.c
4237 @@ -78,10 +78,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4238 */
4239 if (mmap_is_legacy()) {
4240 mm->mmap_base = TASK_UNMAPPED_BASE;
4241 +
4242 +#ifdef CONFIG_PAX_RANDMMAP
4243 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4244 + mm->mmap_base += mm->delta_mmap;
4245 +#endif
4246 +
4247 mm->get_unmapped_area = arch_get_unmapped_area;
4248 mm->unmap_area = arch_unmap_area;
4249 } else {
4250 mm->mmap_base = mmap_base();
4251 +
4252 +#ifdef CONFIG_PAX_RANDMMAP
4253 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4254 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4255 +#endif
4256 +
4257 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4258 mm->unmap_area = arch_unmap_area_topdown;
4259 }
4260 @@ -153,10 +165,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4261 */
4262 if (mmap_is_legacy()) {
4263 mm->mmap_base = TASK_UNMAPPED_BASE;
4264 +
4265 +#ifdef CONFIG_PAX_RANDMMAP
4266 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4267 + mm->mmap_base += mm->delta_mmap;
4268 +#endif
4269 +
4270 mm->get_unmapped_area = s390_get_unmapped_area;
4271 mm->unmap_area = arch_unmap_area;
4272 } else {
4273 mm->mmap_base = mmap_base();
4274 +
4275 +#ifdef CONFIG_PAX_RANDMMAP
4276 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4277 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4278 +#endif
4279 +
4280 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
4281 mm->unmap_area = arch_unmap_area_topdown;
4282 }
4283 diff --git a/arch/score/include/asm/system.h b/arch/score/include/asm/system.h
4284 index 589d5c7..669e274 100644
4285 --- a/arch/score/include/asm/system.h
4286 +++ b/arch/score/include/asm/system.h
4287 @@ -17,7 +17,7 @@ do { \
4288 #define finish_arch_switch(prev) do {} while (0)
4289
4290 typedef void (*vi_handler_t)(void);
4291 -extern unsigned long arch_align_stack(unsigned long sp);
4292 +#define arch_align_stack(x) (x)
4293
4294 #define mb() barrier()
4295 #define rmb() barrier()
4296 diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
4297 index 25d0803..d6c8e36 100644
4298 --- a/arch/score/kernel/process.c
4299 +++ b/arch/score/kernel/process.c
4300 @@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_struct *task)
4301
4302 return task_pt_regs(task)->cp0_epc;
4303 }
4304 -
4305 -unsigned long arch_align_stack(unsigned long sp)
4306 -{
4307 - return sp;
4308 -}
4309 diff --git a/arch/sh/boards/mach-hp6xx/pm.c b/arch/sh/boards/mach-hp6xx/pm.c
4310 index d936c1a..304a252 100644
4311 --- a/arch/sh/boards/mach-hp6xx/pm.c
4312 +++ b/arch/sh/boards/mach-hp6xx/pm.c
4313 @@ -143,7 +143,7 @@ static int hp6x0_pm_enter(suspend_state_t state)
4314 return 0;
4315 }
4316
4317 -static struct platform_suspend_ops hp6x0_pm_ops = {
4318 +static const struct platform_suspend_ops hp6x0_pm_ops = {
4319 .enter = hp6x0_pm_enter,
4320 .valid = suspend_valid_only_mem,
4321 };
4322 diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c
4323 index 8a8a993..7b3079b 100644
4324 --- a/arch/sh/kernel/cpu/sh4/sq.c
4325 +++ b/arch/sh/kernel/cpu/sh4/sq.c
4326 @@ -327,7 +327,7 @@ static struct attribute *sq_sysfs_attrs[] = {
4327 NULL,
4328 };
4329
4330 -static struct sysfs_ops sq_sysfs_ops = {
4331 +static const struct sysfs_ops sq_sysfs_ops = {
4332 .show = sq_sysfs_show,
4333 .store = sq_sysfs_store,
4334 };
4335 diff --git a/arch/sh/kernel/cpu/shmobile/pm.c b/arch/sh/kernel/cpu/shmobile/pm.c
4336 index ee3c2aa..c49cee6 100644
4337 --- a/arch/sh/kernel/cpu/shmobile/pm.c
4338 +++ b/arch/sh/kernel/cpu/shmobile/pm.c
4339 @@ -58,7 +58,7 @@ static int sh_pm_enter(suspend_state_t state)
4340 return 0;
4341 }
4342
4343 -static struct platform_suspend_ops sh_pm_ops = {
4344 +static const struct platform_suspend_ops sh_pm_ops = {
4345 .enter = sh_pm_enter,
4346 .valid = suspend_valid_only_mem,
4347 };
4348 diff --git a/arch/sh/kernel/kgdb.c b/arch/sh/kernel/kgdb.c
4349 index 3e532d0..9faa306 100644
4350 --- a/arch/sh/kernel/kgdb.c
4351 +++ b/arch/sh/kernel/kgdb.c
4352 @@ -271,7 +271,7 @@ void kgdb_arch_exit(void)
4353 {
4354 }
4355
4356 -struct kgdb_arch arch_kgdb_ops = {
4357 +const struct kgdb_arch arch_kgdb_ops = {
4358 /* Breakpoint instruction: trapa #0x3c */
4359 #ifdef CONFIG_CPU_LITTLE_ENDIAN
4360 .gdb_bpt_instr = { 0x3c, 0xc3 },
4361 diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
4362 index afeb710..d1d1289 100644
4363 --- a/arch/sh/mm/mmap.c
4364 +++ b/arch/sh/mm/mmap.c
4365 @@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
4366 addr = PAGE_ALIGN(addr);
4367
4368 vma = find_vma(mm, addr);
4369 - if (TASK_SIZE - len >= addr &&
4370 - (!vma || addr + len <= vma->vm_start))
4371 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
4372 return addr;
4373 }
4374
4375 @@ -106,7 +105,7 @@ full_search:
4376 }
4377 return -ENOMEM;
4378 }
4379 - if (likely(!vma || addr + len <= vma->vm_start)) {
4380 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4381 /*
4382 * Remember the place where we stopped the search:
4383 */
4384 @@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4385 addr = PAGE_ALIGN(addr);
4386
4387 vma = find_vma(mm, addr);
4388 - if (TASK_SIZE - len >= addr &&
4389 - (!vma || addr + len <= vma->vm_start))
4390 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
4391 return addr;
4392 }
4393
4394 @@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4395 /* make sure it can fit in the remaining address space */
4396 if (likely(addr > len)) {
4397 vma = find_vma(mm, addr-len);
4398 - if (!vma || addr <= vma->vm_start) {
4399 + if (check_heap_stack_gap(vma, addr - len, len)) {
4400 /* remember the address as a hint for next time */
4401 return (mm->free_area_cache = addr-len);
4402 }
4403 @@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4404 if (unlikely(mm->mmap_base < len))
4405 goto bottomup;
4406
4407 - addr = mm->mmap_base-len;
4408 - if (do_colour_align)
4409 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4410 + addr = mm->mmap_base - len;
4411
4412 do {
4413 + if (do_colour_align)
4414 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4415 /*
4416 * Lookup failure means no vma is above this address,
4417 * else if new region fits below vma->vm_start,
4418 * return with success:
4419 */
4420 vma = find_vma(mm, addr);
4421 - if (likely(!vma || addr+len <= vma->vm_start)) {
4422 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4423 /* remember the address as a hint for next time */
4424 return (mm->free_area_cache = addr);
4425 }
4426 @@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4427 mm->cached_hole_size = vma->vm_start - addr;
4428
4429 /* try just below the current vma->vm_start */
4430 - addr = vma->vm_start-len;
4431 - if (do_colour_align)
4432 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4433 - } while (likely(len < vma->vm_start));
4434 + addr = skip_heap_stack_gap(vma, len);
4435 + } while (!IS_ERR_VALUE(addr));
4436
4437 bottomup:
4438 /*
4439 diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
4440 index 05ef538..dc9c857 100644
4441 --- a/arch/sparc/Kconfig
4442 +++ b/arch/sparc/Kconfig
4443 @@ -32,6 +32,7 @@ config SPARC
4444
4445 config SPARC32
4446 def_bool !64BIT
4447 + select GENERIC_ATOMIC64
4448
4449 config SPARC64
4450 def_bool 64BIT
4451 diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
4452 index 113225b..7fd04e7 100644
4453 --- a/arch/sparc/Makefile
4454 +++ b/arch/sparc/Makefile
4455 @@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
4456 # Export what is needed by arch/sparc/boot/Makefile
4457 export VMLINUX_INIT VMLINUX_MAIN
4458 VMLINUX_INIT := $(head-y) $(init-y)
4459 -VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
4460 +VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
4461 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
4462 VMLINUX_MAIN += $(drivers-y) $(net-y)
4463
4464 diff --git a/arch/sparc/include/asm/atomic.h b/arch/sparc/include/asm/atomic.h
4465 index 8ff83d8..4a459c2 100644
4466 --- a/arch/sparc/include/asm/atomic.h
4467 +++ b/arch/sparc/include/asm/atomic.h
4468 @@ -4,5 +4,6 @@
4469 #include <asm/atomic_64.h>
4470 #else
4471 #include <asm/atomic_32.h>
4472 +#include <asm-generic/atomic64.h>
4473 #endif
4474 #endif
4475 diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
4476 index f5cc06f..f858d47 100644
4477 --- a/arch/sparc/include/asm/atomic_64.h
4478 +++ b/arch/sparc/include/asm/atomic_64.h
4479 @@ -14,18 +14,40 @@
4480 #define ATOMIC64_INIT(i) { (i) }
4481
4482 #define atomic_read(v) ((v)->counter)
4483 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
4484 +{
4485 + return v->counter;
4486 +}
4487 #define atomic64_read(v) ((v)->counter)
4488 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
4489 +{
4490 + return v->counter;
4491 +}
4492
4493 #define atomic_set(v, i) (((v)->counter) = i)
4494 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
4495 +{
4496 + v->counter = i;
4497 +}
4498 #define atomic64_set(v, i) (((v)->counter) = i)
4499 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
4500 +{
4501 + v->counter = i;
4502 +}
4503
4504 extern void atomic_add(int, atomic_t *);
4505 +extern void atomic_add_unchecked(int, atomic_unchecked_t *);
4506 extern void atomic64_add(long, atomic64_t *);
4507 +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
4508 extern void atomic_sub(int, atomic_t *);
4509 +extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
4510 extern void atomic64_sub(long, atomic64_t *);
4511 +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
4512
4513 extern int atomic_add_ret(int, atomic_t *);
4514 +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
4515 extern long atomic64_add_ret(long, atomic64_t *);
4516 +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
4517 extern int atomic_sub_ret(int, atomic_t *);
4518 extern long atomic64_sub_ret(long, atomic64_t *);
4519
4520 @@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4521 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
4522
4523 #define atomic_inc_return(v) atomic_add_ret(1, v)
4524 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
4525 +{
4526 + return atomic_add_ret_unchecked(1, v);
4527 +}
4528 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
4529 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
4530 +{
4531 + return atomic64_add_ret_unchecked(1, v);
4532 +}
4533
4534 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
4535 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
4536
4537 #define atomic_add_return(i, v) atomic_add_ret(i, v)
4538 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
4539 +{
4540 + return atomic_add_ret_unchecked(i, v);
4541 +}
4542 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
4543 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
4544 +{
4545 + return atomic64_add_ret_unchecked(i, v);
4546 +}
4547
4548 /*
4549 * atomic_inc_and_test - increment and test
4550 @@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4551 * other cases.
4552 */
4553 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
4554 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
4555 +{
4556 + return atomic_inc_return_unchecked(v) == 0;
4557 +}
4558 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
4559
4560 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
4561 @@ -59,30 +101,65 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4562 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
4563
4564 #define atomic_inc(v) atomic_add(1, v)
4565 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
4566 +{
4567 + atomic_add_unchecked(1, v);
4568 +}
4569 #define atomic64_inc(v) atomic64_add(1, v)
4570 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
4571 +{
4572 + atomic64_add_unchecked(1, v);
4573 +}
4574
4575 #define atomic_dec(v) atomic_sub(1, v)
4576 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
4577 +{
4578 + atomic_sub_unchecked(1, v);
4579 +}
4580 #define atomic64_dec(v) atomic64_sub(1, v)
4581 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
4582 +{
4583 + atomic64_sub_unchecked(1, v);
4584 +}
4585
4586 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
4587 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
4588
4589 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
4590 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
4591 +{
4592 + return cmpxchg(&v->counter, old, new);
4593 +}
4594 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
4595 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
4596 +{
4597 + return xchg(&v->counter, new);
4598 +}
4599
4600 static inline int atomic_add_unless(atomic_t *v, int a, int u)
4601 {
4602 - int c, old;
4603 + int c, old, new;
4604 c = atomic_read(v);
4605 for (;;) {
4606 - if (unlikely(c == (u)))
4607 + if (unlikely(c == u))
4608 break;
4609 - old = atomic_cmpxchg((v), c, c + (a));
4610 +
4611 + asm volatile("addcc %2, %0, %0\n"
4612 +
4613 +#ifdef CONFIG_PAX_REFCOUNT
4614 + "tvs %%icc, 6\n"
4615 +#endif
4616 +
4617 + : "=r" (new)
4618 + : "0" (c), "ir" (a)
4619 + : "cc");
4620 +
4621 + old = atomic_cmpxchg(v, c, new);
4622 if (likely(old == c))
4623 break;
4624 c = old;
4625 }
4626 - return c != (u);
4627 + return c != u;
4628 }
4629
4630 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
4631 @@ -90,20 +167,35 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
4632 #define atomic64_cmpxchg(v, o, n) \
4633 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
4634 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
4635 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
4636 +{
4637 + return xchg(&v->counter, new);
4638 +}
4639
4640 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
4641 {
4642 - long c, old;
4643 + long c, old, new;
4644 c = atomic64_read(v);
4645 for (;;) {
4646 - if (unlikely(c == (u)))
4647 + if (unlikely(c == u))
4648 break;
4649 - old = atomic64_cmpxchg((v), c, c + (a));
4650 +
4651 + asm volatile("addcc %2, %0, %0\n"
4652 +
4653 +#ifdef CONFIG_PAX_REFCOUNT
4654 + "tvs %%xcc, 6\n"
4655 +#endif
4656 +
4657 + : "=r" (new)
4658 + : "0" (c), "ir" (a)
4659 + : "cc");
4660 +
4661 + old = atomic64_cmpxchg(v, c, new);
4662 if (likely(old == c))
4663 break;
4664 c = old;
4665 }
4666 - return c != (u);
4667 + return c != u;
4668 }
4669
4670 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
4671 diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
4672 index 41f85ae..fb54d5e 100644
4673 --- a/arch/sparc/include/asm/cache.h
4674 +++ b/arch/sparc/include/asm/cache.h
4675 @@ -8,7 +8,7 @@
4676 #define _SPARC_CACHE_H
4677
4678 #define L1_CACHE_SHIFT 5
4679 -#define L1_CACHE_BYTES 32
4680 +#define L1_CACHE_BYTES 32UL
4681 #define L1_CACHE_ALIGN(x) ((((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1)))
4682
4683 #ifdef CONFIG_SPARC32
4684 diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
4685 index 5a8c308..38def92 100644
4686 --- a/arch/sparc/include/asm/dma-mapping.h
4687 +++ b/arch/sparc/include/asm/dma-mapping.h
4688 @@ -14,10 +14,10 @@ extern int dma_set_mask(struct device *dev, u64 dma_mask);
4689 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
4690 #define dma_is_consistent(d, h) (1)
4691
4692 -extern struct dma_map_ops *dma_ops, pci32_dma_ops;
4693 +extern const struct dma_map_ops *dma_ops, pci32_dma_ops;
4694 extern struct bus_type pci_bus_type;
4695
4696 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
4697 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
4698 {
4699 #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
4700 if (dev->bus == &pci_bus_type)
4701 @@ -31,7 +31,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
4702 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
4703 dma_addr_t *dma_handle, gfp_t flag)
4704 {
4705 - struct dma_map_ops *ops = get_dma_ops(dev);
4706 + const struct dma_map_ops *ops = get_dma_ops(dev);
4707 void *cpu_addr;
4708
4709 cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag);
4710 @@ -42,7 +42,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
4711 static inline void dma_free_coherent(struct device *dev, size_t size,
4712 void *cpu_addr, dma_addr_t dma_handle)
4713 {
4714 - struct dma_map_ops *ops = get_dma_ops(dev);
4715 + const struct dma_map_ops *ops = get_dma_ops(dev);
4716
4717 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
4718 ops->free_coherent(dev, size, cpu_addr, dma_handle);
4719 diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
4720 index 381a1b5..b97e3ff 100644
4721 --- a/arch/sparc/include/asm/elf_32.h
4722 +++ b/arch/sparc/include/asm/elf_32.h
4723 @@ -116,6 +116,13 @@ typedef struct {
4724
4725 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
4726
4727 +#ifdef CONFIG_PAX_ASLR
4728 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
4729 +
4730 +#define PAX_DELTA_MMAP_LEN 16
4731 +#define PAX_DELTA_STACK_LEN 16
4732 +#endif
4733 +
4734 /* This yields a mask that user programs can use to figure out what
4735 instruction set this cpu supports. This can NOT be done in userspace
4736 on Sparc. */
4737 diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
4738 index 9968085..c2106ef 100644
4739 --- a/arch/sparc/include/asm/elf_64.h
4740 +++ b/arch/sparc/include/asm/elf_64.h
4741 @@ -163,6 +163,12 @@ typedef struct {
4742 #define ELF_ET_DYN_BASE 0x0000010000000000UL
4743 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
4744
4745 +#ifdef CONFIG_PAX_ASLR
4746 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
4747 +
4748 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
4749 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
4750 +#endif
4751
4752 /* This yields a mask that user programs can use to figure out what
4753 instruction set this cpu supports. */
4754 diff --git a/arch/sparc/include/asm/page_32.h b/arch/sparc/include/asm/page_32.h
4755 index 156707b..aefa786 100644
4756 --- a/arch/sparc/include/asm/page_32.h
4757 +++ b/arch/sparc/include/asm/page_32.h
4758 @@ -8,6 +8,8 @@
4759 #ifndef _SPARC_PAGE_H
4760 #define _SPARC_PAGE_H
4761
4762 +#include <linux/const.h>
4763 +
4764 #define PAGE_SHIFT 12
4765
4766 #ifndef __ASSEMBLY__
4767 diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
4768 index e0cabe7..efd60f1 100644
4769 --- a/arch/sparc/include/asm/pgtable_32.h
4770 +++ b/arch/sparc/include/asm/pgtable_32.h
4771 @@ -43,6 +43,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
4772 BTFIXUPDEF_INT(page_none)
4773 BTFIXUPDEF_INT(page_copy)
4774 BTFIXUPDEF_INT(page_readonly)
4775 +
4776 +#ifdef CONFIG_PAX_PAGEEXEC
4777 +BTFIXUPDEF_INT(page_shared_noexec)
4778 +BTFIXUPDEF_INT(page_copy_noexec)
4779 +BTFIXUPDEF_INT(page_readonly_noexec)
4780 +#endif
4781 +
4782 BTFIXUPDEF_INT(page_kernel)
4783
4784 #define PMD_SHIFT SUN4C_PMD_SHIFT
4785 @@ -64,6 +71,16 @@ extern pgprot_t PAGE_SHARED;
4786 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
4787 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
4788
4789 +#ifdef CONFIG_PAX_PAGEEXEC
4790 +extern pgprot_t PAGE_SHARED_NOEXEC;
4791 +# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
4792 +# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
4793 +#else
4794 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
4795 +# define PAGE_COPY_NOEXEC PAGE_COPY
4796 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
4797 +#endif
4798 +
4799 extern unsigned long page_kernel;
4800
4801 #ifdef MODULE
4802 diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
4803 index 1407c07..7e10231 100644
4804 --- a/arch/sparc/include/asm/pgtsrmmu.h
4805 +++ b/arch/sparc/include/asm/pgtsrmmu.h
4806 @@ -115,6 +115,13 @@
4807 SRMMU_EXEC | SRMMU_REF)
4808 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
4809 SRMMU_EXEC | SRMMU_REF)
4810 +
4811 +#ifdef CONFIG_PAX_PAGEEXEC
4812 +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
4813 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
4814 +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
4815 +#endif
4816 +
4817 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
4818 SRMMU_DIRTY | SRMMU_REF)
4819
4820 diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
4821 index 43e5147..47622a1 100644
4822 --- a/arch/sparc/include/asm/spinlock_64.h
4823 +++ b/arch/sparc/include/asm/spinlock_64.h
4824 @@ -92,14 +92,19 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla
4825
4826 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
4827
4828 -static void inline arch_read_lock(raw_rwlock_t *lock)
4829 +static inline void arch_read_lock(raw_rwlock_t *lock)
4830 {
4831 unsigned long tmp1, tmp2;
4832
4833 __asm__ __volatile__ (
4834 "1: ldsw [%2], %0\n"
4835 " brlz,pn %0, 2f\n"
4836 -"4: add %0, 1, %1\n"
4837 +"4: addcc %0, 1, %1\n"
4838 +
4839 +#ifdef CONFIG_PAX_REFCOUNT
4840 +" tvs %%icc, 6\n"
4841 +#endif
4842 +
4843 " cas [%2], %0, %1\n"
4844 " cmp %0, %1\n"
4845 " bne,pn %%icc, 1b\n"
4846 @@ -112,10 +117,10 @@ static void inline arch_read_lock(raw_rwlock_t *lock)
4847 " .previous"
4848 : "=&r" (tmp1), "=&r" (tmp2)
4849 : "r" (lock)
4850 - : "memory");
4851 + : "memory", "cc");
4852 }
4853
4854 -static int inline arch_read_trylock(raw_rwlock_t *lock)
4855 +static inline int arch_read_trylock(raw_rwlock_t *lock)
4856 {
4857 int tmp1, tmp2;
4858
4859 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(raw_rwlock_t *lock)
4860 "1: ldsw [%2], %0\n"
4861 " brlz,a,pn %0, 2f\n"
4862 " mov 0, %0\n"
4863 -" add %0, 1, %1\n"
4864 +" addcc %0, 1, %1\n"
4865 +
4866 +#ifdef CONFIG_PAX_REFCOUNT
4867 +" tvs %%icc, 6\n"
4868 +#endif
4869 +
4870 " cas [%2], %0, %1\n"
4871 " cmp %0, %1\n"
4872 " bne,pn %%icc, 1b\n"
4873 @@ -136,13 +146,18 @@ static int inline arch_read_trylock(raw_rwlock_t *lock)
4874 return tmp1;
4875 }
4876
4877 -static void inline arch_read_unlock(raw_rwlock_t *lock)
4878 +static inline void arch_read_unlock(raw_rwlock_t *lock)
4879 {
4880 unsigned long tmp1, tmp2;
4881
4882 __asm__ __volatile__(
4883 "1: lduw [%2], %0\n"
4884 -" sub %0, 1, %1\n"
4885 +" subcc %0, 1, %1\n"
4886 +
4887 +#ifdef CONFIG_PAX_REFCOUNT
4888 +" tvs %%icc, 6\n"
4889 +#endif
4890 +
4891 " cas [%2], %0, %1\n"
4892 " cmp %0, %1\n"
4893 " bne,pn %%xcc, 1b\n"
4894 @@ -152,7 +167,7 @@ static void inline arch_read_unlock(raw_rwlock_t *lock)
4895 : "memory");
4896 }
4897
4898 -static void inline arch_write_lock(raw_rwlock_t *lock)
4899 +static inline void arch_write_lock(raw_rwlock_t *lock)
4900 {
4901 unsigned long mask, tmp1, tmp2;
4902
4903 @@ -177,7 +192,7 @@ static void inline arch_write_lock(raw_rwlock_t *lock)
4904 : "memory");
4905 }
4906
4907 -static void inline arch_write_unlock(raw_rwlock_t *lock)
4908 +static inline void arch_write_unlock(raw_rwlock_t *lock)
4909 {
4910 __asm__ __volatile__(
4911 " stw %%g0, [%0]"
4912 @@ -186,7 +201,7 @@ static void inline arch_write_unlock(raw_rwlock_t *lock)
4913 : "memory");
4914 }
4915
4916 -static int inline arch_write_trylock(raw_rwlock_t *lock)
4917 +static inline int arch_write_trylock(raw_rwlock_t *lock)
4918 {
4919 unsigned long mask, tmp1, tmp2, result;
4920
4921 diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
4922 index 844d73a..f787fb9 100644
4923 --- a/arch/sparc/include/asm/thread_info_32.h
4924 +++ b/arch/sparc/include/asm/thread_info_32.h
4925 @@ -50,6 +50,8 @@ struct thread_info {
4926 unsigned long w_saved;
4927
4928 struct restart_block restart_block;
4929 +
4930 + unsigned long lowest_stack;
4931 };
4932
4933 /*
4934 diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
4935 index f78ad9a..9f55fc7 100644
4936 --- a/arch/sparc/include/asm/thread_info_64.h
4937 +++ b/arch/sparc/include/asm/thread_info_64.h
4938 @@ -68,6 +68,8 @@ struct thread_info {
4939 struct pt_regs *kern_una_regs;
4940 unsigned int kern_una_insn;
4941
4942 + unsigned long lowest_stack;
4943 +
4944 unsigned long fpregs[0] __attribute__ ((aligned(64)));
4945 };
4946
4947 diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
4948 index e88fbe5..96b0ce5 100644
4949 --- a/arch/sparc/include/asm/uaccess.h
4950 +++ b/arch/sparc/include/asm/uaccess.h
4951 @@ -1,5 +1,13 @@
4952 #ifndef ___ASM_SPARC_UACCESS_H
4953 #define ___ASM_SPARC_UACCESS_H
4954 +
4955 +#ifdef __KERNEL__
4956 +#ifndef __ASSEMBLY__
4957 +#include <linux/types.h>
4958 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
4959 +#endif
4960 +#endif
4961 +
4962 #if defined(__sparc__) && defined(__arch64__)
4963 #include <asm/uaccess_64.h>
4964 #else
4965 diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
4966 index 8303ac4..07f333d 100644
4967 --- a/arch/sparc/include/asm/uaccess_32.h
4968 +++ b/arch/sparc/include/asm/uaccess_32.h
4969 @@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
4970
4971 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
4972 {
4973 - if (n && __access_ok((unsigned long) to, n))
4974 + if ((long)n < 0)
4975 + return n;
4976 +
4977 + if (n && __access_ok((unsigned long) to, n)) {
4978 + if (!__builtin_constant_p(n))
4979 + check_object_size(from, n, true);
4980 return __copy_user(to, (__force void __user *) from, n);
4981 - else
4982 + } else
4983 return n;
4984 }
4985
4986 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
4987 {
4988 + if ((long)n < 0)
4989 + return n;
4990 +
4991 + if (!__builtin_constant_p(n))
4992 + check_object_size(from, n, true);
4993 +
4994 return __copy_user(to, (__force void __user *) from, n);
4995 }
4996
4997 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
4998 {
4999 - if (n && __access_ok((unsigned long) from, n))
5000 + if ((long)n < 0)
5001 + return n;
5002 +
5003 + if (n && __access_ok((unsigned long) from, n)) {
5004 + if (!__builtin_constant_p(n))
5005 + check_object_size(to, n, false);
5006 return __copy_user((__force void __user *) to, from, n);
5007 - else
5008 + } else
5009 return n;
5010 }
5011
5012 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
5013 {
5014 + if ((long)n < 0)
5015 + return n;
5016 +
5017 return __copy_user((__force void __user *) to, from, n);
5018 }
5019
5020 diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
5021 index 9ea271e..7b8a271 100644
5022 --- a/arch/sparc/include/asm/uaccess_64.h
5023 +++ b/arch/sparc/include/asm/uaccess_64.h
5024 @@ -9,6 +9,7 @@
5025 #include <linux/compiler.h>
5026 #include <linux/string.h>
5027 #include <linux/thread_info.h>
5028 +#include <linux/kernel.h>
5029 #include <asm/asi.h>
5030 #include <asm/system.h>
5031 #include <asm/spitfire.h>
5032 @@ -212,8 +213,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
5033 static inline unsigned long __must_check
5034 copy_from_user(void *to, const void __user *from, unsigned long size)
5035 {
5036 - unsigned long ret = ___copy_from_user(to, from, size);
5037 + unsigned long ret;
5038
5039 + if ((long)size < 0 || size > INT_MAX)
5040 + return size;
5041 +
5042 + if (!__builtin_constant_p(size))
5043 + check_object_size(to, size, false);
5044 +
5045 + ret = ___copy_from_user(to, from, size);
5046 if (unlikely(ret))
5047 ret = copy_from_user_fixup(to, from, size);
5048 return ret;
5049 @@ -228,8 +236,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
5050 static inline unsigned long __must_check
5051 copy_to_user(void __user *to, const void *from, unsigned long size)
5052 {
5053 - unsigned long ret = ___copy_to_user(to, from, size);
5054 + unsigned long ret;
5055
5056 + if ((long)size < 0 || size > INT_MAX)
5057 + return size;
5058 +
5059 + if (!__builtin_constant_p(size))
5060 + check_object_size(from, size, true);
5061 +
5062 + ret = ___copy_to_user(to, from, size);
5063 if (unlikely(ret))
5064 ret = copy_to_user_fixup(to, from, size);
5065 return ret;
5066 diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
5067 index 2782681..77ded84 100644
5068 --- a/arch/sparc/kernel/Makefile
5069 +++ b/arch/sparc/kernel/Makefile
5070 @@ -3,7 +3,7 @@
5071 #
5072
5073 asflags-y := -ansi
5074 -ccflags-y := -Werror
5075 +#ccflags-y := -Werror
5076
5077 extra-y := head_$(BITS).o
5078 extra-y += init_task.o
5079 diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
5080 index 7690cc2..ece64c9 100644
5081 --- a/arch/sparc/kernel/iommu.c
5082 +++ b/arch/sparc/kernel/iommu.c
5083 @@ -826,7 +826,7 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
5084 spin_unlock_irqrestore(&iommu->lock, flags);
5085 }
5086
5087 -static struct dma_map_ops sun4u_dma_ops = {
5088 +static const struct dma_map_ops sun4u_dma_ops = {
5089 .alloc_coherent = dma_4u_alloc_coherent,
5090 .free_coherent = dma_4u_free_coherent,
5091 .map_page = dma_4u_map_page,
5092 @@ -837,7 +837,7 @@ static struct dma_map_ops sun4u_dma_ops = {
5093 .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
5094 };
5095
5096 -struct dma_map_ops *dma_ops = &sun4u_dma_ops;
5097 +const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
5098 EXPORT_SYMBOL(dma_ops);
5099
5100 extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
5101 diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
5102 index 9f61fd8..bd048db 100644
5103 --- a/arch/sparc/kernel/ioport.c
5104 +++ b/arch/sparc/kernel/ioport.c
5105 @@ -392,7 +392,7 @@ static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
5106 BUG();
5107 }
5108
5109 -struct dma_map_ops sbus_dma_ops = {
5110 +const struct dma_map_ops sbus_dma_ops = {
5111 .alloc_coherent = sbus_alloc_coherent,
5112 .free_coherent = sbus_free_coherent,
5113 .map_page = sbus_map_page,
5114 @@ -403,7 +403,7 @@ struct dma_map_ops sbus_dma_ops = {
5115 .sync_sg_for_device = sbus_sync_sg_for_device,
5116 };
5117
5118 -struct dma_map_ops *dma_ops = &sbus_dma_ops;
5119 +const struct dma_map_ops *dma_ops = &sbus_dma_ops;
5120 EXPORT_SYMBOL(dma_ops);
5121
5122 static int __init sparc_register_ioport(void)
5123 @@ -640,7 +640,7 @@ static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *
5124 }
5125 }
5126
5127 -struct dma_map_ops pci32_dma_ops = {
5128 +const struct dma_map_ops pci32_dma_ops = {
5129 .alloc_coherent = pci32_alloc_coherent,
5130 .free_coherent = pci32_free_coherent,
5131 .map_page = pci32_map_page,
5132 diff --git a/arch/sparc/kernel/kgdb_32.c b/arch/sparc/kernel/kgdb_32.c
5133 index 04df4ed..55c4b6e 100644
5134 --- a/arch/sparc/kernel/kgdb_32.c
5135 +++ b/arch/sparc/kernel/kgdb_32.c
5136 @@ -158,7 +158,7 @@ void kgdb_arch_exit(void)
5137 {
5138 }
5139
5140 -struct kgdb_arch arch_kgdb_ops = {
5141 +const struct kgdb_arch arch_kgdb_ops = {
5142 /* Breakpoint instruction: ta 0x7d */
5143 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x7d },
5144 };
5145 diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c
5146 index f5a0fd4..d886f71 100644
5147 --- a/arch/sparc/kernel/kgdb_64.c
5148 +++ b/arch/sparc/kernel/kgdb_64.c
5149 @@ -180,7 +180,7 @@ void kgdb_arch_exit(void)
5150 {
5151 }
5152
5153 -struct kgdb_arch arch_kgdb_ops = {
5154 +const struct kgdb_arch arch_kgdb_ops = {
5155 /* Breakpoint instruction: ta 0x72 */
5156 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x72 },
5157 };
5158 diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
5159 index 23c33ff..d137fbd 100644
5160 --- a/arch/sparc/kernel/pci_sun4v.c
5161 +++ b/arch/sparc/kernel/pci_sun4v.c
5162 @@ -525,7 +525,7 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
5163 spin_unlock_irqrestore(&iommu->lock, flags);
5164 }
5165
5166 -static struct dma_map_ops sun4v_dma_ops = {
5167 +static const struct dma_map_ops sun4v_dma_ops = {
5168 .alloc_coherent = dma_4v_alloc_coherent,
5169 .free_coherent = dma_4v_free_coherent,
5170 .map_page = dma_4v_map_page,
5171 diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
5172 index c49865b..b41a81b 100644
5173 --- a/arch/sparc/kernel/process_32.c
5174 +++ b/arch/sparc/kernel/process_32.c
5175 @@ -196,7 +196,7 @@ void __show_backtrace(unsigned long fp)
5176 rw->ins[4], rw->ins[5],
5177 rw->ins[6],
5178 rw->ins[7]);
5179 - printk("%pS\n", (void *) rw->ins[7]);
5180 + printk("%pA\n", (void *) rw->ins[7]);
5181 rw = (struct reg_window32 *) rw->ins[6];
5182 }
5183 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
5184 @@ -263,14 +263,14 @@ void show_regs(struct pt_regs *r)
5185
5186 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
5187 r->psr, r->pc, r->npc, r->y, print_tainted());
5188 - printk("PC: <%pS>\n", (void *) r->pc);
5189 + printk("PC: <%pA>\n", (void *) r->pc);
5190 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5191 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
5192 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
5193 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5194 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
5195 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
5196 - printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
5197 + printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
5198
5199 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5200 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
5201 @@ -305,7 +305,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
5202 rw = (struct reg_window32 *) fp;
5203 pc = rw->ins[7];
5204 printk("[%08lx : ", pc);
5205 - printk("%pS ] ", (void *) pc);
5206 + printk("%pA ] ", (void *) pc);
5207 fp = rw->ins[6];
5208 } while (++count < 16);
5209 printk("\n");
5210 diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
5211 index cb70476..3d0c191 100644
5212 --- a/arch/sparc/kernel/process_64.c
5213 +++ b/arch/sparc/kernel/process_64.c
5214 @@ -180,14 +180,14 @@ static void show_regwindow(struct pt_regs *regs)
5215 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
5216 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
5217 if (regs->tstate & TSTATE_PRIV)
5218 - printk("I7: <%pS>\n", (void *) rwk->ins[7]);
5219 + printk("I7: <%pA>\n", (void *) rwk->ins[7]);
5220 }
5221
5222 void show_regs(struct pt_regs *regs)
5223 {
5224 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
5225 regs->tpc, regs->tnpc, regs->y, print_tainted());
5226 - printk("TPC: <%pS>\n", (void *) regs->tpc);
5227 + printk("TPC: <%pA>\n", (void *) regs->tpc);
5228 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
5229 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
5230 regs->u_regs[3]);
5231 @@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
5232 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
5233 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
5234 regs->u_regs[15]);
5235 - printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
5236 + printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
5237 show_regwindow(regs);
5238 }
5239
5240 @@ -284,7 +284,7 @@ void arch_trigger_all_cpu_backtrace(void)
5241 ((tp && tp->task) ? tp->task->pid : -1));
5242
5243 if (gp->tstate & TSTATE_PRIV) {
5244 - printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
5245 + printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
5246 (void *) gp->tpc,
5247 (void *) gp->o7,
5248 (void *) gp->i7,
5249 diff --git a/arch/sparc/kernel/sigutil_64.c b/arch/sparc/kernel/sigutil_64.c
5250 index 6edc4e5..06a69b4 100644
5251 --- a/arch/sparc/kernel/sigutil_64.c
5252 +++ b/arch/sparc/kernel/sigutil_64.c
5253 @@ -2,6 +2,7 @@
5254 #include <linux/types.h>
5255 #include <linux/thread_info.h>
5256 #include <linux/uaccess.h>
5257 +#include <linux/errno.h>
5258
5259 #include <asm/sigcontext.h>
5260 #include <asm/fpumacro.h>
5261 diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
5262 index 3a82e65..ce0a53a 100644
5263 --- a/arch/sparc/kernel/sys_sparc_32.c
5264 +++ b/arch/sparc/kernel/sys_sparc_32.c
5265 @@ -57,7 +57,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5266 if (ARCH_SUN4C && len > 0x20000000)
5267 return -ENOMEM;
5268 if (!addr)
5269 - addr = TASK_UNMAPPED_BASE;
5270 + addr = current->mm->mmap_base;
5271
5272 if (flags & MAP_SHARED)
5273 addr = COLOUR_ALIGN(addr);
5274 @@ -72,7 +72,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5275 }
5276 if (TASK_SIZE - PAGE_SIZE - len < addr)
5277 return -ENOMEM;
5278 - if (!vmm || addr + len <= vmm->vm_start)
5279 + if (check_heap_stack_gap(vmm, addr, len))
5280 return addr;
5281 addr = vmm->vm_end;
5282 if (flags & MAP_SHARED)
5283 diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
5284 index cfa0e19..98972ac 100644
5285 --- a/arch/sparc/kernel/sys_sparc_64.c
5286 +++ b/arch/sparc/kernel/sys_sparc_64.c
5287 @@ -125,7 +125,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5288 /* We do not accept a shared mapping if it would violate
5289 * cache aliasing constraints.
5290 */
5291 - if ((flags & MAP_SHARED) &&
5292 + if ((filp || (flags & MAP_SHARED)) &&
5293 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5294 return -EINVAL;
5295 return addr;
5296 @@ -140,6 +140,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5297 if (filp || (flags & MAP_SHARED))
5298 do_color_align = 1;
5299
5300 +#ifdef CONFIG_PAX_RANDMMAP
5301 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
5302 +#endif
5303 +
5304 if (addr) {
5305 if (do_color_align)
5306 addr = COLOUR_ALIGN(addr, pgoff);
5307 @@ -147,15 +151,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5308 addr = PAGE_ALIGN(addr);
5309
5310 vma = find_vma(mm, addr);
5311 - if (task_size - len >= addr &&
5312 - (!vma || addr + len <= vma->vm_start))
5313 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5314 return addr;
5315 }
5316
5317 if (len > mm->cached_hole_size) {
5318 - start_addr = addr = mm->free_area_cache;
5319 + start_addr = addr = mm->free_area_cache;
5320 } else {
5321 - start_addr = addr = TASK_UNMAPPED_BASE;
5322 + start_addr = addr = mm->mmap_base;
5323 mm->cached_hole_size = 0;
5324 }
5325
5326 @@ -175,14 +178,14 @@ full_search:
5327 vma = find_vma(mm, VA_EXCLUDE_END);
5328 }
5329 if (unlikely(task_size < addr)) {
5330 - if (start_addr != TASK_UNMAPPED_BASE) {
5331 - start_addr = addr = TASK_UNMAPPED_BASE;
5332 + if (start_addr != mm->mmap_base) {
5333 + start_addr = addr = mm->mmap_base;
5334 mm->cached_hole_size = 0;
5335 goto full_search;
5336 }
5337 return -ENOMEM;
5338 }
5339 - if (likely(!vma || addr + len <= vma->vm_start)) {
5340 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5341 /*
5342 * Remember the place where we stopped the search:
5343 */
5344 @@ -216,7 +219,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5345 /* We do not accept a shared mapping if it would violate
5346 * cache aliasing constraints.
5347 */
5348 - if ((flags & MAP_SHARED) &&
5349 + if ((filp || (flags & MAP_SHARED)) &&
5350 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5351 return -EINVAL;
5352 return addr;
5353 @@ -237,8 +240,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5354 addr = PAGE_ALIGN(addr);
5355
5356 vma = find_vma(mm, addr);
5357 - if (task_size - len >= addr &&
5358 - (!vma || addr + len <= vma->vm_start))
5359 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5360 return addr;
5361 }
5362
5363 @@ -259,7 +261,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5364 /* make sure it can fit in the remaining address space */
5365 if (likely(addr > len)) {
5366 vma = find_vma(mm, addr-len);
5367 - if (!vma || addr <= vma->vm_start) {
5368 + if (check_heap_stack_gap(vma, addr - len, len)) {
5369 /* remember the address as a hint for next time */
5370 return (mm->free_area_cache = addr-len);
5371 }
5372 @@ -268,18 +270,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5373 if (unlikely(mm->mmap_base < len))
5374 goto bottomup;
5375
5376 - addr = mm->mmap_base-len;
5377 - if (do_color_align)
5378 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5379 + addr = mm->mmap_base - len;
5380
5381 do {
5382 + if (do_color_align)
5383 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5384 /*
5385 * Lookup failure means no vma is above this address,
5386 * else if new region fits below vma->vm_start,
5387 * return with success:
5388 */
5389 vma = find_vma(mm, addr);
5390 - if (likely(!vma || addr+len <= vma->vm_start)) {
5391 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5392 /* remember the address as a hint for next time */
5393 return (mm->free_area_cache = addr);
5394 }
5395 @@ -289,10 +291,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5396 mm->cached_hole_size = vma->vm_start - addr;
5397
5398 /* try just below the current vma->vm_start */
5399 - addr = vma->vm_start-len;
5400 - if (do_color_align)
5401 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5402 - } while (likely(len < vma->vm_start));
5403 + addr = skip_heap_stack_gap(vma, len);
5404 + } while (!IS_ERR_VALUE(addr));
5405
5406 bottomup:
5407 /*
5408 @@ -384,6 +384,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5409 current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY ||
5410 sysctl_legacy_va_layout) {
5411 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
5412 +
5413 +#ifdef CONFIG_PAX_RANDMMAP
5414 + if (mm->pax_flags & MF_PAX_RANDMMAP)
5415 + mm->mmap_base += mm->delta_mmap;
5416 +#endif
5417 +
5418 mm->get_unmapped_area = arch_get_unmapped_area;
5419 mm->unmap_area = arch_unmap_area;
5420 } else {
5421 @@ -398,6 +404,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5422 gap = (task_size / 6 * 5);
5423
5424 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
5425 +
5426 +#ifdef CONFIG_PAX_RANDMMAP
5427 + if (mm->pax_flags & MF_PAX_RANDMMAP)
5428 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
5429 +#endif
5430 +
5431 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
5432 mm->unmap_area = arch_unmap_area_topdown;
5433 }
5434 diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
5435 index c0490c7..84959d1 100644
5436 --- a/arch/sparc/kernel/traps_32.c
5437 +++ b/arch/sparc/kernel/traps_32.c
5438 @@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
5439 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
5440 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
5441
5442 +extern void gr_handle_kernel_exploit(void);
5443 +
5444 void die_if_kernel(char *str, struct pt_regs *regs)
5445 {
5446 static int die_counter;
5447 @@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5448 count++ < 30 &&
5449 (((unsigned long) rw) >= PAGE_OFFSET) &&
5450 !(((unsigned long) rw) & 0x7)) {
5451 - printk("Caller[%08lx]: %pS\n", rw->ins[7],
5452 + printk("Caller[%08lx]: %pA\n", rw->ins[7],
5453 (void *) rw->ins[7]);
5454 rw = (struct reg_window32 *)rw->ins[6];
5455 }
5456 }
5457 printk("Instruction DUMP:");
5458 instruction_dump ((unsigned long *) regs->pc);
5459 - if(regs->psr & PSR_PS)
5460 + if(regs->psr & PSR_PS) {
5461 + gr_handle_kernel_exploit();
5462 do_exit(SIGKILL);
5463 + }
5464 do_exit(SIGSEGV);
5465 }
5466
5467 diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
5468 index 10f7bb9..cdb6793 100644
5469 --- a/arch/sparc/kernel/traps_64.c
5470 +++ b/arch/sparc/kernel/traps_64.c
5471 @@ -73,7 +73,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
5472 i + 1,
5473 p->trapstack[i].tstate, p->trapstack[i].tpc,
5474 p->trapstack[i].tnpc, p->trapstack[i].tt);
5475 - printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
5476 + printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
5477 }
5478 }
5479
5480 @@ -93,6 +93,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
5481
5482 lvl -= 0x100;
5483 if (regs->tstate & TSTATE_PRIV) {
5484 +
5485 +#ifdef CONFIG_PAX_REFCOUNT
5486 + if (lvl == 6)
5487 + pax_report_refcount_overflow(regs);
5488 +#endif
5489 +
5490 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
5491 die_if_kernel(buffer, regs);
5492 }
5493 @@ -111,11 +117,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
5494 void bad_trap_tl1(struct pt_regs *regs, long lvl)
5495 {
5496 char buffer[32];
5497 -
5498 +
5499 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
5500 0, lvl, SIGTRAP) == NOTIFY_STOP)
5501 return;
5502
5503 +#ifdef CONFIG_PAX_REFCOUNT
5504 + if (lvl == 6)
5505 + pax_report_refcount_overflow(regs);
5506 +#endif
5507 +
5508 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
5509
5510 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
5511 @@ -1139,7 +1150,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
5512 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
5513 printk("%s" "ERROR(%d): ",
5514 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
5515 - printk("TPC<%pS>\n", (void *) regs->tpc);
5516 + printk("TPC<%pA>\n", (void *) regs->tpc);
5517 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
5518 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
5519 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
5520 @@ -1746,7 +1757,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
5521 smp_processor_id(),
5522 (type & 0x1) ? 'I' : 'D',
5523 regs->tpc);
5524 - printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
5525 + printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
5526 panic("Irrecoverable Cheetah+ parity error.");
5527 }
5528
5529 @@ -1754,7 +1765,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
5530 smp_processor_id(),
5531 (type & 0x1) ? 'I' : 'D',
5532 regs->tpc);
5533 - printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
5534 + printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
5535 }
5536
5537 struct sun4v_error_entry {
5538 @@ -1961,9 +1972,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
5539
5540 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
5541 regs->tpc, tl);
5542 - printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
5543 + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
5544 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
5545 - printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
5546 + printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
5547 (void *) regs->u_regs[UREG_I7]);
5548 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
5549 "pte[%lx] error[%lx]\n",
5550 @@ -1985,9 +1996,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
5551
5552 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
5553 regs->tpc, tl);
5554 - printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
5555 + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
5556 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
5557 - printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
5558 + printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
5559 (void *) regs->u_regs[UREG_I7]);
5560 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
5561 "pte[%lx] error[%lx]\n",
5562 @@ -2191,7 +2202,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
5563 fp = (unsigned long)sf->fp + STACK_BIAS;
5564 }
5565
5566 - printk(" [%016lx] %pS\n", pc, (void *) pc);
5567 + printk(" [%016lx] %pA\n", pc, (void *) pc);
5568 } while (++count < 16);
5569 }
5570
5571 @@ -2233,6 +2244,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
5572 return (struct reg_window *) (fp + STACK_BIAS);
5573 }
5574
5575 +extern void gr_handle_kernel_exploit(void);
5576 +
5577 void die_if_kernel(char *str, struct pt_regs *regs)
5578 {
5579 static int die_counter;
5580 @@ -2260,7 +2273,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5581 while (rw &&
5582 count++ < 30&&
5583 is_kernel_stack(current, rw)) {
5584 - printk("Caller[%016lx]: %pS\n", rw->ins[7],
5585 + printk("Caller[%016lx]: %pA\n", rw->ins[7],
5586 (void *) rw->ins[7]);
5587
5588 rw = kernel_stack_up(rw);
5589 @@ -2273,8 +2286,11 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5590 }
5591 user_instruction_dump ((unsigned int __user *) regs->tpc);
5592 }
5593 - if (regs->tstate & TSTATE_PRIV)
5594 + if (regs->tstate & TSTATE_PRIV) {
5595 + gr_handle_kernel_exploit();
5596 do_exit(SIGKILL);
5597 + }
5598 +
5599 do_exit(SIGSEGV);
5600 }
5601 EXPORT_SYMBOL(die_if_kernel);
5602 diff --git a/arch/sparc/kernel/una_asm_64.S b/arch/sparc/kernel/una_asm_64.S
5603 index be183fe..1c8d332 100644
5604 --- a/arch/sparc/kernel/una_asm_64.S
5605 +++ b/arch/sparc/kernel/una_asm_64.S
5606 @@ -127,7 +127,7 @@ do_int_load:
5607 wr %o5, 0x0, %asi
5608 retl
5609 mov 0, %o0
5610 - .size __do_int_load, .-__do_int_load
5611 + .size do_int_load, .-do_int_load
5612
5613 .section __ex_table,"a"
5614 .word 4b, __retl_efault
5615 diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
5616 index 3792099..2af17d8 100644
5617 --- a/arch/sparc/kernel/unaligned_64.c
5618 +++ b/arch/sparc/kernel/unaligned_64.c
5619 @@ -288,7 +288,7 @@ static void log_unaligned(struct pt_regs *regs)
5620 if (count < 5) {
5621 last_time = jiffies;
5622 count++;
5623 - printk("Kernel unaligned access at TPC[%lx] %pS\n",
5624 + printk("Kernel unaligned access at TPC[%lx] %pA\n",
5625 regs->tpc, (void *) regs->tpc);
5626 }
5627 }
5628 diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
5629 index e75faf0..24f12f9 100644
5630 --- a/arch/sparc/lib/Makefile
5631 +++ b/arch/sparc/lib/Makefile
5632 @@ -2,7 +2,7 @@
5633 #
5634
5635 asflags-y := -ansi -DST_DIV0=0x02
5636 -ccflags-y := -Werror
5637 +#ccflags-y := -Werror
5638
5639 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
5640 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
5641 diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
5642 index 0268210..f0291ca 100644
5643 --- a/arch/sparc/lib/atomic_64.S
5644 +++ b/arch/sparc/lib/atomic_64.S
5645 @@ -18,7 +18,12 @@
5646 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
5647 BACKOFF_SETUP(%o2)
5648 1: lduw [%o1], %g1
5649 - add %g1, %o0, %g7
5650 + addcc %g1, %o0, %g7
5651 +
5652 +#ifdef CONFIG_PAX_REFCOUNT
5653 + tvs %icc, 6
5654 +#endif
5655 +
5656 cas [%o1], %g1, %g7
5657 cmp %g1, %g7
5658 bne,pn %icc, 2f
5659 @@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
5660 2: BACKOFF_SPIN(%o2, %o3, 1b)
5661 .size atomic_add, .-atomic_add
5662
5663 + .globl atomic_add_unchecked
5664 + .type atomic_add_unchecked,#function
5665 +atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5666 + BACKOFF_SETUP(%o2)
5667 +1: lduw [%o1], %g1
5668 + add %g1, %o0, %g7
5669 + cas [%o1], %g1, %g7
5670 + cmp %g1, %g7
5671 + bne,pn %icc, 2f
5672 + nop
5673 + retl
5674 + nop
5675 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5676 + .size atomic_add_unchecked, .-atomic_add_unchecked
5677 +
5678 .globl atomic_sub
5679 .type atomic_sub,#function
5680 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5681 BACKOFF_SETUP(%o2)
5682 1: lduw [%o1], %g1
5683 - sub %g1, %o0, %g7
5684 + subcc %g1, %o0, %g7
5685 +
5686 +#ifdef CONFIG_PAX_REFCOUNT
5687 + tvs %icc, 6
5688 +#endif
5689 +
5690 cas [%o1], %g1, %g7
5691 cmp %g1, %g7
5692 bne,pn %icc, 2f
5693 @@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5694 2: BACKOFF_SPIN(%o2, %o3, 1b)
5695 .size atomic_sub, .-atomic_sub
5696
5697 + .globl atomic_sub_unchecked
5698 + .type atomic_sub_unchecked,#function
5699 +atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
5700 + BACKOFF_SETUP(%o2)
5701 +1: lduw [%o1], %g1
5702 + sub %g1, %o0, %g7
5703 + cas [%o1], %g1, %g7
5704 + cmp %g1, %g7
5705 + bne,pn %icc, 2f
5706 + nop
5707 + retl
5708 + nop
5709 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5710 + .size atomic_sub_unchecked, .-atomic_sub_unchecked
5711 +
5712 .globl atomic_add_ret
5713 .type atomic_add_ret,#function
5714 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5715 BACKOFF_SETUP(%o2)
5716 1: lduw [%o1], %g1
5717 - add %g1, %o0, %g7
5718 + addcc %g1, %o0, %g7
5719 +
5720 +#ifdef CONFIG_PAX_REFCOUNT
5721 + tvs %icc, 6
5722 +#endif
5723 +
5724 cas [%o1], %g1, %g7
5725 cmp %g1, %g7
5726 bne,pn %icc, 2f
5727 @@ -59,12 +104,33 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5728 2: BACKOFF_SPIN(%o2, %o3, 1b)
5729 .size atomic_add_ret, .-atomic_add_ret
5730
5731 + .globl atomic_add_ret_unchecked
5732 + .type atomic_add_ret_unchecked,#function
5733 +atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5734 + BACKOFF_SETUP(%o2)
5735 +1: lduw [%o1], %g1
5736 + addcc %g1, %o0, %g7
5737 + cas [%o1], %g1, %g7
5738 + cmp %g1, %g7
5739 + bne,pn %icc, 2f
5740 + add %g7, %o0, %g7
5741 + sra %g7, 0, %o0
5742 + retl
5743 + nop
5744 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5745 + .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
5746 +
5747 .globl atomic_sub_ret
5748 .type atomic_sub_ret,#function
5749 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5750 BACKOFF_SETUP(%o2)
5751 1: lduw [%o1], %g1
5752 - sub %g1, %o0, %g7
5753 + subcc %g1, %o0, %g7
5754 +
5755 +#ifdef CONFIG_PAX_REFCOUNT
5756 + tvs %icc, 6
5757 +#endif
5758 +
5759 cas [%o1], %g1, %g7
5760 cmp %g1, %g7
5761 bne,pn %icc, 2f
5762 @@ -80,7 +146,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5763 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
5764 BACKOFF_SETUP(%o2)
5765 1: ldx [%o1], %g1
5766 - add %g1, %o0, %g7
5767 + addcc %g1, %o0, %g7
5768 +
5769 +#ifdef CONFIG_PAX_REFCOUNT
5770 + tvs %xcc, 6
5771 +#endif
5772 +
5773 casx [%o1], %g1, %g7
5774 cmp %g1, %g7
5775 bne,pn %xcc, 2f
5776 @@ -90,12 +161,32 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
5777 2: BACKOFF_SPIN(%o2, %o3, 1b)
5778 .size atomic64_add, .-atomic64_add
5779
5780 + .globl atomic64_add_unchecked
5781 + .type atomic64_add_unchecked,#function
5782 +atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5783 + BACKOFF_SETUP(%o2)
5784 +1: ldx [%o1], %g1
5785 + addcc %g1, %o0, %g7
5786 + casx [%o1], %g1, %g7
5787 + cmp %g1, %g7
5788 + bne,pn %xcc, 2f
5789 + nop
5790 + retl
5791 + nop
5792 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5793 + .size atomic64_add_unchecked, .-atomic64_add_unchecked
5794 +
5795 .globl atomic64_sub
5796 .type atomic64_sub,#function
5797 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5798 BACKOFF_SETUP(%o2)
5799 1: ldx [%o1], %g1
5800 - sub %g1, %o0, %g7
5801 + subcc %g1, %o0, %g7
5802 +
5803 +#ifdef CONFIG_PAX_REFCOUNT
5804 + tvs %xcc, 6
5805 +#endif
5806 +
5807 casx [%o1], %g1, %g7
5808 cmp %g1, %g7
5809 bne,pn %xcc, 2f
5810 @@ -105,12 +196,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5811 2: BACKOFF_SPIN(%o2, %o3, 1b)
5812 .size atomic64_sub, .-atomic64_sub
5813
5814 + .globl atomic64_sub_unchecked
5815 + .type atomic64_sub_unchecked,#function
5816 +atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
5817 + BACKOFF_SETUP(%o2)
5818 +1: ldx [%o1], %g1
5819 + subcc %g1, %o0, %g7
5820 + casx [%o1], %g1, %g7
5821 + cmp %g1, %g7
5822 + bne,pn %xcc, 2f
5823 + nop
5824 + retl
5825 + nop
5826 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5827 + .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
5828 +
5829 .globl atomic64_add_ret
5830 .type atomic64_add_ret,#function
5831 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5832 BACKOFF_SETUP(%o2)
5833 1: ldx [%o1], %g1
5834 - add %g1, %o0, %g7
5835 + addcc %g1, %o0, %g7
5836 +
5837 +#ifdef CONFIG_PAX_REFCOUNT
5838 + tvs %xcc, 6
5839 +#endif
5840 +
5841 casx [%o1], %g1, %g7
5842 cmp %g1, %g7
5843 bne,pn %xcc, 2f
5844 @@ -121,12 +232,33 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5845 2: BACKOFF_SPIN(%o2, %o3, 1b)
5846 .size atomic64_add_ret, .-atomic64_add_ret
5847
5848 + .globl atomic64_add_ret_unchecked
5849 + .type atomic64_add_ret_unchecked,#function
5850 +atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5851 + BACKOFF_SETUP(%o2)
5852 +1: ldx [%o1], %g1
5853 + addcc %g1, %o0, %g7
5854 + casx [%o1], %g1, %g7
5855 + cmp %g1, %g7
5856 + bne,pn %xcc, 2f
5857 + add %g7, %o0, %g7
5858 + mov %g7, %o0
5859 + retl
5860 + nop
5861 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5862 + .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
5863 +
5864 .globl atomic64_sub_ret
5865 .type atomic64_sub_ret,#function
5866 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5867 BACKOFF_SETUP(%o2)
5868 1: ldx [%o1], %g1
5869 - sub %g1, %o0, %g7
5870 + subcc %g1, %o0, %g7
5871 +
5872 +#ifdef CONFIG_PAX_REFCOUNT
5873 + tvs %xcc, 6
5874 +#endif
5875 +
5876 casx [%o1], %g1, %g7
5877 cmp %g1, %g7
5878 bne,pn %xcc, 2f
5879 diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
5880 index 704b126..2e79d76 100644
5881 --- a/arch/sparc/lib/ksyms.c
5882 +++ b/arch/sparc/lib/ksyms.c
5883 @@ -144,12 +144,18 @@ EXPORT_SYMBOL(__downgrade_write);
5884
5885 /* Atomic counter implementation. */
5886 EXPORT_SYMBOL(atomic_add);
5887 +EXPORT_SYMBOL(atomic_add_unchecked);
5888 EXPORT_SYMBOL(atomic_add_ret);
5889 +EXPORT_SYMBOL(atomic_add_ret_unchecked);
5890 EXPORT_SYMBOL(atomic_sub);
5891 +EXPORT_SYMBOL(atomic_sub_unchecked);
5892 EXPORT_SYMBOL(atomic_sub_ret);
5893 EXPORT_SYMBOL(atomic64_add);
5894 +EXPORT_SYMBOL(atomic64_add_unchecked);
5895 EXPORT_SYMBOL(atomic64_add_ret);
5896 +EXPORT_SYMBOL(atomic64_add_ret_unchecked);
5897 EXPORT_SYMBOL(atomic64_sub);
5898 +EXPORT_SYMBOL(atomic64_sub_unchecked);
5899 EXPORT_SYMBOL(atomic64_sub_ret);
5900
5901 /* Atomic bit operations. */
5902 diff --git a/arch/sparc/lib/rwsem_64.S b/arch/sparc/lib/rwsem_64.S
5903 index 91a7d29..ce75c29 100644
5904 --- a/arch/sparc/lib/rwsem_64.S
5905 +++ b/arch/sparc/lib/rwsem_64.S
5906 @@ -11,7 +11,12 @@
5907 .globl __down_read
5908 __down_read:
5909 1: lduw [%o0], %g1
5910 - add %g1, 1, %g7
5911 + addcc %g1, 1, %g7
5912 +
5913 +#ifdef CONFIG_PAX_REFCOUNT
5914 + tvs %icc, 6
5915 +#endif
5916 +
5917 cas [%o0], %g1, %g7
5918 cmp %g1, %g7
5919 bne,pn %icc, 1b
5920 @@ -33,7 +38,12 @@ __down_read:
5921 .globl __down_read_trylock
5922 __down_read_trylock:
5923 1: lduw [%o0], %g1
5924 - add %g1, 1, %g7
5925 + addcc %g1, 1, %g7
5926 +
5927 +#ifdef CONFIG_PAX_REFCOUNT
5928 + tvs %icc, 6
5929 +#endif
5930 +
5931 cmp %g7, 0
5932 bl,pn %icc, 2f
5933 mov 0, %o1
5934 @@ -51,7 +61,12 @@ __down_write:
5935 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
5936 1:
5937 lduw [%o0], %g3
5938 - add %g3, %g1, %g7
5939 + addcc %g3, %g1, %g7
5940 +
5941 +#ifdef CONFIG_PAX_REFCOUNT
5942 + tvs %icc, 6
5943 +#endif
5944 +
5945 cas [%o0], %g3, %g7
5946 cmp %g3, %g7
5947 bne,pn %icc, 1b
5948 @@ -77,7 +92,12 @@ __down_write_trylock:
5949 cmp %g3, 0
5950 bne,pn %icc, 2f
5951 mov 0, %o1
5952 - add %g3, %g1, %g7
5953 + addcc %g3, %g1, %g7
5954 +
5955 +#ifdef CONFIG_PAX_REFCOUNT
5956 + tvs %icc, 6
5957 +#endif
5958 +
5959 cas [%o0], %g3, %g7
5960 cmp %g3, %g7
5961 bne,pn %icc, 1b
5962 @@ -90,7 +110,12 @@ __down_write_trylock:
5963 __up_read:
5964 1:
5965 lduw [%o0], %g1
5966 - sub %g1, 1, %g7
5967 + subcc %g1, 1, %g7
5968 +
5969 +#ifdef CONFIG_PAX_REFCOUNT
5970 + tvs %icc, 6
5971 +#endif
5972 +
5973 cas [%o0], %g1, %g7
5974 cmp %g1, %g7
5975 bne,pn %icc, 1b
5976 @@ -118,7 +143,12 @@ __up_write:
5977 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
5978 1:
5979 lduw [%o0], %g3
5980 - sub %g3, %g1, %g7
5981 + subcc %g3, %g1, %g7
5982 +
5983 +#ifdef CONFIG_PAX_REFCOUNT
5984 + tvs %icc, 6
5985 +#endif
5986 +
5987 cas [%o0], %g3, %g7
5988 cmp %g3, %g7
5989 bne,pn %icc, 1b
5990 @@ -143,7 +173,12 @@ __downgrade_write:
5991 or %g1, %lo(RWSEM_WAITING_BIAS), %g1
5992 1:
5993 lduw [%o0], %g3
5994 - sub %g3, %g1, %g7
5995 + subcc %g3, %g1, %g7
5996 +
5997 +#ifdef CONFIG_PAX_REFCOUNT
5998 + tvs %icc, 6
5999 +#endif
6000 +
6001 cas [%o0], %g3, %g7
6002 cmp %g3, %g7
6003 bne,pn %icc, 1b
6004 diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
6005 index 79836a7..62f47a2 100644
6006 --- a/arch/sparc/mm/Makefile
6007 +++ b/arch/sparc/mm/Makefile
6008 @@ -2,7 +2,7 @@
6009 #
6010
6011 asflags-y := -ansi
6012 -ccflags-y := -Werror
6013 +#ccflags-y := -Werror
6014
6015 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
6016 obj-y += fault_$(BITS).o
6017 diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
6018 index b99f81c..3453e93 100644
6019 --- a/arch/sparc/mm/fault_32.c
6020 +++ b/arch/sparc/mm/fault_32.c
6021 @@ -21,6 +21,9 @@
6022 #include <linux/interrupt.h>
6023 #include <linux/module.h>
6024 #include <linux/kdebug.h>
6025 +#include <linux/slab.h>
6026 +#include <linux/pagemap.h>
6027 +#include <linux/compiler.h>
6028
6029 #include <asm/system.h>
6030 #include <asm/page.h>
6031 @@ -167,6 +170,267 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
6032 return safe_compute_effective_address(regs, insn);
6033 }
6034
6035 +#ifdef CONFIG_PAX_PAGEEXEC
6036 +#ifdef CONFIG_PAX_DLRESOLVE
6037 +static void pax_emuplt_close(struct vm_area_struct *vma)
6038 +{
6039 + vma->vm_mm->call_dl_resolve = 0UL;
6040 +}
6041 +
6042 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6043 +{
6044 + unsigned int *kaddr;
6045 +
6046 + vmf->page = alloc_page(GFP_HIGHUSER);
6047 + if (!vmf->page)
6048 + return VM_FAULT_OOM;
6049 +
6050 + kaddr = kmap(vmf->page);
6051 + memset(kaddr, 0, PAGE_SIZE);
6052 + kaddr[0] = 0x9DE3BFA8U; /* save */
6053 + flush_dcache_page(vmf->page);
6054 + kunmap(vmf->page);
6055 + return VM_FAULT_MAJOR;
6056 +}
6057 +
6058 +static const struct vm_operations_struct pax_vm_ops = {
6059 + .close = pax_emuplt_close,
6060 + .fault = pax_emuplt_fault
6061 +};
6062 +
6063 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6064 +{
6065 + int ret;
6066 +
6067 + vma->vm_mm = current->mm;
6068 + vma->vm_start = addr;
6069 + vma->vm_end = addr + PAGE_SIZE;
6070 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6071 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6072 + vma->vm_ops = &pax_vm_ops;
6073 +
6074 + ret = insert_vm_struct(current->mm, vma);
6075 + if (ret)
6076 + return ret;
6077 +
6078 + ++current->mm->total_vm;
6079 + return 0;
6080 +}
6081 +#endif
6082 +
6083 +/*
6084 + * PaX: decide what to do with offenders (regs->pc = fault address)
6085 + *
6086 + * returns 1 when task should be killed
6087 + * 2 when patched PLT trampoline was detected
6088 + * 3 when unpatched PLT trampoline was detected
6089 + */
6090 +static int pax_handle_fetch_fault(struct pt_regs *regs)
6091 +{
6092 +
6093 +#ifdef CONFIG_PAX_EMUPLT
6094 + int err;
6095 +
6096 + do { /* PaX: patched PLT emulation #1 */
6097 + unsigned int sethi1, sethi2, jmpl;
6098 +
6099 + err = get_user(sethi1, (unsigned int *)regs->pc);
6100 + err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
6101 + err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
6102 +
6103 + if (err)
6104 + break;
6105 +
6106 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6107 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
6108 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
6109 + {
6110 + unsigned int addr;
6111 +
6112 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6113 + addr = regs->u_regs[UREG_G1];
6114 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6115 + regs->pc = addr;
6116 + regs->npc = addr+4;
6117 + return 2;
6118 + }
6119 + } while (0);
6120 +
6121 + { /* PaX: patched PLT emulation #2 */
6122 + unsigned int ba;
6123 +
6124 + err = get_user(ba, (unsigned int *)regs->pc);
6125 +
6126 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
6127 + unsigned int addr;
6128 +
6129 + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
6130 + regs->pc = addr;
6131 + regs->npc = addr+4;
6132 + return 2;
6133 + }
6134 + }
6135 +
6136 + do { /* PaX: patched PLT emulation #3 */
6137 + unsigned int sethi, jmpl, nop;
6138 +
6139 + err = get_user(sethi, (unsigned int *)regs->pc);
6140 + err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
6141 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
6142 +
6143 + if (err)
6144 + break;
6145 +
6146 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6147 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
6148 + nop == 0x01000000U)
6149 + {
6150 + unsigned int addr;
6151 +
6152 + addr = (sethi & 0x003FFFFFU) << 10;
6153 + regs->u_regs[UREG_G1] = addr;
6154 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6155 + regs->pc = addr;
6156 + regs->npc = addr+4;
6157 + return 2;
6158 + }
6159 + } while (0);
6160 +
6161 + do { /* PaX: unpatched PLT emulation step 1 */
6162 + unsigned int sethi, ba, nop;
6163 +
6164 + err = get_user(sethi, (unsigned int *)regs->pc);
6165 + err |= get_user(ba, (unsigned int *)(regs->pc+4));
6166 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
6167 +
6168 + if (err)
6169 + break;
6170 +
6171 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6172 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
6173 + nop == 0x01000000U)
6174 + {
6175 + unsigned int addr, save, call;
6176 +
6177 + if ((ba & 0xFFC00000U) == 0x30800000U)
6178 + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
6179 + else
6180 + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
6181 +
6182 + err = get_user(save, (unsigned int *)addr);
6183 + err |= get_user(call, (unsigned int *)(addr+4));
6184 + err |= get_user(nop, (unsigned int *)(addr+8));
6185 + if (err)
6186 + break;
6187 +
6188 +#ifdef CONFIG_PAX_DLRESOLVE
6189 + if (save == 0x9DE3BFA8U &&
6190 + (call & 0xC0000000U) == 0x40000000U &&
6191 + nop == 0x01000000U)
6192 + {
6193 + struct vm_area_struct *vma;
6194 + unsigned long call_dl_resolve;
6195 +
6196 + down_read(&current->mm->mmap_sem);
6197 + call_dl_resolve = current->mm->call_dl_resolve;
6198 + up_read(&current->mm->mmap_sem);
6199 + if (likely(call_dl_resolve))
6200 + goto emulate;
6201 +
6202 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
6203 +
6204 + down_write(&current->mm->mmap_sem);
6205 + if (current->mm->call_dl_resolve) {
6206 + call_dl_resolve = current->mm->call_dl_resolve;
6207 + up_write(&current->mm->mmap_sem);
6208 + if (vma)
6209 + kmem_cache_free(vm_area_cachep, vma);
6210 + goto emulate;
6211 + }
6212 +
6213 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
6214 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
6215 + up_write(&current->mm->mmap_sem);
6216 + if (vma)
6217 + kmem_cache_free(vm_area_cachep, vma);
6218 + return 1;
6219 + }
6220 +
6221 + if (pax_insert_vma(vma, call_dl_resolve)) {
6222 + up_write(&current->mm->mmap_sem);
6223 + kmem_cache_free(vm_area_cachep, vma);
6224 + return 1;
6225 + }
6226 +
6227 + current->mm->call_dl_resolve = call_dl_resolve;
6228 + up_write(&current->mm->mmap_sem);
6229 +
6230 +emulate:
6231 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6232 + regs->pc = call_dl_resolve;
6233 + regs->npc = addr+4;
6234 + return 3;
6235 + }
6236 +#endif
6237 +
6238 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
6239 + if ((save & 0xFFC00000U) == 0x05000000U &&
6240 + (call & 0xFFFFE000U) == 0x85C0A000U &&
6241 + nop == 0x01000000U)
6242 + {
6243 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6244 + regs->u_regs[UREG_G2] = addr + 4;
6245 + addr = (save & 0x003FFFFFU) << 10;
6246 + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6247 + regs->pc = addr;
6248 + regs->npc = addr+4;
6249 + return 3;
6250 + }
6251 + }
6252 + } while (0);
6253 +
6254 + do { /* PaX: unpatched PLT emulation step 2 */
6255 + unsigned int save, call, nop;
6256 +
6257 + err = get_user(save, (unsigned int *)(regs->pc-4));
6258 + err |= get_user(call, (unsigned int *)regs->pc);
6259 + err |= get_user(nop, (unsigned int *)(regs->pc+4));
6260 + if (err)
6261 + break;
6262 +
6263 + if (save == 0x9DE3BFA8U &&
6264 + (call & 0xC0000000U) == 0x40000000U &&
6265 + nop == 0x01000000U)
6266 + {
6267 + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
6268 +
6269 + regs->u_regs[UREG_RETPC] = regs->pc;
6270 + regs->pc = dl_resolve;
6271 + regs->npc = dl_resolve+4;
6272 + return 3;
6273 + }
6274 + } while (0);
6275 +#endif
6276 +
6277 + return 1;
6278 +}
6279 +
6280 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6281 +{
6282 + unsigned long i;
6283 +
6284 + printk(KERN_ERR "PAX: bytes at PC: ");
6285 + for (i = 0; i < 8; i++) {
6286 + unsigned int c;
6287 + if (get_user(c, (unsigned int *)pc+i))
6288 + printk(KERN_CONT "???????? ");
6289 + else
6290 + printk(KERN_CONT "%08x ", c);
6291 + }
6292 + printk("\n");
6293 +}
6294 +#endif
6295 +
6296 asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
6297 unsigned long address)
6298 {
6299 @@ -231,6 +495,24 @@ good_area:
6300 if(!(vma->vm_flags & VM_WRITE))
6301 goto bad_area;
6302 } else {
6303 +
6304 +#ifdef CONFIG_PAX_PAGEEXEC
6305 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
6306 + up_read(&mm->mmap_sem);
6307 + switch (pax_handle_fetch_fault(regs)) {
6308 +
6309 +#ifdef CONFIG_PAX_EMUPLT
6310 + case 2:
6311 + case 3:
6312 + return;
6313 +#endif
6314 +
6315 + }
6316 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
6317 + do_group_exit(SIGKILL);
6318 + }
6319 +#endif
6320 +
6321 /* Allow reads even for write-only mappings */
6322 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
6323 goto bad_area;
6324 diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
6325 index 43b0da9..a0b78f9 100644
6326 --- a/arch/sparc/mm/fault_64.c
6327 +++ b/arch/sparc/mm/fault_64.c
6328 @@ -20,6 +20,9 @@
6329 #include <linux/kprobes.h>
6330 #include <linux/kdebug.h>
6331 #include <linux/percpu.h>
6332 +#include <linux/slab.h>
6333 +#include <linux/pagemap.h>
6334 +#include <linux/compiler.h>
6335
6336 #include <asm/page.h>
6337 #include <asm/pgtable.h>
6338 @@ -78,7 +81,7 @@ static void bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
6339 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
6340 regs->tpc);
6341 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
6342 - printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
6343 + printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
6344 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
6345 dump_stack();
6346 unhandled_fault(regs->tpc, current, regs);
6347 @@ -249,6 +252,456 @@ static void noinline bogus_32bit_fault_address(struct pt_regs *regs,
6348 show_regs(regs);
6349 }
6350
6351 +#ifdef CONFIG_PAX_PAGEEXEC
6352 +#ifdef CONFIG_PAX_DLRESOLVE
6353 +static void pax_emuplt_close(struct vm_area_struct *vma)
6354 +{
6355 + vma->vm_mm->call_dl_resolve = 0UL;
6356 +}
6357 +
6358 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6359 +{
6360 + unsigned int *kaddr;
6361 +
6362 + vmf->page = alloc_page(GFP_HIGHUSER);
6363 + if (!vmf->page)
6364 + return VM_FAULT_OOM;
6365 +
6366 + kaddr = kmap(vmf->page);
6367 + memset(kaddr, 0, PAGE_SIZE);
6368 + kaddr[0] = 0x9DE3BFA8U; /* save */
6369 + flush_dcache_page(vmf->page);
6370 + kunmap(vmf->page);
6371 + return VM_FAULT_MAJOR;
6372 +}
6373 +
6374 +static const struct vm_operations_struct pax_vm_ops = {
6375 + .close = pax_emuplt_close,
6376 + .fault = pax_emuplt_fault
6377 +};
6378 +
6379 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6380 +{
6381 + int ret;
6382 +
6383 + vma->vm_mm = current->mm;
6384 + vma->vm_start = addr;
6385 + vma->vm_end = addr + PAGE_SIZE;
6386 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6387 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6388 + vma->vm_ops = &pax_vm_ops;
6389 +
6390 + ret = insert_vm_struct(current->mm, vma);
6391 + if (ret)
6392 + return ret;
6393 +
6394 + ++current->mm->total_vm;
6395 + return 0;
6396 +}
6397 +#endif
6398 +
6399 +/*
6400 + * PaX: decide what to do with offenders (regs->tpc = fault address)
6401 + *
6402 + * returns 1 when task should be killed
6403 + * 2 when patched PLT trampoline was detected
6404 + * 3 when unpatched PLT trampoline was detected
6405 + */
6406 +static int pax_handle_fetch_fault(struct pt_regs *regs)
6407 +{
6408 +
6409 +#ifdef CONFIG_PAX_EMUPLT
6410 + int err;
6411 +
6412 + do { /* PaX: patched PLT emulation #1 */
6413 + unsigned int sethi1, sethi2, jmpl;
6414 +
6415 + err = get_user(sethi1, (unsigned int *)regs->tpc);
6416 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
6417 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
6418 +
6419 + if (err)
6420 + break;
6421 +
6422 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6423 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
6424 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
6425 + {
6426 + unsigned long addr;
6427 +
6428 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6429 + addr = regs->u_regs[UREG_G1];
6430 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6431 +
6432 + if (test_thread_flag(TIF_32BIT))
6433 + addr &= 0xFFFFFFFFUL;
6434 +
6435 + regs->tpc = addr;
6436 + regs->tnpc = addr+4;
6437 + return 2;
6438 + }
6439 + } while (0);
6440 +
6441 + { /* PaX: patched PLT emulation #2 */
6442 + unsigned int ba;
6443 +
6444 + err = get_user(ba, (unsigned int *)regs->tpc);
6445 +
6446 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
6447 + unsigned long addr;
6448 +
6449 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
6450 +
6451 + if (test_thread_flag(TIF_32BIT))
6452 + addr &= 0xFFFFFFFFUL;
6453 +
6454 + regs->tpc = addr;
6455 + regs->tnpc = addr+4;
6456 + return 2;
6457 + }
6458 + }
6459 +
6460 + do { /* PaX: patched PLT emulation #3 */
6461 + unsigned int sethi, jmpl, nop;
6462 +
6463 + err = get_user(sethi, (unsigned int *)regs->tpc);
6464 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
6465 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6466 +
6467 + if (err)
6468 + break;
6469 +
6470 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6471 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
6472 + nop == 0x01000000U)
6473 + {
6474 + unsigned long addr;
6475 +
6476 + addr = (sethi & 0x003FFFFFU) << 10;
6477 + regs->u_regs[UREG_G1] = addr;
6478 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6479 +
6480 + if (test_thread_flag(TIF_32BIT))
6481 + addr &= 0xFFFFFFFFUL;
6482 +
6483 + regs->tpc = addr;
6484 + regs->tnpc = addr+4;
6485 + return 2;
6486 + }
6487 + } while (0);
6488 +
6489 + do { /* PaX: patched PLT emulation #4 */
6490 + unsigned int sethi, mov1, call, mov2;
6491 +
6492 + err = get_user(sethi, (unsigned int *)regs->tpc);
6493 + err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
6494 + err |= get_user(call, (unsigned int *)(regs->tpc+8));
6495 + err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
6496 +
6497 + if (err)
6498 + break;
6499 +
6500 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6501 + mov1 == 0x8210000FU &&
6502 + (call & 0xC0000000U) == 0x40000000U &&
6503 + mov2 == 0x9E100001U)
6504 + {
6505 + unsigned long addr;
6506 +
6507 + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
6508 + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
6509 +
6510 + if (test_thread_flag(TIF_32BIT))
6511 + addr &= 0xFFFFFFFFUL;
6512 +
6513 + regs->tpc = addr;
6514 + regs->tnpc = addr+4;
6515 + return 2;
6516 + }
6517 + } while (0);
6518 +
6519 + do { /* PaX: patched PLT emulation #5 */
6520 + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
6521 +
6522 + err = get_user(sethi, (unsigned int *)regs->tpc);
6523 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
6524 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
6525 + err |= get_user(or1, (unsigned int *)(regs->tpc+12));
6526 + err |= get_user(or2, (unsigned int *)(regs->tpc+16));
6527 + err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
6528 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
6529 + err |= get_user(nop, (unsigned int *)(regs->tpc+28));
6530 +
6531 + if (err)
6532 + break;
6533 +
6534 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6535 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
6536 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6537 + (or1 & 0xFFFFE000U) == 0x82106000U &&
6538 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
6539 + sllx == 0x83287020U &&
6540 + jmpl == 0x81C04005U &&
6541 + nop == 0x01000000U)
6542 + {
6543 + unsigned long addr;
6544 +
6545 + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
6546 + regs->u_regs[UREG_G1] <<= 32;
6547 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
6548 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
6549 + regs->tpc = addr;
6550 + regs->tnpc = addr+4;
6551 + return 2;
6552 + }
6553 + } while (0);
6554 +
6555 + do { /* PaX: patched PLT emulation #6 */
6556 + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
6557 +
6558 + err = get_user(sethi, (unsigned int *)regs->tpc);
6559 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
6560 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
6561 + err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
6562 + err |= get_user(or, (unsigned int *)(regs->tpc+16));
6563 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
6564 + err |= get_user(nop, (unsigned int *)(regs->tpc+24));
6565 +
6566 + if (err)
6567 + break;
6568 +
6569 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6570 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
6571 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6572 + sllx == 0x83287020U &&
6573 + (or & 0xFFFFE000U) == 0x8A116000U &&
6574 + jmpl == 0x81C04005U &&
6575 + nop == 0x01000000U)
6576 + {
6577 + unsigned long addr;
6578 +
6579 + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
6580 + regs->u_regs[UREG_G1] <<= 32;
6581 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
6582 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
6583 + regs->tpc = addr;
6584 + regs->tnpc = addr+4;
6585 + return 2;
6586 + }
6587 + } while (0);
6588 +
6589 + do { /* PaX: unpatched PLT emulation step 1 */
6590 + unsigned int sethi, ba, nop;
6591 +
6592 + err = get_user(sethi, (unsigned int *)regs->tpc);
6593 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
6594 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6595 +
6596 + if (err)
6597 + break;
6598 +
6599 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6600 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
6601 + nop == 0x01000000U)
6602 + {
6603 + unsigned long addr;
6604 + unsigned int save, call;
6605 + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
6606 +
6607 + if ((ba & 0xFFC00000U) == 0x30800000U)
6608 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
6609 + else
6610 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
6611 +
6612 + if (test_thread_flag(TIF_32BIT))
6613 + addr &= 0xFFFFFFFFUL;
6614 +
6615 + err = get_user(save, (unsigned int *)addr);
6616 + err |= get_user(call, (unsigned int *)(addr+4));
6617 + err |= get_user(nop, (unsigned int *)(addr+8));
6618 + if (err)
6619 + break;
6620 +
6621 +#ifdef CONFIG_PAX_DLRESOLVE
6622 + if (save == 0x9DE3BFA8U &&
6623 + (call & 0xC0000000U) == 0x40000000U &&
6624 + nop == 0x01000000U)
6625 + {
6626 + struct vm_area_struct *vma;
6627 + unsigned long call_dl_resolve;
6628 +
6629 + down_read(&current->mm->mmap_sem);
6630 + call_dl_resolve = current->mm->call_dl_resolve;
6631 + up_read(&current->mm->mmap_sem);
6632 + if (likely(call_dl_resolve))
6633 + goto emulate;
6634 +
6635 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
6636 +
6637 + down_write(&current->mm->mmap_sem);
6638 + if (current->mm->call_dl_resolve) {
6639 + call_dl_resolve = current->mm->call_dl_resolve;
6640 + up_write(&current->mm->mmap_sem);
6641 + if (vma)
6642 + kmem_cache_free(vm_area_cachep, vma);
6643 + goto emulate;
6644 + }
6645 +
6646 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
6647 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
6648 + up_write(&current->mm->mmap_sem);
6649 + if (vma)
6650 + kmem_cache_free(vm_area_cachep, vma);
6651 + return 1;
6652 + }
6653 +
6654 + if (pax_insert_vma(vma, call_dl_resolve)) {
6655 + up_write(&current->mm->mmap_sem);
6656 + kmem_cache_free(vm_area_cachep, vma);
6657 + return 1;
6658 + }
6659 +
6660 + current->mm->call_dl_resolve = call_dl_resolve;
6661 + up_write(&current->mm->mmap_sem);
6662 +
6663 +emulate:
6664 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6665 + regs->tpc = call_dl_resolve;
6666 + regs->tnpc = addr+4;
6667 + return 3;
6668 + }
6669 +#endif
6670 +
6671 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
6672 + if ((save & 0xFFC00000U) == 0x05000000U &&
6673 + (call & 0xFFFFE000U) == 0x85C0A000U &&
6674 + nop == 0x01000000U)
6675 + {
6676 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6677 + regs->u_regs[UREG_G2] = addr + 4;
6678 + addr = (save & 0x003FFFFFU) << 10;
6679 + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6680 +
6681 + if (test_thread_flag(TIF_32BIT))
6682 + addr &= 0xFFFFFFFFUL;
6683 +
6684 + regs->tpc = addr;
6685 + regs->tnpc = addr+4;
6686 + return 3;
6687 + }
6688 +
6689 + /* PaX: 64-bit PLT stub */
6690 + err = get_user(sethi1, (unsigned int *)addr);
6691 + err |= get_user(sethi2, (unsigned int *)(addr+4));
6692 + err |= get_user(or1, (unsigned int *)(addr+8));
6693 + err |= get_user(or2, (unsigned int *)(addr+12));
6694 + err |= get_user(sllx, (unsigned int *)(addr+16));
6695 + err |= get_user(add, (unsigned int *)(addr+20));
6696 + err |= get_user(jmpl, (unsigned int *)(addr+24));
6697 + err |= get_user(nop, (unsigned int *)(addr+28));
6698 + if (err)
6699 + break;
6700 +
6701 + if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
6702 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6703 + (or1 & 0xFFFFE000U) == 0x88112000U &&
6704 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
6705 + sllx == 0x89293020U &&
6706 + add == 0x8A010005U &&
6707 + jmpl == 0x89C14000U &&
6708 + nop == 0x01000000U)
6709 + {
6710 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6711 + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
6712 + regs->u_regs[UREG_G4] <<= 32;
6713 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
6714 + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
6715 + regs->u_regs[UREG_G4] = addr + 24;
6716 + addr = regs->u_regs[UREG_G5];
6717 + regs->tpc = addr;
6718 + regs->tnpc = addr+4;
6719 + return 3;
6720 + }
6721 + }
6722 + } while (0);
6723 +
6724 +#ifdef CONFIG_PAX_DLRESOLVE
6725 + do { /* PaX: unpatched PLT emulation step 2 */
6726 + unsigned int save, call, nop;
6727 +
6728 + err = get_user(save, (unsigned int *)(regs->tpc-4));
6729 + err |= get_user(call, (unsigned int *)regs->tpc);
6730 + err |= get_user(nop, (unsigned int *)(regs->tpc+4));
6731 + if (err)
6732 + break;
6733 +
6734 + if (save == 0x9DE3BFA8U &&
6735 + (call & 0xC0000000U) == 0x40000000U &&
6736 + nop == 0x01000000U)
6737 + {
6738 + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
6739 +
6740 + if (test_thread_flag(TIF_32BIT))
6741 + dl_resolve &= 0xFFFFFFFFUL;
6742 +
6743 + regs->u_regs[UREG_RETPC] = regs->tpc;
6744 + regs->tpc = dl_resolve;
6745 + regs->tnpc = dl_resolve+4;
6746 + return 3;
6747 + }
6748 + } while (0);
6749 +#endif
6750 +
6751 + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
6752 + unsigned int sethi, ba, nop;
6753 +
6754 + err = get_user(sethi, (unsigned int *)regs->tpc);
6755 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
6756 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6757 +
6758 + if (err)
6759 + break;
6760 +
6761 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6762 + (ba & 0xFFF00000U) == 0x30600000U &&
6763 + nop == 0x01000000U)
6764 + {
6765 + unsigned long addr;
6766 +
6767 + addr = (sethi & 0x003FFFFFU) << 10;
6768 + regs->u_regs[UREG_G1] = addr;
6769 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
6770 +
6771 + if (test_thread_flag(TIF_32BIT))
6772 + addr &= 0xFFFFFFFFUL;
6773 +
6774 + regs->tpc = addr;
6775 + regs->tnpc = addr+4;
6776 + return 2;
6777 + }
6778 + } while (0);
6779 +
6780 +#endif
6781 +
6782 + return 1;
6783 +}
6784 +
6785 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6786 +{
6787 + unsigned long i;
6788 +
6789 + printk(KERN_ERR "PAX: bytes at PC: ");
6790 + for (i = 0; i < 8; i++) {
6791 + unsigned int c;
6792 + if (get_user(c, (unsigned int *)pc+i))
6793 + printk(KERN_CONT "???????? ");
6794 + else
6795 + printk(KERN_CONT "%08x ", c);
6796 + }
6797 + printk("\n");
6798 +}
6799 +#endif
6800 +
6801 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
6802 {
6803 struct mm_struct *mm = current->mm;
6804 @@ -315,6 +768,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
6805 if (!vma)
6806 goto bad_area;
6807
6808 +#ifdef CONFIG_PAX_PAGEEXEC
6809 + /* PaX: detect ITLB misses on non-exec pages */
6810 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
6811 + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
6812 + {
6813 + if (address != regs->tpc)
6814 + goto good_area;
6815 +
6816 + up_read(&mm->mmap_sem);
6817 + switch (pax_handle_fetch_fault(regs)) {
6818 +
6819 +#ifdef CONFIG_PAX_EMUPLT
6820 + case 2:
6821 + case 3:
6822 + return;
6823 +#endif
6824 +
6825 + }
6826 + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
6827 + do_group_exit(SIGKILL);
6828 + }
6829 +#endif
6830 +
6831 /* Pure DTLB misses do not tell us whether the fault causing
6832 * load/store/atomic was a write or not, it only says that there
6833 * was no match. So in such a case we (carefully) read the
6834 diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
6835 index f27d103..1b06377 100644
6836 --- a/arch/sparc/mm/hugetlbpage.c
6837 +++ b/arch/sparc/mm/hugetlbpage.c
6838 @@ -69,7 +69,7 @@ full_search:
6839 }
6840 return -ENOMEM;
6841 }
6842 - if (likely(!vma || addr + len <= vma->vm_start)) {
6843 + if (likely(check_heap_stack_gap(vma, addr, len))) {
6844 /*
6845 * Remember the place where we stopped the search:
6846 */
6847 @@ -108,7 +108,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6848 /* make sure it can fit in the remaining address space */
6849 if (likely(addr > len)) {
6850 vma = find_vma(mm, addr-len);
6851 - if (!vma || addr <= vma->vm_start) {
6852 + if (check_heap_stack_gap(vma, addr - len, len)) {
6853 /* remember the address as a hint for next time */
6854 return (mm->free_area_cache = addr-len);
6855 }
6856 @@ -117,16 +117,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6857 if (unlikely(mm->mmap_base < len))
6858 goto bottomup;
6859
6860 - addr = (mm->mmap_base-len) & HPAGE_MASK;
6861 + addr = mm->mmap_base - len;
6862
6863 do {
6864 + addr &= HPAGE_MASK;
6865 /*
6866 * Lookup failure means no vma is above this address,
6867 * else if new region fits below vma->vm_start,
6868 * return with success:
6869 */
6870 vma = find_vma(mm, addr);
6871 - if (likely(!vma || addr+len <= vma->vm_start)) {
6872 + if (likely(check_heap_stack_gap(vma, addr, len))) {
6873 /* remember the address as a hint for next time */
6874 return (mm->free_area_cache = addr);
6875 }
6876 @@ -136,8 +137,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6877 mm->cached_hole_size = vma->vm_start - addr;
6878
6879 /* try just below the current vma->vm_start */
6880 - addr = (vma->vm_start-len) & HPAGE_MASK;
6881 - } while (likely(len < vma->vm_start));
6882 + addr = skip_heap_stack_gap(vma, len);
6883 + } while (!IS_ERR_VALUE(addr));
6884
6885 bottomup:
6886 /*
6887 @@ -183,8 +184,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
6888 if (addr) {
6889 addr = ALIGN(addr, HPAGE_SIZE);
6890 vma = find_vma(mm, addr);
6891 - if (task_size - len >= addr &&
6892 - (!vma || addr + len <= vma->vm_start))
6893 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
6894 return addr;
6895 }
6896 if (mm->get_unmapped_area == arch_get_unmapped_area)
6897 diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
6898 index dc7c3b1..34c0070 100644
6899 --- a/arch/sparc/mm/init_32.c
6900 +++ b/arch/sparc/mm/init_32.c
6901 @@ -317,6 +317,9 @@ extern void device_scan(void);
6902 pgprot_t PAGE_SHARED __read_mostly;
6903 EXPORT_SYMBOL(PAGE_SHARED);
6904
6905 +pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
6906 +EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
6907 +
6908 void __init paging_init(void)
6909 {
6910 switch(sparc_cpu_model) {
6911 @@ -345,17 +348,17 @@ void __init paging_init(void)
6912
6913 /* Initialize the protection map with non-constant, MMU dependent values. */
6914 protection_map[0] = PAGE_NONE;
6915 - protection_map[1] = PAGE_READONLY;
6916 - protection_map[2] = PAGE_COPY;
6917 - protection_map[3] = PAGE_COPY;
6918 + protection_map[1] = PAGE_READONLY_NOEXEC;
6919 + protection_map[2] = PAGE_COPY_NOEXEC;
6920 + protection_map[3] = PAGE_COPY_NOEXEC;
6921 protection_map[4] = PAGE_READONLY;
6922 protection_map[5] = PAGE_READONLY;
6923 protection_map[6] = PAGE_COPY;
6924 protection_map[7] = PAGE_COPY;
6925 protection_map[8] = PAGE_NONE;
6926 - protection_map[9] = PAGE_READONLY;
6927 - protection_map[10] = PAGE_SHARED;
6928 - protection_map[11] = PAGE_SHARED;
6929 + protection_map[9] = PAGE_READONLY_NOEXEC;
6930 + protection_map[10] = PAGE_SHARED_NOEXEC;
6931 + protection_map[11] = PAGE_SHARED_NOEXEC;
6932 protection_map[12] = PAGE_READONLY;
6933 protection_map[13] = PAGE_READONLY;
6934 protection_map[14] = PAGE_SHARED;
6935 diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
6936 index 509b1ff..bfd7118 100644
6937 --- a/arch/sparc/mm/srmmu.c
6938 +++ b/arch/sparc/mm/srmmu.c
6939 @@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
6940 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
6941 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
6942 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
6943 +
6944 +#ifdef CONFIG_PAX_PAGEEXEC
6945 + PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
6946 + BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
6947 + BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
6948 +#endif
6949 +
6950 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
6951 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
6952
6953 diff --git a/arch/um/Makefile b/arch/um/Makefile
6954 index fc633db..5e1a1c2 100644
6955 --- a/arch/um/Makefile
6956 +++ b/arch/um/Makefile
6957 @@ -49,6 +49,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
6958 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
6959 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64
6960
6961 +ifdef CONSTIFY_PLUGIN
6962 +USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
6963 +endif
6964 +
6965 include $(srctree)/$(ARCH_DIR)/Makefile-$(SUBARCH)
6966
6967 #This will adjust *FLAGS accordingly to the platform.
6968 diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
6969 index 6c03acd..a5e0215 100644
6970 --- a/arch/um/include/asm/kmap_types.h
6971 +++ b/arch/um/include/asm/kmap_types.h
6972 @@ -23,6 +23,7 @@ enum km_type {
6973 KM_IRQ1,
6974 KM_SOFTIRQ0,
6975 KM_SOFTIRQ1,
6976 + KM_CLEARPAGE,
6977 KM_TYPE_NR
6978 };
6979
6980 diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
6981 index 4cc9b6c..02e5029 100644
6982 --- a/arch/um/include/asm/page.h
6983 +++ b/arch/um/include/asm/page.h
6984 @@ -14,6 +14,9 @@
6985 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
6986 #define PAGE_MASK (~(PAGE_SIZE-1))
6987
6988 +#define ktla_ktva(addr) (addr)
6989 +#define ktva_ktla(addr) (addr)
6990 +
6991 #ifndef __ASSEMBLY__
6992
6993 struct page;
6994 diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
6995 index 4a28a15..654dc2a 100644
6996 --- a/arch/um/kernel/process.c
6997 +++ b/arch/um/kernel/process.c
6998 @@ -393,22 +393,6 @@ int singlestepping(void * t)
6999 return 2;
7000 }
7001
7002 -/*
7003 - * Only x86 and x86_64 have an arch_align_stack().
7004 - * All other arches have "#define arch_align_stack(x) (x)"
7005 - * in their asm/system.h
7006 - * As this is included in UML from asm-um/system-generic.h,
7007 - * we can use it to behave as the subarch does.
7008 - */
7009 -#ifndef arch_align_stack
7010 -unsigned long arch_align_stack(unsigned long sp)
7011 -{
7012 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
7013 - sp -= get_random_int() % 8192;
7014 - return sp & ~0xf;
7015 -}
7016 -#endif
7017 -
7018 unsigned long get_wchan(struct task_struct *p)
7019 {
7020 unsigned long stack_page, sp, ip;
7021 diff --git a/arch/um/sys-i386/shared/sysdep/system.h b/arch/um/sys-i386/shared/sysdep/system.h
7022 index d1b93c4..ae1b7fd 100644
7023 --- a/arch/um/sys-i386/shared/sysdep/system.h
7024 +++ b/arch/um/sys-i386/shared/sysdep/system.h
7025 @@ -17,7 +17,7 @@
7026 # define AT_VECTOR_SIZE_ARCH 1
7027 #endif
7028
7029 -extern unsigned long arch_align_stack(unsigned long sp);
7030 +#define arch_align_stack(x) ((x) & ~0xfUL)
7031
7032 void default_idle(void);
7033
7034 diff --git a/arch/um/sys-i386/syscalls.c b/arch/um/sys-i386/syscalls.c
7035 index 857ca0b..9a2669d 100644
7036 --- a/arch/um/sys-i386/syscalls.c
7037 +++ b/arch/um/sys-i386/syscalls.c
7038 @@ -11,6 +11,21 @@
7039 #include "asm/uaccess.h"
7040 #include "asm/unistd.h"
7041
7042 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
7043 +{
7044 + unsigned long pax_task_size = TASK_SIZE;
7045 +
7046 +#ifdef CONFIG_PAX_SEGMEXEC
7047 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
7048 + pax_task_size = SEGMEXEC_TASK_SIZE;
7049 +#endif
7050 +
7051 + if (len > pax_task_size || addr > pax_task_size - len)
7052 + return -EINVAL;
7053 +
7054 + return 0;
7055 +}
7056 +
7057 /*
7058 * Perform the select(nd, in, out, ex, tv) and mmap() system
7059 * calls. Linux/i386 didn't use to be able to handle more than
7060 diff --git a/arch/um/sys-x86_64/shared/sysdep/system.h b/arch/um/sys-x86_64/shared/sysdep/system.h
7061 index d1b93c4..ae1b7fd 100644
7062 --- a/arch/um/sys-x86_64/shared/sysdep/system.h
7063 +++ b/arch/um/sys-x86_64/shared/sysdep/system.h
7064 @@ -17,7 +17,7 @@
7065 # define AT_VECTOR_SIZE_ARCH 1
7066 #endif
7067
7068 -extern unsigned long arch_align_stack(unsigned long sp);
7069 +#define arch_align_stack(x) ((x) & ~0xfUL)
7070
7071 void default_idle(void);
7072
7073 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
7074 index 73ae02a..f932de5 100644
7075 --- a/arch/x86/Kconfig
7076 +++ b/arch/x86/Kconfig
7077 @@ -223,7 +223,7 @@ config X86_TRAMPOLINE
7078
7079 config X86_32_LAZY_GS
7080 def_bool y
7081 - depends on X86_32 && !CC_STACKPROTECTOR
7082 + depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
7083
7084 config KTIME_SCALAR
7085 def_bool X86_32
7086 @@ -1008,7 +1008,7 @@ choice
7087
7088 config NOHIGHMEM
7089 bool "off"
7090 - depends on !X86_NUMAQ
7091 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7092 ---help---
7093 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
7094 However, the address space of 32-bit x86 processors is only 4
7095 @@ -1045,7 +1045,7 @@ config NOHIGHMEM
7096
7097 config HIGHMEM4G
7098 bool "4GB"
7099 - depends on !X86_NUMAQ
7100 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7101 ---help---
7102 Select this if you have a 32-bit processor and between 1 and 4
7103 gigabytes of physical RAM.
7104 @@ -1099,7 +1099,7 @@ config PAGE_OFFSET
7105 hex
7106 default 0xB0000000 if VMSPLIT_3G_OPT
7107 default 0x80000000 if VMSPLIT_2G
7108 - default 0x78000000 if VMSPLIT_2G_OPT
7109 + default 0x70000000 if VMSPLIT_2G_OPT
7110 default 0x40000000 if VMSPLIT_1G
7111 default 0xC0000000
7112 depends on X86_32
7113 @@ -1460,6 +1460,7 @@ config SECCOMP
7114
7115 config CC_STACKPROTECTOR
7116 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
7117 + depends on X86_64 || !PAX_MEMORY_UDEREF
7118 ---help---
7119 This option turns on the -fstack-protector GCC feature. This
7120 feature puts, at the beginning of functions, a canary value on
7121 @@ -1517,6 +1518,7 @@ config KEXEC_JUMP
7122 config PHYSICAL_START
7123 hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
7124 default "0x1000000"
7125 + range 0x400000 0x40000000
7126 ---help---
7127 This gives the physical address where the kernel is loaded.
7128
7129 @@ -1581,6 +1583,7 @@ config PHYSICAL_ALIGN
7130 hex
7131 prompt "Alignment value to which kernel should be aligned" if X86_32
7132 default "0x1000000"
7133 + range 0x400000 0x1000000 if PAX_KERNEXEC
7134 range 0x2000 0x1000000
7135 ---help---
7136 This value puts the alignment restrictions on physical address
7137 @@ -1612,9 +1615,10 @@ config HOTPLUG_CPU
7138 Say N if you want to disable CPU hotplug.
7139
7140 config COMPAT_VDSO
7141 - def_bool y
7142 + def_bool n
7143 prompt "Compat VDSO support"
7144 depends on X86_32 || IA32_EMULATION
7145 + depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
7146 ---help---
7147 Map the 32-bit VDSO to the predictable old-style address too.
7148 ---help---
7149 diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
7150 index 0e566103..1a6b57e 100644
7151 --- a/arch/x86/Kconfig.cpu
7152 +++ b/arch/x86/Kconfig.cpu
7153 @@ -340,7 +340,7 @@ config X86_PPRO_FENCE
7154
7155 config X86_F00F_BUG
7156 def_bool y
7157 - depends on M586MMX || M586TSC || M586 || M486 || M386
7158 + depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
7159
7160 config X86_WP_WORKS_OK
7161 def_bool y
7162 @@ -360,7 +360,7 @@ config X86_POPAD_OK
7163
7164 config X86_ALIGNMENT_16
7165 def_bool y
7166 - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
7167 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
7168
7169 config X86_INTEL_USERCOPY
7170 def_bool y
7171 @@ -406,7 +406,7 @@ config X86_CMPXCHG64
7172 # generates cmov.
7173 config X86_CMOV
7174 def_bool y
7175 - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
7176 + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
7177
7178 config X86_MINIMUM_CPU_FAMILY
7179 int
7180 diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
7181 index d105f29..c928727 100644
7182 --- a/arch/x86/Kconfig.debug
7183 +++ b/arch/x86/Kconfig.debug
7184 @@ -99,7 +99,7 @@ config X86_PTDUMP
7185 config DEBUG_RODATA
7186 bool "Write protect kernel read-only data structures"
7187 default y
7188 - depends on DEBUG_KERNEL
7189 + depends on DEBUG_KERNEL && BROKEN
7190 ---help---
7191 Mark the kernel read-only data as write-protected in the pagetables,
7192 in order to catch accidental (and incorrect) writes to such const
7193 diff --git a/arch/x86/Makefile b/arch/x86/Makefile
7194 index d2d24c9..0f21f8d 100644
7195 --- a/arch/x86/Makefile
7196 +++ b/arch/x86/Makefile
7197 @@ -44,6 +44,7 @@ ifeq ($(CONFIG_X86_32),y)
7198 else
7199 BITS := 64
7200 UTS_MACHINE := x86_64
7201 + biarch := $(call cc-option,-m64)
7202 CHECKFLAGS += -D__x86_64__ -m64
7203
7204 KBUILD_AFLAGS += -m64
7205 @@ -189,3 +190,12 @@ define archhelp
7206 echo ' FDARGS="..." arguments for the booted kernel'
7207 echo ' FDINITRD=file initrd for the booted kernel'
7208 endef
7209 +
7210 +define OLD_LD
7211 +
7212 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
7213 +*** Please upgrade your binutils to 2.18 or newer
7214 +endef
7215 +
7216 +archprepare:
7217 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
7218 diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
7219 index ec749c2..bbb5319 100644
7220 --- a/arch/x86/boot/Makefile
7221 +++ b/arch/x86/boot/Makefile
7222 @@ -69,6 +69,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
7223 $(call cc-option, -fno-stack-protector) \
7224 $(call cc-option, -mpreferred-stack-boundary=2)
7225 KBUILD_CFLAGS += $(call cc-option, -m32)
7226 +ifdef CONSTIFY_PLUGIN
7227 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7228 +endif
7229 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7230 GCOV_PROFILE := n
7231
7232 diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
7233 index 878e4b9..20537ab 100644
7234 --- a/arch/x86/boot/bitops.h
7235 +++ b/arch/x86/boot/bitops.h
7236 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7237 u8 v;
7238 const u32 *p = (const u32 *)addr;
7239
7240 - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7241 + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7242 return v;
7243 }
7244
7245 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7246
7247 static inline void set_bit(int nr, void *addr)
7248 {
7249 - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7250 + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7251 }
7252
7253 #endif /* BOOT_BITOPS_H */
7254 diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
7255 index 98239d2..f40214c 100644
7256 --- a/arch/x86/boot/boot.h
7257 +++ b/arch/x86/boot/boot.h
7258 @@ -82,7 +82,7 @@ static inline void io_delay(void)
7259 static inline u16 ds(void)
7260 {
7261 u16 seg;
7262 - asm("movw %%ds,%0" : "=rm" (seg));
7263 + asm volatile("movw %%ds,%0" : "=rm" (seg));
7264 return seg;
7265 }
7266
7267 @@ -178,7 +178,7 @@ static inline void wrgs32(u32 v, addr_t addr)
7268 static inline int memcmp(const void *s1, const void *s2, size_t len)
7269 {
7270 u8 diff;
7271 - asm("repe; cmpsb; setnz %0"
7272 + asm volatile("repe; cmpsb; setnz %0"
7273 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
7274 return diff;
7275 }
7276 diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
7277 index f8ed065..5bf5ff3 100644
7278 --- a/arch/x86/boot/compressed/Makefile
7279 +++ b/arch/x86/boot/compressed/Makefile
7280 @@ -13,6 +13,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
7281 KBUILD_CFLAGS += $(cflags-y)
7282 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
7283 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
7284 +ifdef CONSTIFY_PLUGIN
7285 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7286 +endif
7287
7288 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7289 GCOV_PROFILE := n
7290 diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
7291 index f543b70..b60fba8 100644
7292 --- a/arch/x86/boot/compressed/head_32.S
7293 +++ b/arch/x86/boot/compressed/head_32.S
7294 @@ -76,7 +76,7 @@ ENTRY(startup_32)
7295 notl %eax
7296 andl %eax, %ebx
7297 #else
7298 - movl $LOAD_PHYSICAL_ADDR, %ebx
7299 + movl $____LOAD_PHYSICAL_ADDR, %ebx
7300 #endif
7301
7302 /* Target address to relocate to for decompression */
7303 @@ -149,7 +149,7 @@ relocated:
7304 * and where it was actually loaded.
7305 */
7306 movl %ebp, %ebx
7307 - subl $LOAD_PHYSICAL_ADDR, %ebx
7308 + subl $____LOAD_PHYSICAL_ADDR, %ebx
7309 jz 2f /* Nothing to be done if loaded at compiled addr. */
7310 /*
7311 * Process relocations.
7312 @@ -157,8 +157,7 @@ relocated:
7313
7314 1: subl $4, %edi
7315 movl (%edi), %ecx
7316 - testl %ecx, %ecx
7317 - jz 2f
7318 + jecxz 2f
7319 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
7320 jmp 1b
7321 2:
7322 diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
7323 index 077e1b6..2c6b13b 100644
7324 --- a/arch/x86/boot/compressed/head_64.S
7325 +++ b/arch/x86/boot/compressed/head_64.S
7326 @@ -91,7 +91,7 @@ ENTRY(startup_32)
7327 notl %eax
7328 andl %eax, %ebx
7329 #else
7330 - movl $LOAD_PHYSICAL_ADDR, %ebx
7331 + movl $____LOAD_PHYSICAL_ADDR, %ebx
7332 #endif
7333
7334 /* Target address to relocate to for decompression */
7335 @@ -183,7 +183,7 @@ no_longmode:
7336 hlt
7337 jmp 1b
7338
7339 -#include "../../kernel/verify_cpu_64.S"
7340 +#include "../../kernel/verify_cpu.S"
7341
7342 /*
7343 * Be careful here startup_64 needs to be at a predictable
7344 @@ -234,7 +234,7 @@ ENTRY(startup_64)
7345 notq %rax
7346 andq %rax, %rbp
7347 #else
7348 - movq $LOAD_PHYSICAL_ADDR, %rbp
7349 + movq $____LOAD_PHYSICAL_ADDR, %rbp
7350 #endif
7351
7352 /* Target address to relocate to for decompression */
7353 diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
7354 index 842b2a3..f00178b 100644
7355 --- a/arch/x86/boot/compressed/misc.c
7356 +++ b/arch/x86/boot/compressed/misc.c
7357 @@ -288,7 +288,7 @@ static void parse_elf(void *output)
7358 case PT_LOAD:
7359 #ifdef CONFIG_RELOCATABLE
7360 dest = output;
7361 - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
7362 + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
7363 #else
7364 dest = (void *)(phdr->p_paddr);
7365 #endif
7366 @@ -335,7 +335,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
7367 error("Destination address too large");
7368 #endif
7369 #ifndef CONFIG_RELOCATABLE
7370 - if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
7371 + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
7372 error("Wrong destination address");
7373 #endif
7374
7375 diff --git a/arch/x86/boot/compressed/mkpiggy.c b/arch/x86/boot/compressed/mkpiggy.c
7376 index bcbd36c..b1754af 100644
7377 --- a/arch/x86/boot/compressed/mkpiggy.c
7378 +++ b/arch/x86/boot/compressed/mkpiggy.c
7379 @@ -74,7 +74,7 @@ int main(int argc, char *argv[])
7380
7381 offs = (olen > ilen) ? olen - ilen : 0;
7382 offs += olen >> 12; /* Add 8 bytes for each 32K block */
7383 - offs += 32*1024 + 18; /* Add 32K + 18 bytes slack */
7384 + offs += 64*1024; /* Add 64K bytes slack */
7385 offs = (offs+4095) & ~4095; /* Round to a 4K boundary */
7386
7387 printf(".section \".rodata.compressed\",\"a\",@progbits\n");
7388 diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
7389 index bbeb0c3..f5167ab 100644
7390 --- a/arch/x86/boot/compressed/relocs.c
7391 +++ b/arch/x86/boot/compressed/relocs.c
7392 @@ -10,8 +10,11 @@
7393 #define USE_BSD
7394 #include <endian.h>
7395
7396 +#include "../../../../include/linux/autoconf.h"
7397 +
7398 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
7399 static Elf32_Ehdr ehdr;
7400 +static Elf32_Phdr *phdr;
7401 static unsigned long reloc_count, reloc_idx;
7402 static unsigned long *relocs;
7403
7404 @@ -37,7 +40,7 @@ static const char* safe_abs_relocs[] = {
7405
7406 static int is_safe_abs_reloc(const char* sym_name)
7407 {
7408 - int i;
7409 + unsigned int i;
7410
7411 for (i = 0; i < ARRAY_SIZE(safe_abs_relocs); i++) {
7412 if (!strcmp(sym_name, safe_abs_relocs[i]))
7413 @@ -245,9 +248,39 @@ static void read_ehdr(FILE *fp)
7414 }
7415 }
7416
7417 +static void read_phdrs(FILE *fp)
7418 +{
7419 + unsigned int i;
7420 +
7421 + phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
7422 + if (!phdr) {
7423 + die("Unable to allocate %d program headers\n",
7424 + ehdr.e_phnum);
7425 + }
7426 + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
7427 + die("Seek to %d failed: %s\n",
7428 + ehdr.e_phoff, strerror(errno));
7429 + }
7430 + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
7431 + die("Cannot read ELF program headers: %s\n",
7432 + strerror(errno));
7433 + }
7434 + for(i = 0; i < ehdr.e_phnum; i++) {
7435 + phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
7436 + phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
7437 + phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
7438 + phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
7439 + phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
7440 + phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
7441 + phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
7442 + phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
7443 + }
7444 +
7445 +}
7446 +
7447 static void read_shdrs(FILE *fp)
7448 {
7449 - int i;
7450 + unsigned int i;
7451 Elf32_Shdr shdr;
7452
7453 secs = calloc(ehdr.e_shnum, sizeof(struct section));
7454 @@ -282,7 +315,7 @@ static void read_shdrs(FILE *fp)
7455
7456 static void read_strtabs(FILE *fp)
7457 {
7458 - int i;
7459 + unsigned int i;
7460 for (i = 0; i < ehdr.e_shnum; i++) {
7461 struct section *sec = &secs[i];
7462 if (sec->shdr.sh_type != SHT_STRTAB) {
7463 @@ -307,7 +340,7 @@ static void read_strtabs(FILE *fp)
7464
7465 static void read_symtabs(FILE *fp)
7466 {
7467 - int i,j;
7468 + unsigned int i,j;
7469 for (i = 0; i < ehdr.e_shnum; i++) {
7470 struct section *sec = &secs[i];
7471 if (sec->shdr.sh_type != SHT_SYMTAB) {
7472 @@ -340,7 +373,9 @@ static void read_symtabs(FILE *fp)
7473
7474 static void read_relocs(FILE *fp)
7475 {
7476 - int i,j;
7477 + unsigned int i,j;
7478 + uint32_t base;
7479 +
7480 for (i = 0; i < ehdr.e_shnum; i++) {
7481 struct section *sec = &secs[i];
7482 if (sec->shdr.sh_type != SHT_REL) {
7483 @@ -360,9 +395,18 @@ static void read_relocs(FILE *fp)
7484 die("Cannot read symbol table: %s\n",
7485 strerror(errno));
7486 }
7487 + base = 0;
7488 + for (j = 0; j < ehdr.e_phnum; j++) {
7489 + if (phdr[j].p_type != PT_LOAD )
7490 + continue;
7491 + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
7492 + continue;
7493 + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
7494 + break;
7495 + }
7496 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
7497 Elf32_Rel *rel = &sec->reltab[j];
7498 - rel->r_offset = elf32_to_cpu(rel->r_offset);
7499 + rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
7500 rel->r_info = elf32_to_cpu(rel->r_info);
7501 }
7502 }
7503 @@ -371,14 +415,14 @@ static void read_relocs(FILE *fp)
7504
7505 static void print_absolute_symbols(void)
7506 {
7507 - int i;
7508 + unsigned int i;
7509 printf("Absolute symbols\n");
7510 printf(" Num: Value Size Type Bind Visibility Name\n");
7511 for (i = 0; i < ehdr.e_shnum; i++) {
7512 struct section *sec = &secs[i];
7513 char *sym_strtab;
7514 Elf32_Sym *sh_symtab;
7515 - int j;
7516 + unsigned int j;
7517
7518 if (sec->shdr.sh_type != SHT_SYMTAB) {
7519 continue;
7520 @@ -406,14 +450,14 @@ static void print_absolute_symbols(void)
7521
7522 static void print_absolute_relocs(void)
7523 {
7524 - int i, printed = 0;
7525 + unsigned int i, printed = 0;
7526
7527 for (i = 0; i < ehdr.e_shnum; i++) {
7528 struct section *sec = &secs[i];
7529 struct section *sec_applies, *sec_symtab;
7530 char *sym_strtab;
7531 Elf32_Sym *sh_symtab;
7532 - int j;
7533 + unsigned int j;
7534 if (sec->shdr.sh_type != SHT_REL) {
7535 continue;
7536 }
7537 @@ -474,13 +518,13 @@ static void print_absolute_relocs(void)
7538
7539 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
7540 {
7541 - int i;
7542 + unsigned int i;
7543 /* Walk through the relocations */
7544 for (i = 0; i < ehdr.e_shnum; i++) {
7545 char *sym_strtab;
7546 Elf32_Sym *sh_symtab;
7547 struct section *sec_applies, *sec_symtab;
7548 - int j;
7549 + unsigned int j;
7550 struct section *sec = &secs[i];
7551
7552 if (sec->shdr.sh_type != SHT_REL) {
7553 @@ -504,6 +548,21 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
7554 if (sym->st_shndx == SHN_ABS) {
7555 continue;
7556 }
7557 + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
7558 + if (!strcmp(sec_name(sym->st_shndx), ".data.percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
7559 + continue;
7560 +
7561 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
7562 + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
7563 + if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
7564 + continue;
7565 + if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
7566 + continue;
7567 + if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
7568 + continue;
7569 + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
7570 + continue;
7571 +#endif
7572 if (r_type == R_386_NONE || r_type == R_386_PC32) {
7573 /*
7574 * NONE can be ignored and and PC relative
7575 @@ -541,7 +600,7 @@ static int cmp_relocs(const void *va, const void *vb)
7576
7577 static void emit_relocs(int as_text)
7578 {
7579 - int i;
7580 + unsigned int i;
7581 /* Count how many relocations I have and allocate space for them. */
7582 reloc_count = 0;
7583 walk_relocs(count_reloc);
7584 @@ -634,6 +693,7 @@ int main(int argc, char **argv)
7585 fname, strerror(errno));
7586 }
7587 read_ehdr(fp);
7588 + read_phdrs(fp);
7589 read_shdrs(fp);
7590 read_strtabs(fp);
7591 read_symtabs(fp);
7592 diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
7593 index 4d3ff03..e4972ff 100644
7594 --- a/arch/x86/boot/cpucheck.c
7595 +++ b/arch/x86/boot/cpucheck.c
7596 @@ -74,7 +74,7 @@ static int has_fpu(void)
7597 u16 fcw = -1, fsw = -1;
7598 u32 cr0;
7599
7600 - asm("movl %%cr0,%0" : "=r" (cr0));
7601 + asm volatile("movl %%cr0,%0" : "=r" (cr0));
7602 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
7603 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
7604 asm volatile("movl %0,%%cr0" : : "r" (cr0));
7605 @@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
7606 {
7607 u32 f0, f1;
7608
7609 - asm("pushfl ; "
7610 + asm volatile("pushfl ; "
7611 "pushfl ; "
7612 "popl %0 ; "
7613 "movl %0,%1 ; "
7614 @@ -115,7 +115,7 @@ static void get_flags(void)
7615 set_bit(X86_FEATURE_FPU, cpu.flags);
7616
7617 if (has_eflag(X86_EFLAGS_ID)) {
7618 - asm("cpuid"
7619 + asm volatile("cpuid"
7620 : "=a" (max_intel_level),
7621 "=b" (cpu_vendor[0]),
7622 "=d" (cpu_vendor[1]),
7623 @@ -124,7 +124,7 @@ static void get_flags(void)
7624
7625 if (max_intel_level >= 0x00000001 &&
7626 max_intel_level <= 0x0000ffff) {
7627 - asm("cpuid"
7628 + asm volatile("cpuid"
7629 : "=a" (tfms),
7630 "=c" (cpu.flags[4]),
7631 "=d" (cpu.flags[0])
7632 @@ -136,7 +136,7 @@ static void get_flags(void)
7633 cpu.model += ((tfms >> 16) & 0xf) << 4;
7634 }
7635
7636 - asm("cpuid"
7637 + asm volatile("cpuid"
7638 : "=a" (max_amd_level)
7639 : "a" (0x80000000)
7640 : "ebx", "ecx", "edx");
7641 @@ -144,7 +144,7 @@ static void get_flags(void)
7642 if (max_amd_level >= 0x80000001 &&
7643 max_amd_level <= 0x8000ffff) {
7644 u32 eax = 0x80000001;
7645 - asm("cpuid"
7646 + asm volatile("cpuid"
7647 : "+a" (eax),
7648 "=c" (cpu.flags[6]),
7649 "=d" (cpu.flags[1])
7650 @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7651 u32 ecx = MSR_K7_HWCR;
7652 u32 eax, edx;
7653
7654 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7655 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7656 eax &= ~(1 << 15);
7657 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7658 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7659
7660 get_flags(); /* Make sure it really did something */
7661 err = check_flags();
7662 @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7663 u32 ecx = MSR_VIA_FCR;
7664 u32 eax, edx;
7665
7666 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7667 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7668 eax |= (1<<1)|(1<<7);
7669 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7670 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7671
7672 set_bit(X86_FEATURE_CX8, cpu.flags);
7673 err = check_flags();
7674 @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7675 u32 eax, edx;
7676 u32 level = 1;
7677
7678 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7679 - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
7680 - asm("cpuid"
7681 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7682 + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
7683 + asm volatile("cpuid"
7684 : "+a" (level), "=d" (cpu.flags[0])
7685 : : "ecx", "ebx");
7686 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7687 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7688
7689 err = check_flags();
7690 }
7691 diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
7692 index b31cc54..8d69237 100644
7693 --- a/arch/x86/boot/header.S
7694 +++ b/arch/x86/boot/header.S
7695 @@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical pointer to
7696 # single linked list of
7697 # struct setup_data
7698
7699 -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
7700 +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
7701
7702 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
7703 #define VO_INIT_SIZE (VO__end - VO__text)
7704 diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
7705 index cae3feb..ff8ff2a 100644
7706 --- a/arch/x86/boot/memory.c
7707 +++ b/arch/x86/boot/memory.c
7708 @@ -19,7 +19,7 @@
7709
7710 static int detect_memory_e820(void)
7711 {
7712 - int count = 0;
7713 + unsigned int count = 0;
7714 struct biosregs ireg, oreg;
7715 struct e820entry *desc = boot_params.e820_map;
7716 static struct e820entry buf; /* static so it is zeroed */
7717 diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
7718 index 11e8c6e..fdbb1ed 100644
7719 --- a/arch/x86/boot/video-vesa.c
7720 +++ b/arch/x86/boot/video-vesa.c
7721 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
7722
7723 boot_params.screen_info.vesapm_seg = oreg.es;
7724 boot_params.screen_info.vesapm_off = oreg.di;
7725 + boot_params.screen_info.vesapm_size = oreg.cx;
7726 }
7727
7728 /*
7729 diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
7730 index d42da38..787cdf3 100644
7731 --- a/arch/x86/boot/video.c
7732 +++ b/arch/x86/boot/video.c
7733 @@ -90,7 +90,7 @@ static void store_mode_params(void)
7734 static unsigned int get_entry(void)
7735 {
7736 char entry_buf[4];
7737 - int i, len = 0;
7738 + unsigned int i, len = 0;
7739 int key;
7740 unsigned int v;
7741
7742 diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
7743 index 5b577d5..3c1fed4 100644
7744 --- a/arch/x86/crypto/aes-x86_64-asm_64.S
7745 +++ b/arch/x86/crypto/aes-x86_64-asm_64.S
7746 @@ -8,6 +8,8 @@
7747 * including this sentence is retained in full.
7748 */
7749
7750 +#include <asm/alternative-asm.h>
7751 +
7752 .extern crypto_ft_tab
7753 .extern crypto_it_tab
7754 .extern crypto_fl_tab
7755 @@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
7756 je B192; \
7757 leaq 32(r9),r9;
7758
7759 +#define ret pax_force_retaddr 0, 1; ret
7760 +
7761 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
7762 movq r1,r2; \
7763 movq r3,r4; \
7764 diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
7765 index eb0566e..e3ebad8 100644
7766 --- a/arch/x86/crypto/aesni-intel_asm.S
7767 +++ b/arch/x86/crypto/aesni-intel_asm.S
7768 @@ -16,6 +16,7 @@
7769 */
7770
7771 #include <linux/linkage.h>
7772 +#include <asm/alternative-asm.h>
7773
7774 .text
7775
7776 @@ -52,6 +53,7 @@ _key_expansion_256a:
7777 pxor %xmm1, %xmm0
7778 movaps %xmm0, (%rcx)
7779 add $0x10, %rcx
7780 + pax_force_retaddr_bts
7781 ret
7782
7783 _key_expansion_192a:
7784 @@ -75,6 +77,7 @@ _key_expansion_192a:
7785 shufps $0b01001110, %xmm2, %xmm1
7786 movaps %xmm1, 16(%rcx)
7787 add $0x20, %rcx
7788 + pax_force_retaddr_bts
7789 ret
7790
7791 _key_expansion_192b:
7792 @@ -93,6 +96,7 @@ _key_expansion_192b:
7793
7794 movaps %xmm0, (%rcx)
7795 add $0x10, %rcx
7796 + pax_force_retaddr_bts
7797 ret
7798
7799 _key_expansion_256b:
7800 @@ -104,6 +108,7 @@ _key_expansion_256b:
7801 pxor %xmm1, %xmm2
7802 movaps %xmm2, (%rcx)
7803 add $0x10, %rcx
7804 + pax_force_retaddr_bts
7805 ret
7806
7807 /*
7808 @@ -239,7 +244,9 @@ ENTRY(aesni_set_key)
7809 cmp %rcx, %rdi
7810 jb .Ldec_key_loop
7811 xor %rax, %rax
7812 + pax_force_retaddr 0, 1
7813 ret
7814 +ENDPROC(aesni_set_key)
7815
7816 /*
7817 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
7818 @@ -249,7 +256,9 @@ ENTRY(aesni_enc)
7819 movups (INP), STATE # input
7820 call _aesni_enc1
7821 movups STATE, (OUTP) # output
7822 + pax_force_retaddr 0, 1
7823 ret
7824 +ENDPROC(aesni_enc)
7825
7826 /*
7827 * _aesni_enc1: internal ABI
7828 @@ -319,6 +328,7 @@ _aesni_enc1:
7829 movaps 0x70(TKEYP), KEY
7830 # aesenclast KEY, STATE # last round
7831 .byte 0x66, 0x0f, 0x38, 0xdd, 0xc2
7832 + pax_force_retaddr_bts
7833 ret
7834
7835 /*
7836 @@ -482,6 +492,7 @@ _aesni_enc4:
7837 .byte 0x66, 0x0f, 0x38, 0xdd, 0xea
7838 # aesenclast KEY, STATE4
7839 .byte 0x66, 0x0f, 0x38, 0xdd, 0xf2
7840 + pax_force_retaddr_bts
7841 ret
7842
7843 /*
7844 @@ -493,7 +504,9 @@ ENTRY(aesni_dec)
7845 movups (INP), STATE # input
7846 call _aesni_dec1
7847 movups STATE, (OUTP) #output
7848 + pax_force_retaddr 0, 1
7849 ret
7850 +ENDPROC(aesni_dec)
7851
7852 /*
7853 * _aesni_dec1: internal ABI
7854 @@ -563,6 +576,7 @@ _aesni_dec1:
7855 movaps 0x70(TKEYP), KEY
7856 # aesdeclast KEY, STATE # last round
7857 .byte 0x66, 0x0f, 0x38, 0xdf, 0xc2
7858 + pax_force_retaddr_bts
7859 ret
7860
7861 /*
7862 @@ -726,6 +740,7 @@ _aesni_dec4:
7863 .byte 0x66, 0x0f, 0x38, 0xdf, 0xea
7864 # aesdeclast KEY, STATE4
7865 .byte 0x66, 0x0f, 0x38, 0xdf, 0xf2
7866 + pax_force_retaddr_bts
7867 ret
7868
7869 /*
7870 @@ -769,7 +784,9 @@ ENTRY(aesni_ecb_enc)
7871 cmp $16, LEN
7872 jge .Lecb_enc_loop1
7873 .Lecb_enc_ret:
7874 + pax_force_retaddr 0, 1
7875 ret
7876 +ENDPROC(aesni_ecb_enc)
7877
7878 /*
7879 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
7880 @@ -813,7 +830,9 @@ ENTRY(aesni_ecb_dec)
7881 cmp $16, LEN
7882 jge .Lecb_dec_loop1
7883 .Lecb_dec_ret:
7884 + pax_force_retaddr 0, 1
7885 ret
7886 +ENDPROC(aesni_ecb_dec)
7887
7888 /*
7889 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
7890 @@ -837,7 +856,9 @@ ENTRY(aesni_cbc_enc)
7891 jge .Lcbc_enc_loop
7892 movups STATE, (IVP)
7893 .Lcbc_enc_ret:
7894 + pax_force_retaddr 0, 1
7895 ret
7896 +ENDPROC(aesni_cbc_enc)
7897
7898 /*
7899 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
7900 @@ -894,4 +915,6 @@ ENTRY(aesni_cbc_dec)
7901 .Lcbc_dec_ret:
7902 movups IV, (IVP)
7903 .Lcbc_dec_just_ret:
7904 + pax_force_retaddr 0, 1
7905 ret
7906 +ENDPROC(aesni_cbc_dec)
7907 diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
7908 index 6214a9b..1f4fc9a 100644
7909 --- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
7910 +++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
7911 @@ -1,3 +1,5 @@
7912 +#include <asm/alternative-asm.h>
7913 +
7914 # enter ECRYPT_encrypt_bytes
7915 .text
7916 .p2align 5
7917 @@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
7918 add %r11,%rsp
7919 mov %rdi,%rax
7920 mov %rsi,%rdx
7921 + pax_force_retaddr 0, 1
7922 ret
7923 # bytesatleast65:
7924 ._bytesatleast65:
7925 @@ -891,6 +894,7 @@ ECRYPT_keysetup:
7926 add %r11,%rsp
7927 mov %rdi,%rax
7928 mov %rsi,%rdx
7929 + pax_force_retaddr
7930 ret
7931 # enter ECRYPT_ivsetup
7932 .text
7933 @@ -917,4 +921,5 @@ ECRYPT_ivsetup:
7934 add %r11,%rsp
7935 mov %rdi,%rax
7936 mov %rsi,%rdx
7937 + pax_force_retaddr
7938 ret
7939 diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
7940 index 35974a5..5662ae2 100644
7941 --- a/arch/x86/crypto/twofish-x86_64-asm_64.S
7942 +++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
7943 @@ -21,6 +21,7 @@
7944 .text
7945
7946 #include <asm/asm-offsets.h>
7947 +#include <asm/alternative-asm.h>
7948
7949 #define a_offset 0
7950 #define b_offset 4
7951 @@ -269,6 +270,7 @@ twofish_enc_blk:
7952
7953 popq R1
7954 movq $1,%rax
7955 + pax_force_retaddr 0, 1
7956 ret
7957
7958 twofish_dec_blk:
7959 @@ -321,4 +323,5 @@ twofish_dec_blk:
7960
7961 popq R1
7962 movq $1,%rax
7963 + pax_force_retaddr 0, 1
7964 ret
7965 diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
7966 index 14531ab..a89a0c0 100644
7967 --- a/arch/x86/ia32/ia32_aout.c
7968 +++ b/arch/x86/ia32/ia32_aout.c
7969 @@ -169,6 +169,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
7970 unsigned long dump_start, dump_size;
7971 struct user32 dump;
7972
7973 + memset(&dump, 0, sizeof(dump));
7974 +
7975 fs = get_fs();
7976 set_fs(KERNEL_DS);
7977 has_dumped = 1;
7978 @@ -218,12 +220,6 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
7979 dump_size = dump.u_ssize << PAGE_SHIFT;
7980 DUMP_WRITE(dump_start, dump_size);
7981 }
7982 - /*
7983 - * Finally dump the task struct. Not be used by gdb, but
7984 - * could be useful
7985 - */
7986 - set_fs(KERNEL_DS);
7987 - DUMP_WRITE(current, sizeof(*current));
7988 end_coredump:
7989 set_fs(fs);
7990 return has_dumped;
7991 diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
7992 index 588a7aa..a3468b0 100644
7993 --- a/arch/x86/ia32/ia32_signal.c
7994 +++ b/arch/x86/ia32/ia32_signal.c
7995 @@ -167,7 +167,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
7996 }
7997 seg = get_fs();
7998 set_fs(KERNEL_DS);
7999 - ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
8000 + ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
8001 set_fs(seg);
8002 if (ret >= 0 && uoss_ptr) {
8003 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
8004 @@ -374,7 +374,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
8005 */
8006 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8007 size_t frame_size,
8008 - void **fpstate)
8009 + void __user **fpstate)
8010 {
8011 unsigned long sp;
8012
8013 @@ -395,7 +395,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8014
8015 if (used_math()) {
8016 sp = sp - sig_xstate_ia32_size;
8017 - *fpstate = (struct _fpstate_ia32 *) sp;
8018 + *fpstate = (struct _fpstate_ia32 __user *) sp;
8019 if (save_i387_xstate_ia32(*fpstate) < 0)
8020 return (void __user *) -1L;
8021 }
8022 @@ -403,7 +403,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8023 sp -= frame_size;
8024 /* Align the stack pointer according to the i386 ABI,
8025 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
8026 - sp = ((sp + 4) & -16ul) - 4;
8027 + sp = ((sp - 12) & -16ul) - 4;
8028 return (void __user *) sp;
8029 }
8030
8031 @@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
8032 * These are actually not used anymore, but left because some
8033 * gdb versions depend on them as a marker.
8034 */
8035 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
8036 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8037 } put_user_catch(err);
8038
8039 if (err)
8040 @@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8041 0xb8,
8042 __NR_ia32_rt_sigreturn,
8043 0x80cd,
8044 - 0,
8045 + 0
8046 };
8047
8048 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
8049 @@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8050
8051 if (ka->sa.sa_flags & SA_RESTORER)
8052 restorer = ka->sa.sa_restorer;
8053 + else if (current->mm->context.vdso)
8054 + /* Return stub is in 32bit vsyscall page */
8055 + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
8056 else
8057 - restorer = VDSO32_SYMBOL(current->mm->context.vdso,
8058 - rt_sigreturn);
8059 + restorer = &frame->retcode;
8060 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
8061
8062 /*
8063 * Not actually used anymore, but left because some gdb
8064 * versions need it.
8065 */
8066 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
8067 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8068 } put_user_catch(err);
8069
8070 if (err)
8071 diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
8072 index 4edd8eb..29124b4 100644
8073 --- a/arch/x86/ia32/ia32entry.S
8074 +++ b/arch/x86/ia32/ia32entry.S
8075 @@ -13,7 +13,9 @@
8076 #include <asm/thread_info.h>
8077 #include <asm/segment.h>
8078 #include <asm/irqflags.h>
8079 +#include <asm/pgtable.h>
8080 #include <linux/linkage.h>
8081 +#include <asm/alternative-asm.h>
8082
8083 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
8084 #include <linux/elf-em.h>
8085 @@ -93,6 +95,32 @@ ENTRY(native_irq_enable_sysexit)
8086 ENDPROC(native_irq_enable_sysexit)
8087 #endif
8088
8089 + .macro pax_enter_kernel_user
8090 + pax_set_fptr_mask
8091 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8092 + call pax_enter_kernel_user
8093 +#endif
8094 + .endm
8095 +
8096 + .macro pax_exit_kernel_user
8097 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8098 + call pax_exit_kernel_user
8099 +#endif
8100 +#ifdef CONFIG_PAX_RANDKSTACK
8101 + pushq %rax
8102 + pushq %r11
8103 + call pax_randomize_kstack
8104 + popq %r11
8105 + popq %rax
8106 +#endif
8107 + .endm
8108 +
8109 +.macro pax_erase_kstack
8110 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
8111 + call pax_erase_kstack
8112 +#endif
8113 +.endm
8114 +
8115 /*
8116 * 32bit SYSENTER instruction entry.
8117 *
8118 @@ -119,12 +147,6 @@ ENTRY(ia32_sysenter_target)
8119 CFI_REGISTER rsp,rbp
8120 SWAPGS_UNSAFE_STACK
8121 movq PER_CPU_VAR(kernel_stack), %rsp
8122 - addq $(KERNEL_STACK_OFFSET),%rsp
8123 - /*
8124 - * No need to follow this irqs on/off section: the syscall
8125 - * disabled irqs, here we enable it straight after entry:
8126 - */
8127 - ENABLE_INTERRUPTS(CLBR_NONE)
8128 movl %ebp,%ebp /* zero extension */
8129 pushq $__USER32_DS
8130 CFI_ADJUST_CFA_OFFSET 8
8131 @@ -135,28 +157,42 @@ ENTRY(ia32_sysenter_target)
8132 pushfq
8133 CFI_ADJUST_CFA_OFFSET 8
8134 /*CFI_REL_OFFSET rflags,0*/
8135 - movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
8136 - CFI_REGISTER rip,r10
8137 + orl $X86_EFLAGS_IF,(%rsp)
8138 + GET_THREAD_INFO(%r11)
8139 + movl TI_sysenter_return(%r11), %r11d
8140 + CFI_REGISTER rip,r11
8141 pushq $__USER32_CS
8142 CFI_ADJUST_CFA_OFFSET 8
8143 /*CFI_REL_OFFSET cs,0*/
8144 movl %eax, %eax
8145 - pushq %r10
8146 + pushq %r11
8147 CFI_ADJUST_CFA_OFFSET 8
8148 CFI_REL_OFFSET rip,0
8149 pushq %rax
8150 CFI_ADJUST_CFA_OFFSET 8
8151 cld
8152 SAVE_ARGS 0,0,1
8153 + pax_enter_kernel_user
8154 + /*
8155 + * No need to follow this irqs on/off section: the syscall
8156 + * disabled irqs, here we enable it straight after entry:
8157 + */
8158 + ENABLE_INTERRUPTS(CLBR_NONE)
8159 /* no need to do an access_ok check here because rbp has been
8160 32bit zero extended */
8161 +
8162 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8163 + mov $PAX_USER_SHADOW_BASE,%r11
8164 + add %r11,%rbp
8165 +#endif
8166 +
8167 1: movl (%rbp),%ebp
8168 .section __ex_table,"a"
8169 .quad 1b,ia32_badarg
8170 .previous
8171 - GET_THREAD_INFO(%r10)
8172 - orl $TS_COMPAT,TI_status(%r10)
8173 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
8174 + GET_THREAD_INFO(%r11)
8175 + orl $TS_COMPAT,TI_status(%r11)
8176 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8177 CFI_REMEMBER_STATE
8178 jnz sysenter_tracesys
8179 cmpq $(IA32_NR_syscalls-1),%rax
8180 @@ -166,13 +202,15 @@ sysenter_do_call:
8181 sysenter_dispatch:
8182 call *ia32_sys_call_table(,%rax,8)
8183 movq %rax,RAX-ARGOFFSET(%rsp)
8184 - GET_THREAD_INFO(%r10)
8185 + GET_THREAD_INFO(%r11)
8186 DISABLE_INTERRUPTS(CLBR_NONE)
8187 TRACE_IRQS_OFF
8188 - testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
8189 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8190 jnz sysexit_audit
8191 sysexit_from_sys_call:
8192 - andl $~TS_COMPAT,TI_status(%r10)
8193 + pax_exit_kernel_user
8194 + pax_erase_kstack
8195 + andl $~TS_COMPAT,TI_status(%r11)
8196 /* clear IF, that popfq doesn't enable interrupts early */
8197 andl $~0x200,EFLAGS-R11(%rsp)
8198 movl RIP-R11(%rsp),%edx /* User %eip */
8199 @@ -200,6 +238,9 @@ sysexit_from_sys_call:
8200 movl %eax,%esi /* 2nd arg: syscall number */
8201 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
8202 call audit_syscall_entry
8203 +
8204 + pax_erase_kstack
8205 +
8206 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
8207 cmpq $(IA32_NR_syscalls-1),%rax
8208 ja ia32_badsys
8209 @@ -211,7 +252,7 @@ sysexit_from_sys_call:
8210 .endm
8211
8212 .macro auditsys_exit exit
8213 - testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
8214 + testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8215 jnz ia32_ret_from_sys_call
8216 TRACE_IRQS_ON
8217 sti
8218 @@ -221,12 +262,12 @@ sysexit_from_sys_call:
8219 movzbl %al,%edi /* zero-extend that into %edi */
8220 inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
8221 call audit_syscall_exit
8222 - GET_THREAD_INFO(%r10)
8223 + GET_THREAD_INFO(%r11)
8224 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall return value */
8225 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
8226 cli
8227 TRACE_IRQS_OFF
8228 - testl %edi,TI_flags(%r10)
8229 + testl %edi,TI_flags(%r11)
8230 jz \exit
8231 CLEAR_RREGS -ARGOFFSET
8232 jmp int_with_check
8233 @@ -244,7 +285,7 @@ sysexit_audit:
8234
8235 sysenter_tracesys:
8236 #ifdef CONFIG_AUDITSYSCALL
8237 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
8238 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8239 jz sysenter_auditsys
8240 #endif
8241 SAVE_REST
8242 @@ -252,6 +293,9 @@ sysenter_tracesys:
8243 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
8244 movq %rsp,%rdi /* &pt_regs -> arg1 */
8245 call syscall_trace_enter
8246 +
8247 + pax_erase_kstack
8248 +
8249 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
8250 RESTORE_REST
8251 cmpq $(IA32_NR_syscalls-1),%rax
8252 @@ -283,19 +327,20 @@ ENDPROC(ia32_sysenter_target)
8253 ENTRY(ia32_cstar_target)
8254 CFI_STARTPROC32 simple
8255 CFI_SIGNAL_FRAME
8256 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
8257 + CFI_DEF_CFA rsp,0
8258 CFI_REGISTER rip,rcx
8259 /*CFI_REGISTER rflags,r11*/
8260 SWAPGS_UNSAFE_STACK
8261 movl %esp,%r8d
8262 CFI_REGISTER rsp,r8
8263 movq PER_CPU_VAR(kernel_stack),%rsp
8264 + SAVE_ARGS 8*6,1,1
8265 + pax_enter_kernel_user
8266 /*
8267 * No need to follow this irqs on/off section: the syscall
8268 * disabled irqs and here we enable it straight after entry:
8269 */
8270 ENABLE_INTERRUPTS(CLBR_NONE)
8271 - SAVE_ARGS 8,1,1
8272 movl %eax,%eax /* zero extension */
8273 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
8274 movq %rcx,RIP-ARGOFFSET(%rsp)
8275 @@ -311,13 +356,19 @@ ENTRY(ia32_cstar_target)
8276 /* no need to do an access_ok check here because r8 has been
8277 32bit zero extended */
8278 /* hardware stack frame is complete now */
8279 +
8280 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8281 + mov $PAX_USER_SHADOW_BASE,%r11
8282 + add %r11,%r8
8283 +#endif
8284 +
8285 1: movl (%r8),%r9d
8286 .section __ex_table,"a"
8287 .quad 1b,ia32_badarg
8288 .previous
8289 - GET_THREAD_INFO(%r10)
8290 - orl $TS_COMPAT,TI_status(%r10)
8291 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
8292 + GET_THREAD_INFO(%r11)
8293 + orl $TS_COMPAT,TI_status(%r11)
8294 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8295 CFI_REMEMBER_STATE
8296 jnz cstar_tracesys
8297 cmpq $IA32_NR_syscalls-1,%rax
8298 @@ -327,13 +378,15 @@ cstar_do_call:
8299 cstar_dispatch:
8300 call *ia32_sys_call_table(,%rax,8)
8301 movq %rax,RAX-ARGOFFSET(%rsp)
8302 - GET_THREAD_INFO(%r10)
8303 + GET_THREAD_INFO(%r11)
8304 DISABLE_INTERRUPTS(CLBR_NONE)
8305 TRACE_IRQS_OFF
8306 - testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
8307 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8308 jnz sysretl_audit
8309 sysretl_from_sys_call:
8310 - andl $~TS_COMPAT,TI_status(%r10)
8311 + pax_exit_kernel_user
8312 + pax_erase_kstack
8313 + andl $~TS_COMPAT,TI_status(%r11)
8314 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
8315 movl RIP-ARGOFFSET(%rsp),%ecx
8316 CFI_REGISTER rip,rcx
8317 @@ -361,7 +414,7 @@ sysretl_audit:
8318
8319 cstar_tracesys:
8320 #ifdef CONFIG_AUDITSYSCALL
8321 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
8322 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8323 jz cstar_auditsys
8324 #endif
8325 xchgl %r9d,%ebp
8326 @@ -370,6 +423,9 @@ cstar_tracesys:
8327 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
8328 movq %rsp,%rdi /* &pt_regs -> arg1 */
8329 call syscall_trace_enter
8330 +
8331 + pax_erase_kstack
8332 +
8333 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
8334 RESTORE_REST
8335 xchgl %ebp,%r9d
8336 @@ -415,11 +471,6 @@ ENTRY(ia32_syscall)
8337 CFI_REL_OFFSET rip,RIP-RIP
8338 PARAVIRT_ADJUST_EXCEPTION_FRAME
8339 SWAPGS
8340 - /*
8341 - * No need to follow this irqs on/off section: the syscall
8342 - * disabled irqs and here we enable it straight after entry:
8343 - */
8344 - ENABLE_INTERRUPTS(CLBR_NONE)
8345 movl %eax,%eax
8346 pushq %rax
8347 CFI_ADJUST_CFA_OFFSET 8
8348 @@ -427,9 +478,15 @@ ENTRY(ia32_syscall)
8349 /* note the registers are not zero extended to the sf.
8350 this could be a problem. */
8351 SAVE_ARGS 0,0,1
8352 - GET_THREAD_INFO(%r10)
8353 - orl $TS_COMPAT,TI_status(%r10)
8354 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
8355 + pax_enter_kernel_user
8356 + /*
8357 + * No need to follow this irqs on/off section: the syscall
8358 + * disabled irqs and here we enable it straight after entry:
8359 + */
8360 + ENABLE_INTERRUPTS(CLBR_NONE)
8361 + GET_THREAD_INFO(%r11)
8362 + orl $TS_COMPAT,TI_status(%r11)
8363 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8364 jnz ia32_tracesys
8365 cmpq $(IA32_NR_syscalls-1),%rax
8366 ja ia32_badsys
8367 @@ -448,6 +505,9 @@ ia32_tracesys:
8368 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
8369 movq %rsp,%rdi /* &pt_regs -> arg1 */
8370 call syscall_trace_enter
8371 +
8372 + pax_erase_kstack
8373 +
8374 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
8375 RESTORE_REST
8376 cmpq $(IA32_NR_syscalls-1),%rax
8377 @@ -462,6 +522,7 @@ ia32_badsys:
8378
8379 quiet_ni_syscall:
8380 movq $-ENOSYS,%rax
8381 + pax_force_retaddr
8382 ret
8383 CFI_ENDPROC
8384
8385 diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
8386 index 016218c..47ccbdd 100644
8387 --- a/arch/x86/ia32/sys_ia32.c
8388 +++ b/arch/x86/ia32/sys_ia32.c
8389 @@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
8390 */
8391 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
8392 {
8393 - typeof(ubuf->st_uid) uid = 0;
8394 - typeof(ubuf->st_gid) gid = 0;
8395 + typeof(((struct stat64 *)0)->st_uid) uid = 0;
8396 + typeof(((struct stat64 *)0)->st_gid) gid = 0;
8397 SET_UID(uid, stat->uid);
8398 SET_GID(gid, stat->gid);
8399 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
8400 @@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
8401 }
8402 set_fs(KERNEL_DS);
8403 ret = sys_rt_sigprocmask(how,
8404 - set ? (sigset_t __user *)&s : NULL,
8405 - oset ? (sigset_t __user *)&s : NULL,
8406 + set ? (sigset_t __force_user *)&s : NULL,
8407 + oset ? (sigset_t __force_user *)&s : NULL,
8408 sigsetsize);
8409 set_fs(old_fs);
8410 if (ret)
8411 @@ -371,7 +371,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
8412 mm_segment_t old_fs = get_fs();
8413
8414 set_fs(KERNEL_DS);
8415 - ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
8416 + ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
8417 set_fs(old_fs);
8418 if (put_compat_timespec(&t, interval))
8419 return -EFAULT;
8420 @@ -387,7 +387,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
8421 mm_segment_t old_fs = get_fs();
8422
8423 set_fs(KERNEL_DS);
8424 - ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
8425 + ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
8426 set_fs(old_fs);
8427 if (!ret) {
8428 switch (_NSIG_WORDS) {
8429 @@ -412,7 +412,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
8430 if (copy_siginfo_from_user32(&info, uinfo))
8431 return -EFAULT;
8432 set_fs(KERNEL_DS);
8433 - ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
8434 + ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
8435 set_fs(old_fs);
8436 return ret;
8437 }
8438 @@ -513,7 +513,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
8439 return -EFAULT;
8440
8441 set_fs(KERNEL_DS);
8442 - ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
8443 + ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
8444 count);
8445 set_fs(old_fs);
8446
8447 diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
8448 index e2077d3..17d07ad 100644
8449 --- a/arch/x86/include/asm/alternative-asm.h
8450 +++ b/arch/x86/include/asm/alternative-asm.h
8451 @@ -8,10 +8,10 @@
8452
8453 #ifdef CONFIG_SMP
8454 .macro LOCK_PREFIX
8455 -1: lock
8456 +672: lock
8457 .section .smp_locks,"a"
8458 .align 4
8459 - X86_ALIGN 1b
8460 + X86_ALIGN 672b
8461 .previous
8462 .endm
8463 #else
8464 @@ -19,4 +19,43 @@
8465 .endm
8466 #endif
8467
8468 +#ifdef KERNEXEC_PLUGIN
8469 + .macro pax_force_retaddr_bts rip=0
8470 + btsq $63,\rip(%rsp)
8471 + .endm
8472 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
8473 + .macro pax_force_retaddr rip=0, reload=0
8474 + btsq $63,\rip(%rsp)
8475 + .endm
8476 + .macro pax_force_fptr ptr
8477 + btsq $63,\ptr
8478 + .endm
8479 + .macro pax_set_fptr_mask
8480 + .endm
8481 +#endif
8482 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
8483 + .macro pax_force_retaddr rip=0, reload=0
8484 + .if \reload
8485 + pax_set_fptr_mask
8486 + .endif
8487 + orq %r10,\rip(%rsp)
8488 + .endm
8489 + .macro pax_force_fptr ptr
8490 + orq %r10,\ptr
8491 + .endm
8492 + .macro pax_set_fptr_mask
8493 + movabs $0x8000000000000000,%r10
8494 + .endm
8495 +#endif
8496 +#else
8497 + .macro pax_force_retaddr rip=0, reload=0
8498 + .endm
8499 + .macro pax_force_fptr ptr
8500 + .endm
8501 + .macro pax_force_retaddr_bts rip=0
8502 + .endm
8503 + .macro pax_set_fptr_mask
8504 + .endm
8505 +#endif
8506 +
8507 #endif /* __ASSEMBLY__ */
8508 diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
8509 index c240efc..fdfadf3 100644
8510 --- a/arch/x86/include/asm/alternative.h
8511 +++ b/arch/x86/include/asm/alternative.h
8512 @@ -85,7 +85,7 @@ static inline void alternatives_smp_switch(int smp) {}
8513 " .byte 662b-661b\n" /* sourcelen */ \
8514 " .byte 664f-663f\n" /* replacementlen */ \
8515 ".previous\n" \
8516 - ".section .altinstr_replacement, \"ax\"\n" \
8517 + ".section .altinstr_replacement, \"a\"\n" \
8518 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
8519 ".previous"
8520
8521 diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
8522 index 474d80d..1f97d58 100644
8523 --- a/arch/x86/include/asm/apic.h
8524 +++ b/arch/x86/include/asm/apic.h
8525 @@ -46,7 +46,7 @@ static inline void generic_apic_probe(void)
8526
8527 #ifdef CONFIG_X86_LOCAL_APIC
8528
8529 -extern unsigned int apic_verbosity;
8530 +extern int apic_verbosity;
8531 extern int local_apic_timer_c2_ok;
8532
8533 extern int disable_apic;
8534 diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
8535 index 20370c6..a2eb9b0 100644
8536 --- a/arch/x86/include/asm/apm.h
8537 +++ b/arch/x86/include/asm/apm.h
8538 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
8539 __asm__ __volatile__(APM_DO_ZERO_SEGS
8540 "pushl %%edi\n\t"
8541 "pushl %%ebp\n\t"
8542 - "lcall *%%cs:apm_bios_entry\n\t"
8543 + "lcall *%%ss:apm_bios_entry\n\t"
8544 "setc %%al\n\t"
8545 "popl %%ebp\n\t"
8546 "popl %%edi\n\t"
8547 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
8548 __asm__ __volatile__(APM_DO_ZERO_SEGS
8549 "pushl %%edi\n\t"
8550 "pushl %%ebp\n\t"
8551 - "lcall *%%cs:apm_bios_entry\n\t"
8552 + "lcall *%%ss:apm_bios_entry\n\t"
8553 "setc %%bl\n\t"
8554 "popl %%ebp\n\t"
8555 "popl %%edi\n\t"
8556 diff --git a/arch/x86/include/asm/atomic_32.h b/arch/x86/include/asm/atomic_32.h
8557 index dc5a667..939040c 100644
8558 --- a/arch/x86/include/asm/atomic_32.h
8559 +++ b/arch/x86/include/asm/atomic_32.h
8560 @@ -25,6 +25,17 @@ static inline int atomic_read(const atomic_t *v)
8561 }
8562
8563 /**
8564 + * atomic_read_unchecked - read atomic variable
8565 + * @v: pointer of type atomic_unchecked_t
8566 + *
8567 + * Atomically reads the value of @v.
8568 + */
8569 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
8570 +{
8571 + return v->counter;
8572 +}
8573 +
8574 +/**
8575 * atomic_set - set atomic variable
8576 * @v: pointer of type atomic_t
8577 * @i: required value
8578 @@ -37,6 +48,18 @@ static inline void atomic_set(atomic_t *v, int i)
8579 }
8580
8581 /**
8582 + * atomic_set_unchecked - set atomic variable
8583 + * @v: pointer of type atomic_unchecked_t
8584 + * @i: required value
8585 + *
8586 + * Atomically sets the value of @v to @i.
8587 + */
8588 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
8589 +{
8590 + v->counter = i;
8591 +}
8592 +
8593 +/**
8594 * atomic_add - add integer to atomic variable
8595 * @i: integer value to add
8596 * @v: pointer of type atomic_t
8597 @@ -45,7 +68,29 @@ static inline void atomic_set(atomic_t *v, int i)
8598 */
8599 static inline void atomic_add(int i, atomic_t *v)
8600 {
8601 - asm volatile(LOCK_PREFIX "addl %1,%0"
8602 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
8603 +
8604 +#ifdef CONFIG_PAX_REFCOUNT
8605 + "jno 0f\n"
8606 + LOCK_PREFIX "subl %1,%0\n"
8607 + "int $4\n0:\n"
8608 + _ASM_EXTABLE(0b, 0b)
8609 +#endif
8610 +
8611 + : "+m" (v->counter)
8612 + : "ir" (i));
8613 +}
8614 +
8615 +/**
8616 + * atomic_add_unchecked - add integer to atomic variable
8617 + * @i: integer value to add
8618 + * @v: pointer of type atomic_unchecked_t
8619 + *
8620 + * Atomically adds @i to @v.
8621 + */
8622 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
8623 +{
8624 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
8625 : "+m" (v->counter)
8626 : "ir" (i));
8627 }
8628 @@ -59,7 +104,29 @@ static inline void atomic_add(int i, atomic_t *v)
8629 */
8630 static inline void atomic_sub(int i, atomic_t *v)
8631 {
8632 - asm volatile(LOCK_PREFIX "subl %1,%0"
8633 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
8634 +
8635 +#ifdef CONFIG_PAX_REFCOUNT
8636 + "jno 0f\n"
8637 + LOCK_PREFIX "addl %1,%0\n"
8638 + "int $4\n0:\n"
8639 + _ASM_EXTABLE(0b, 0b)
8640 +#endif
8641 +
8642 + : "+m" (v->counter)
8643 + : "ir" (i));
8644 +}
8645 +
8646 +/**
8647 + * atomic_sub_unchecked - subtract integer from atomic variable
8648 + * @i: integer value to subtract
8649 + * @v: pointer of type atomic_unchecked_t
8650 + *
8651 + * Atomically subtracts @i from @v.
8652 + */
8653 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
8654 +{
8655 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
8656 : "+m" (v->counter)
8657 : "ir" (i));
8658 }
8659 @@ -77,7 +144,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
8660 {
8661 unsigned char c;
8662
8663 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
8664 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
8665 +
8666 +#ifdef CONFIG_PAX_REFCOUNT
8667 + "jno 0f\n"
8668 + LOCK_PREFIX "addl %2,%0\n"
8669 + "int $4\n0:\n"
8670 + _ASM_EXTABLE(0b, 0b)
8671 +#endif
8672 +
8673 + "sete %1\n"
8674 : "+m" (v->counter), "=qm" (c)
8675 : "ir" (i) : "memory");
8676 return c;
8677 @@ -91,7 +167,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
8678 */
8679 static inline void atomic_inc(atomic_t *v)
8680 {
8681 - asm volatile(LOCK_PREFIX "incl %0"
8682 + asm volatile(LOCK_PREFIX "incl %0\n"
8683 +
8684 +#ifdef CONFIG_PAX_REFCOUNT
8685 + "jno 0f\n"
8686 + LOCK_PREFIX "decl %0\n"
8687 + "int $4\n0:\n"
8688 + _ASM_EXTABLE(0b, 0b)
8689 +#endif
8690 +
8691 + : "+m" (v->counter));
8692 +}
8693 +
8694 +/**
8695 + * atomic_inc_unchecked - increment atomic variable
8696 + * @v: pointer of type atomic_unchecked_t
8697 + *
8698 + * Atomically increments @v by 1.
8699 + */
8700 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
8701 +{
8702 + asm volatile(LOCK_PREFIX "incl %0\n"
8703 : "+m" (v->counter));
8704 }
8705
8706 @@ -103,7 +199,27 @@ static inline void atomic_inc(atomic_t *v)
8707 */
8708 static inline void atomic_dec(atomic_t *v)
8709 {
8710 - asm volatile(LOCK_PREFIX "decl %0"
8711 + asm volatile(LOCK_PREFIX "decl %0\n"
8712 +
8713 +#ifdef CONFIG_PAX_REFCOUNT
8714 + "jno 0f\n"
8715 + LOCK_PREFIX "incl %0\n"
8716 + "int $4\n0:\n"
8717 + _ASM_EXTABLE(0b, 0b)
8718 +#endif
8719 +
8720 + : "+m" (v->counter));
8721 +}
8722 +
8723 +/**
8724 + * atomic_dec_unchecked - decrement atomic variable
8725 + * @v: pointer of type atomic_unchecked_t
8726 + *
8727 + * Atomically decrements @v by 1.
8728 + */
8729 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
8730 +{
8731 + asm volatile(LOCK_PREFIX "decl %0\n"
8732 : "+m" (v->counter));
8733 }
8734
8735 @@ -119,7 +235,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
8736 {
8737 unsigned char c;
8738
8739 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
8740 + asm volatile(LOCK_PREFIX "decl %0\n"
8741 +
8742 +#ifdef CONFIG_PAX_REFCOUNT
8743 + "jno 0f\n"
8744 + LOCK_PREFIX "incl %0\n"
8745 + "int $4\n0:\n"
8746 + _ASM_EXTABLE(0b, 0b)
8747 +#endif
8748 +
8749 + "sete %1\n"
8750 : "+m" (v->counter), "=qm" (c)
8751 : : "memory");
8752 return c != 0;
8753 @@ -137,7 +262,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
8754 {
8755 unsigned char c;
8756
8757 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
8758 + asm volatile(LOCK_PREFIX "incl %0\n"
8759 +
8760 +#ifdef CONFIG_PAX_REFCOUNT
8761 + "jno 0f\n"
8762 + LOCK_PREFIX "decl %0\n"
8763 + "into\n0:\n"
8764 + _ASM_EXTABLE(0b, 0b)
8765 +#endif
8766 +
8767 + "sete %1\n"
8768 + : "+m" (v->counter), "=qm" (c)
8769 + : : "memory");
8770 + return c != 0;
8771 +}
8772 +
8773 +/**
8774 + * atomic_inc_and_test_unchecked - increment and test
8775 + * @v: pointer of type atomic_unchecked_t
8776 + *
8777 + * Atomically increments @v by 1
8778 + * and returns true if the result is zero, or false for all
8779 + * other cases.
8780 + */
8781 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
8782 +{
8783 + unsigned char c;
8784 +
8785 + asm volatile(LOCK_PREFIX "incl %0\n"
8786 + "sete %1\n"
8787 : "+m" (v->counter), "=qm" (c)
8788 : : "memory");
8789 return c != 0;
8790 @@ -156,7 +309,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
8791 {
8792 unsigned char c;
8793
8794 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
8795 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
8796 +
8797 +#ifdef CONFIG_PAX_REFCOUNT
8798 + "jno 0f\n"
8799 + LOCK_PREFIX "subl %2,%0\n"
8800 + "int $4\n0:\n"
8801 + _ASM_EXTABLE(0b, 0b)
8802 +#endif
8803 +
8804 + "sets %1\n"
8805 : "+m" (v->counter), "=qm" (c)
8806 : "ir" (i) : "memory");
8807 return c;
8808 @@ -179,7 +341,15 @@ static inline int atomic_add_return(int i, atomic_t *v)
8809 #endif
8810 /* Modern 486+ processor */
8811 __i = i;
8812 - asm volatile(LOCK_PREFIX "xaddl %0, %1"
8813 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
8814 +
8815 +#ifdef CONFIG_PAX_REFCOUNT
8816 + "jno 0f\n"
8817 + "movl %0, %1\n"
8818 + "int $4\n0:\n"
8819 + _ASM_EXTABLE(0b, 0b)
8820 +#endif
8821 +
8822 : "+r" (i), "+m" (v->counter)
8823 : : "memory");
8824 return i + __i;
8825 @@ -195,6 +365,38 @@ no_xadd: /* Legacy 386 processor */
8826 }
8827
8828 /**
8829 + * atomic_add_return_unchecked - add integer and return
8830 + * @v: pointer of type atomic_unchecked_t
8831 + * @i: integer value to add
8832 + *
8833 + * Atomically adds @i to @v and returns @i + @v
8834 + */
8835 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
8836 +{
8837 + int __i;
8838 +#ifdef CONFIG_M386
8839 + unsigned long flags;
8840 + if (unlikely(boot_cpu_data.x86 <= 3))
8841 + goto no_xadd;
8842 +#endif
8843 + /* Modern 486+ processor */
8844 + __i = i;
8845 + asm volatile(LOCK_PREFIX "xaddl %0, %1"
8846 + : "+r" (i), "+m" (v->counter)
8847 + : : "memory");
8848 + return i + __i;
8849 +
8850 +#ifdef CONFIG_M386
8851 +no_xadd: /* Legacy 386 processor */
8852 + local_irq_save(flags);
8853 + __i = atomic_read_unchecked(v);
8854 + atomic_set_unchecked(v, i + __i);
8855 + local_irq_restore(flags);
8856 + return i + __i;
8857 +#endif
8858 +}
8859 +
8860 +/**
8861 * atomic_sub_return - subtract integer and return
8862 * @v: pointer of type atomic_t
8863 * @i: integer value to subtract
8864 @@ -211,11 +413,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
8865 return cmpxchg(&v->counter, old, new);
8866 }
8867
8868 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
8869 +{
8870 + return cmpxchg(&v->counter, old, new);
8871 +}
8872 +
8873 static inline int atomic_xchg(atomic_t *v, int new)
8874 {
8875 return xchg(&v->counter, new);
8876 }
8877
8878 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
8879 +{
8880 + return xchg(&v->counter, new);
8881 +}
8882 +
8883 /**
8884 * atomic_add_unless - add unless the number is already a given value
8885 * @v: pointer of type atomic_t
8886 @@ -227,22 +439,39 @@ static inline int atomic_xchg(atomic_t *v, int new)
8887 */
8888 static inline int atomic_add_unless(atomic_t *v, int a, int u)
8889 {
8890 - int c, old;
8891 + int c, old, new;
8892 c = atomic_read(v);
8893 for (;;) {
8894 - if (unlikely(c == (u)))
8895 + if (unlikely(c == u))
8896 break;
8897 - old = atomic_cmpxchg((v), c, c + (a));
8898 +
8899 + asm volatile("addl %2,%0\n"
8900 +
8901 +#ifdef CONFIG_PAX_REFCOUNT
8902 + "jno 0f\n"
8903 + "subl %2,%0\n"
8904 + "int $4\n0:\n"
8905 + _ASM_EXTABLE(0b, 0b)
8906 +#endif
8907 +
8908 + : "=r" (new)
8909 + : "0" (c), "ir" (a));
8910 +
8911 + old = atomic_cmpxchg(v, c, new);
8912 if (likely(old == c))
8913 break;
8914 c = old;
8915 }
8916 - return c != (u);
8917 + return c != u;
8918 }
8919
8920 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
8921
8922 #define atomic_inc_return(v) (atomic_add_return(1, v))
8923 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
8924 +{
8925 + return atomic_add_return_unchecked(1, v);
8926 +}
8927 #define atomic_dec_return(v) (atomic_sub_return(1, v))
8928
8929 /* These are x86-specific, used by some header files */
8930 @@ -266,9 +495,18 @@ typedef struct {
8931 u64 __aligned(8) counter;
8932 } atomic64_t;
8933
8934 +#ifdef CONFIG_PAX_REFCOUNT
8935 +typedef struct {
8936 + u64 __aligned(8) counter;
8937 +} atomic64_unchecked_t;
8938 +#else
8939 +typedef atomic64_t atomic64_unchecked_t;
8940 +#endif
8941 +
8942 #define ATOMIC64_INIT(val) { (val) }
8943
8944 extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
8945 +extern u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val);
8946
8947 /**
8948 * atomic64_xchg - xchg atomic64 variable
8949 @@ -279,6 +517,7 @@ extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
8950 * the old value.
8951 */
8952 extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
8953 +extern u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
8954
8955 /**
8956 * atomic64_set - set atomic64 variable
8957 @@ -290,6 +529,15 @@ extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
8958 extern void atomic64_set(atomic64_t *ptr, u64 new_val);
8959
8960 /**
8961 + * atomic64_unchecked_set - set atomic64 variable
8962 + * @ptr: pointer to type atomic64_unchecked_t
8963 + * @new_val: value to assign
8964 + *
8965 + * Atomically sets the value of @ptr to @new_val.
8966 + */
8967 +extern void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
8968 +
8969 +/**
8970 * atomic64_read - read atomic64 variable
8971 * @ptr: pointer to type atomic64_t
8972 *
8973 @@ -317,7 +565,33 @@ static inline u64 atomic64_read(atomic64_t *ptr)
8974 return res;
8975 }
8976
8977 -extern u64 atomic64_read(atomic64_t *ptr);
8978 +/**
8979 + * atomic64_read_unchecked - read atomic64 variable
8980 + * @ptr: pointer to type atomic64_unchecked_t
8981 + *
8982 + * Atomically reads the value of @ptr and returns it.
8983 + */
8984 +static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *ptr)
8985 +{
8986 + u64 res;
8987 +
8988 + /*
8989 + * Note, we inline this atomic64_unchecked_t primitive because
8990 + * it only clobbers EAX/EDX and leaves the others
8991 + * untouched. We also (somewhat subtly) rely on the
8992 + * fact that cmpxchg8b returns the current 64-bit value
8993 + * of the memory location we are touching:
8994 + */
8995 + asm volatile(
8996 + "mov %%ebx, %%eax\n\t"
8997 + "mov %%ecx, %%edx\n\t"
8998 + LOCK_PREFIX "cmpxchg8b %1\n"
8999 + : "=&A" (res)
9000 + : "m" (*ptr)
9001 + );
9002 +
9003 + return res;
9004 +}
9005
9006 /**
9007 * atomic64_add_return - add and return
9008 @@ -332,8 +606,11 @@ extern u64 atomic64_add_return(u64 delta, atomic64_t *ptr);
9009 * Other variants with different arithmetic operators:
9010 */
9011 extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr);
9012 +extern u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr);
9013 extern u64 atomic64_inc_return(atomic64_t *ptr);
9014 +extern u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr);
9015 extern u64 atomic64_dec_return(atomic64_t *ptr);
9016 +extern u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr);
9017
9018 /**
9019 * atomic64_add - add integer to atomic64 variable
9020 @@ -345,6 +622,15 @@ extern u64 atomic64_dec_return(atomic64_t *ptr);
9021 extern void atomic64_add(u64 delta, atomic64_t *ptr);
9022
9023 /**
9024 + * atomic64_add_unchecked - add integer to atomic64 variable
9025 + * @delta: integer value to add
9026 + * @ptr: pointer to type atomic64_unchecked_t
9027 + *
9028 + * Atomically adds @delta to @ptr.
9029 + */
9030 +extern void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr);
9031 +
9032 +/**
9033 * atomic64_sub - subtract the atomic64 variable
9034 * @delta: integer value to subtract
9035 * @ptr: pointer to type atomic64_t
9036 @@ -354,6 +640,15 @@ extern void atomic64_add(u64 delta, atomic64_t *ptr);
9037 extern void atomic64_sub(u64 delta, atomic64_t *ptr);
9038
9039 /**
9040 + * atomic64_sub_unchecked - subtract the atomic64 variable
9041 + * @delta: integer value to subtract
9042 + * @ptr: pointer to type atomic64_unchecked_t
9043 + *
9044 + * Atomically subtracts @delta from @ptr.
9045 + */
9046 +extern void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr);
9047 +
9048 +/**
9049 * atomic64_sub_and_test - subtract value from variable and test result
9050 * @delta: integer value to subtract
9051 * @ptr: pointer to type atomic64_t
9052 @@ -373,6 +668,14 @@ extern int atomic64_sub_and_test(u64 delta, atomic64_t *ptr);
9053 extern void atomic64_inc(atomic64_t *ptr);
9054
9055 /**
9056 + * atomic64_inc_unchecked - increment atomic64 variable
9057 + * @ptr: pointer to type atomic64_unchecked_t
9058 + *
9059 + * Atomically increments @ptr by 1.
9060 + */
9061 +extern void atomic64_inc_unchecked(atomic64_unchecked_t *ptr);
9062 +
9063 +/**
9064 * atomic64_dec - decrement atomic64 variable
9065 * @ptr: pointer to type atomic64_t
9066 *
9067 @@ -381,6 +684,14 @@ extern void atomic64_inc(atomic64_t *ptr);
9068 extern void atomic64_dec(atomic64_t *ptr);
9069
9070 /**
9071 + * atomic64_dec_unchecked - decrement atomic64 variable
9072 + * @ptr: pointer to type atomic64_unchecked_t
9073 + *
9074 + * Atomically decrements @ptr by 1.
9075 + */
9076 +extern void atomic64_dec_unchecked(atomic64_unchecked_t *ptr);
9077 +
9078 +/**
9079 * atomic64_dec_and_test - decrement and test
9080 * @ptr: pointer to type atomic64_t
9081 *
9082 diff --git a/arch/x86/include/asm/atomic_64.h b/arch/x86/include/asm/atomic_64.h
9083 index d605dc2..fafd7bd 100644
9084 --- a/arch/x86/include/asm/atomic_64.h
9085 +++ b/arch/x86/include/asm/atomic_64.h
9086 @@ -24,6 +24,17 @@ static inline int atomic_read(const atomic_t *v)
9087 }
9088
9089 /**
9090 + * atomic_read_unchecked - read atomic variable
9091 + * @v: pointer of type atomic_unchecked_t
9092 + *
9093 + * Atomically reads the value of @v.
9094 + */
9095 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
9096 +{
9097 + return v->counter;
9098 +}
9099 +
9100 +/**
9101 * atomic_set - set atomic variable
9102 * @v: pointer of type atomic_t
9103 * @i: required value
9104 @@ -36,6 +47,18 @@ static inline void atomic_set(atomic_t *v, int i)
9105 }
9106
9107 /**
9108 + * atomic_set_unchecked - set atomic variable
9109 + * @v: pointer of type atomic_unchecked_t
9110 + * @i: required value
9111 + *
9112 + * Atomically sets the value of @v to @i.
9113 + */
9114 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
9115 +{
9116 + v->counter = i;
9117 +}
9118 +
9119 +/**
9120 * atomic_add - add integer to atomic variable
9121 * @i: integer value to add
9122 * @v: pointer of type atomic_t
9123 @@ -44,7 +67,29 @@ static inline void atomic_set(atomic_t *v, int i)
9124 */
9125 static inline void atomic_add(int i, atomic_t *v)
9126 {
9127 - asm volatile(LOCK_PREFIX "addl %1,%0"
9128 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
9129 +
9130 +#ifdef CONFIG_PAX_REFCOUNT
9131 + "jno 0f\n"
9132 + LOCK_PREFIX "subl %1,%0\n"
9133 + "int $4\n0:\n"
9134 + _ASM_EXTABLE(0b, 0b)
9135 +#endif
9136 +
9137 + : "=m" (v->counter)
9138 + : "ir" (i), "m" (v->counter));
9139 +}
9140 +
9141 +/**
9142 + * atomic_add_unchecked - add integer to atomic variable
9143 + * @i: integer value to add
9144 + * @v: pointer of type atomic_unchecked_t
9145 + *
9146 + * Atomically adds @i to @v.
9147 + */
9148 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
9149 +{
9150 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
9151 : "=m" (v->counter)
9152 : "ir" (i), "m" (v->counter));
9153 }
9154 @@ -58,7 +103,29 @@ static inline void atomic_add(int i, atomic_t *v)
9155 */
9156 static inline void atomic_sub(int i, atomic_t *v)
9157 {
9158 - asm volatile(LOCK_PREFIX "subl %1,%0"
9159 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
9160 +
9161 +#ifdef CONFIG_PAX_REFCOUNT
9162 + "jno 0f\n"
9163 + LOCK_PREFIX "addl %1,%0\n"
9164 + "int $4\n0:\n"
9165 + _ASM_EXTABLE(0b, 0b)
9166 +#endif
9167 +
9168 + : "=m" (v->counter)
9169 + : "ir" (i), "m" (v->counter));
9170 +}
9171 +
9172 +/**
9173 + * atomic_sub_unchecked - subtract the atomic variable
9174 + * @i: integer value to subtract
9175 + * @v: pointer of type atomic_unchecked_t
9176 + *
9177 + * Atomically subtracts @i from @v.
9178 + */
9179 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
9180 +{
9181 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
9182 : "=m" (v->counter)
9183 : "ir" (i), "m" (v->counter));
9184 }
9185 @@ -76,7 +143,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
9186 {
9187 unsigned char c;
9188
9189 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
9190 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
9191 +
9192 +#ifdef CONFIG_PAX_REFCOUNT
9193 + "jno 0f\n"
9194 + LOCK_PREFIX "addl %2,%0\n"
9195 + "int $4\n0:\n"
9196 + _ASM_EXTABLE(0b, 0b)
9197 +#endif
9198 +
9199 + "sete %1\n"
9200 : "=m" (v->counter), "=qm" (c)
9201 : "ir" (i), "m" (v->counter) : "memory");
9202 return c;
9203 @@ -90,7 +166,28 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
9204 */
9205 static inline void atomic_inc(atomic_t *v)
9206 {
9207 - asm volatile(LOCK_PREFIX "incl %0"
9208 + asm volatile(LOCK_PREFIX "incl %0\n"
9209 +
9210 +#ifdef CONFIG_PAX_REFCOUNT
9211 + "jno 0f\n"
9212 + LOCK_PREFIX "decl %0\n"
9213 + "int $4\n0:\n"
9214 + _ASM_EXTABLE(0b, 0b)
9215 +#endif
9216 +
9217 + : "=m" (v->counter)
9218 + : "m" (v->counter));
9219 +}
9220 +
9221 +/**
9222 + * atomic_inc_unchecked - increment atomic variable
9223 + * @v: pointer of type atomic_unchecked_t
9224 + *
9225 + * Atomically increments @v by 1.
9226 + */
9227 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
9228 +{
9229 + asm volatile(LOCK_PREFIX "incl %0\n"
9230 : "=m" (v->counter)
9231 : "m" (v->counter));
9232 }
9233 @@ -103,7 +200,28 @@ static inline void atomic_inc(atomic_t *v)
9234 */
9235 static inline void atomic_dec(atomic_t *v)
9236 {
9237 - asm volatile(LOCK_PREFIX "decl %0"
9238 + asm volatile(LOCK_PREFIX "decl %0\n"
9239 +
9240 +#ifdef CONFIG_PAX_REFCOUNT
9241 + "jno 0f\n"
9242 + LOCK_PREFIX "incl %0\n"
9243 + "int $4\n0:\n"
9244 + _ASM_EXTABLE(0b, 0b)
9245 +#endif
9246 +
9247 + : "=m" (v->counter)
9248 + : "m" (v->counter));
9249 +}
9250 +
9251 +/**
9252 + * atomic_dec_unchecked - decrement atomic variable
9253 + * @v: pointer of type atomic_unchecked_t
9254 + *
9255 + * Atomically decrements @v by 1.
9256 + */
9257 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
9258 +{
9259 + asm volatile(LOCK_PREFIX "decl %0\n"
9260 : "=m" (v->counter)
9261 : "m" (v->counter));
9262 }
9263 @@ -120,7 +238,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
9264 {
9265 unsigned char c;
9266
9267 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
9268 + asm volatile(LOCK_PREFIX "decl %0\n"
9269 +
9270 +#ifdef CONFIG_PAX_REFCOUNT
9271 + "jno 0f\n"
9272 + LOCK_PREFIX "incl %0\n"
9273 + "int $4\n0:\n"
9274 + _ASM_EXTABLE(0b, 0b)
9275 +#endif
9276 +
9277 + "sete %1\n"
9278 : "=m" (v->counter), "=qm" (c)
9279 : "m" (v->counter) : "memory");
9280 return c != 0;
9281 @@ -138,7 +265,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
9282 {
9283 unsigned char c;
9284
9285 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
9286 + asm volatile(LOCK_PREFIX "incl %0\n"
9287 +
9288 +#ifdef CONFIG_PAX_REFCOUNT
9289 + "jno 0f\n"
9290 + LOCK_PREFIX "decl %0\n"
9291 + "int $4\n0:\n"
9292 + _ASM_EXTABLE(0b, 0b)
9293 +#endif
9294 +
9295 + "sete %1\n"
9296 + : "=m" (v->counter), "=qm" (c)
9297 + : "m" (v->counter) : "memory");
9298 + return c != 0;
9299 +}
9300 +
9301 +/**
9302 + * atomic_inc_and_test_unchecked - increment and test
9303 + * @v: pointer of type atomic_unchecked_t
9304 + *
9305 + * Atomically increments @v by 1
9306 + * and returns true if the result is zero, or false for all
9307 + * other cases.
9308 + */
9309 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
9310 +{
9311 + unsigned char c;
9312 +
9313 + asm volatile(LOCK_PREFIX "incl %0\n"
9314 + "sete %1\n"
9315 : "=m" (v->counter), "=qm" (c)
9316 : "m" (v->counter) : "memory");
9317 return c != 0;
9318 @@ -157,7 +312,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
9319 {
9320 unsigned char c;
9321
9322 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
9323 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
9324 +
9325 +#ifdef CONFIG_PAX_REFCOUNT
9326 + "jno 0f\n"
9327 + LOCK_PREFIX "subl %2,%0\n"
9328 + "int $4\n0:\n"
9329 + _ASM_EXTABLE(0b, 0b)
9330 +#endif
9331 +
9332 + "sets %1\n"
9333 : "=m" (v->counter), "=qm" (c)
9334 : "ir" (i), "m" (v->counter) : "memory");
9335 return c;
9336 @@ -173,7 +337,31 @@ static inline int atomic_add_negative(int i, atomic_t *v)
9337 static inline int atomic_add_return(int i, atomic_t *v)
9338 {
9339 int __i = i;
9340 - asm volatile(LOCK_PREFIX "xaddl %0, %1"
9341 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
9342 +
9343 +#ifdef CONFIG_PAX_REFCOUNT
9344 + "jno 0f\n"
9345 + "movl %0, %1\n"
9346 + "int $4\n0:\n"
9347 + _ASM_EXTABLE(0b, 0b)
9348 +#endif
9349 +
9350 + : "+r" (i), "+m" (v->counter)
9351 + : : "memory");
9352 + return i + __i;
9353 +}
9354 +
9355 +/**
9356 + * atomic_add_return_unchecked - add and return
9357 + * @i: integer value to add
9358 + * @v: pointer of type atomic_unchecked_t
9359 + *
9360 + * Atomically adds @i to @v and returns @i + @v
9361 + */
9362 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
9363 +{
9364 + int __i = i;
9365 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
9366 : "+r" (i), "+m" (v->counter)
9367 : : "memory");
9368 return i + __i;
9369 @@ -185,6 +373,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
9370 }
9371
9372 #define atomic_inc_return(v) (atomic_add_return(1, v))
9373 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
9374 +{
9375 + return atomic_add_return_unchecked(1, v);
9376 +}
9377 #define atomic_dec_return(v) (atomic_sub_return(1, v))
9378
9379 /* The 64-bit atomic type */
9380 @@ -204,6 +396,18 @@ static inline long atomic64_read(const atomic64_t *v)
9381 }
9382
9383 /**
9384 + * atomic64_read_unchecked - read atomic64 variable
9385 + * @v: pointer of type atomic64_unchecked_t
9386 + *
9387 + * Atomically reads the value of @v.
9388 + * Doesn't imply a read memory barrier.
9389 + */
9390 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
9391 +{
9392 + return v->counter;
9393 +}
9394 +
9395 +/**
9396 * atomic64_set - set atomic64 variable
9397 * @v: pointer to type atomic64_t
9398 * @i: required value
9399 @@ -216,6 +420,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
9400 }
9401
9402 /**
9403 + * atomic64_set_unchecked - set atomic64 variable
9404 + * @v: pointer to type atomic64_unchecked_t
9405 + * @i: required value
9406 + *
9407 + * Atomically sets the value of @v to @i.
9408 + */
9409 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
9410 +{
9411 + v->counter = i;
9412 +}
9413 +
9414 +/**
9415 * atomic64_add - add integer to atomic64 variable
9416 * @i: integer value to add
9417 * @v: pointer to type atomic64_t
9418 @@ -224,6 +440,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
9419 */
9420 static inline void atomic64_add(long i, atomic64_t *v)
9421 {
9422 + asm volatile(LOCK_PREFIX "addq %1,%0\n"
9423 +
9424 +#ifdef CONFIG_PAX_REFCOUNT
9425 + "jno 0f\n"
9426 + LOCK_PREFIX "subq %1,%0\n"
9427 + "int $4\n0:\n"
9428 + _ASM_EXTABLE(0b, 0b)
9429 +#endif
9430 +
9431 + : "=m" (v->counter)
9432 + : "er" (i), "m" (v->counter));
9433 +}
9434 +
9435 +/**
9436 + * atomic64_add_unchecked - add integer to atomic64 variable
9437 + * @i: integer value to add
9438 + * @v: pointer to type atomic64_unchecked_t
9439 + *
9440 + * Atomically adds @i to @v.
9441 + */
9442 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
9443 +{
9444 asm volatile(LOCK_PREFIX "addq %1,%0"
9445 : "=m" (v->counter)
9446 : "er" (i), "m" (v->counter));
9447 @@ -238,7 +476,15 @@ static inline void atomic64_add(long i, atomic64_t *v)
9448 */
9449 static inline void atomic64_sub(long i, atomic64_t *v)
9450 {
9451 - asm volatile(LOCK_PREFIX "subq %1,%0"
9452 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
9453 +
9454 +#ifdef CONFIG_PAX_REFCOUNT
9455 + "jno 0f\n"
9456 + LOCK_PREFIX "addq %1,%0\n"
9457 + "int $4\n0:\n"
9458 + _ASM_EXTABLE(0b, 0b)
9459 +#endif
9460 +
9461 : "=m" (v->counter)
9462 : "er" (i), "m" (v->counter));
9463 }
9464 @@ -256,7 +502,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9465 {
9466 unsigned char c;
9467
9468 - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
9469 + asm volatile(LOCK_PREFIX "subq %2,%0\n"
9470 +
9471 +#ifdef CONFIG_PAX_REFCOUNT
9472 + "jno 0f\n"
9473 + LOCK_PREFIX "addq %2,%0\n"
9474 + "int $4\n0:\n"
9475 + _ASM_EXTABLE(0b, 0b)
9476 +#endif
9477 +
9478 + "sete %1\n"
9479 : "=m" (v->counter), "=qm" (c)
9480 : "er" (i), "m" (v->counter) : "memory");
9481 return c;
9482 @@ -270,6 +525,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9483 */
9484 static inline void atomic64_inc(atomic64_t *v)
9485 {
9486 + asm volatile(LOCK_PREFIX "incq %0\n"
9487 +
9488 +#ifdef CONFIG_PAX_REFCOUNT
9489 + "jno 0f\n"
9490 + LOCK_PREFIX "decq %0\n"
9491 + "int $4\n0:\n"
9492 + _ASM_EXTABLE(0b, 0b)
9493 +#endif
9494 +
9495 + : "=m" (v->counter)
9496 + : "m" (v->counter));
9497 +}
9498 +
9499 +/**
9500 + * atomic64_inc_unchecked - increment atomic64 variable
9501 + * @v: pointer to type atomic64_unchecked_t
9502 + *
9503 + * Atomically increments @v by 1.
9504 + */
9505 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
9506 +{
9507 asm volatile(LOCK_PREFIX "incq %0"
9508 : "=m" (v->counter)
9509 : "m" (v->counter));
9510 @@ -283,7 +559,28 @@ static inline void atomic64_inc(atomic64_t *v)
9511 */
9512 static inline void atomic64_dec(atomic64_t *v)
9513 {
9514 - asm volatile(LOCK_PREFIX "decq %0"
9515 + asm volatile(LOCK_PREFIX "decq %0\n"
9516 +
9517 +#ifdef CONFIG_PAX_REFCOUNT
9518 + "jno 0f\n"
9519 + LOCK_PREFIX "incq %0\n"
9520 + "int $4\n0:\n"
9521 + _ASM_EXTABLE(0b, 0b)
9522 +#endif
9523 +
9524 + : "=m" (v->counter)
9525 + : "m" (v->counter));
9526 +}
9527 +
9528 +/**
9529 + * atomic64_dec_unchecked - decrement atomic64 variable
9530 + * @v: pointer to type atomic64_t
9531 + *
9532 + * Atomically decrements @v by 1.
9533 + */
9534 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
9535 +{
9536 + asm volatile(LOCK_PREFIX "decq %0\n"
9537 : "=m" (v->counter)
9538 : "m" (v->counter));
9539 }
9540 @@ -300,7 +597,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
9541 {
9542 unsigned char c;
9543
9544 - asm volatile(LOCK_PREFIX "decq %0; sete %1"
9545 + asm volatile(LOCK_PREFIX "decq %0\n"
9546 +
9547 +#ifdef CONFIG_PAX_REFCOUNT
9548 + "jno 0f\n"
9549 + LOCK_PREFIX "incq %0\n"
9550 + "int $4\n0:\n"
9551 + _ASM_EXTABLE(0b, 0b)
9552 +#endif
9553 +
9554 + "sete %1\n"
9555 : "=m" (v->counter), "=qm" (c)
9556 : "m" (v->counter) : "memory");
9557 return c != 0;
9558 @@ -318,7 +624,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
9559 {
9560 unsigned char c;
9561
9562 - asm volatile(LOCK_PREFIX "incq %0; sete %1"
9563 + asm volatile(LOCK_PREFIX "incq %0\n"
9564 +
9565 +#ifdef CONFIG_PAX_REFCOUNT
9566 + "jno 0f\n"
9567 + LOCK_PREFIX "decq %0\n"
9568 + "int $4\n0:\n"
9569 + _ASM_EXTABLE(0b, 0b)
9570 +#endif
9571 +
9572 + "sete %1\n"
9573 : "=m" (v->counter), "=qm" (c)
9574 : "m" (v->counter) : "memory");
9575 return c != 0;
9576 @@ -337,7 +652,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
9577 {
9578 unsigned char c;
9579
9580 - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
9581 + asm volatile(LOCK_PREFIX "addq %2,%0\n"
9582 +
9583 +#ifdef CONFIG_PAX_REFCOUNT
9584 + "jno 0f\n"
9585 + LOCK_PREFIX "subq %2,%0\n"
9586 + "int $4\n0:\n"
9587 + _ASM_EXTABLE(0b, 0b)
9588 +#endif
9589 +
9590 + "sets %1\n"
9591 : "=m" (v->counter), "=qm" (c)
9592 : "er" (i), "m" (v->counter) : "memory");
9593 return c;
9594 @@ -353,7 +677,31 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
9595 static inline long atomic64_add_return(long i, atomic64_t *v)
9596 {
9597 long __i = i;
9598 - asm volatile(LOCK_PREFIX "xaddq %0, %1;"
9599 + asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
9600 +
9601 +#ifdef CONFIG_PAX_REFCOUNT
9602 + "jno 0f\n"
9603 + "movq %0, %1\n"
9604 + "int $4\n0:\n"
9605 + _ASM_EXTABLE(0b, 0b)
9606 +#endif
9607 +
9608 + : "+r" (i), "+m" (v->counter)
9609 + : : "memory");
9610 + return i + __i;
9611 +}
9612 +
9613 +/**
9614 + * atomic64_add_return_unchecked - add and return
9615 + * @i: integer value to add
9616 + * @v: pointer to type atomic64_unchecked_t
9617 + *
9618 + * Atomically adds @i to @v and returns @i + @v
9619 + */
9620 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
9621 +{
9622 + long __i = i;
9623 + asm volatile(LOCK_PREFIX "xaddq %0, %1"
9624 : "+r" (i), "+m" (v->counter)
9625 : : "memory");
9626 return i + __i;
9627 @@ -365,6 +713,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
9628 }
9629
9630 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
9631 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9632 +{
9633 + return atomic64_add_return_unchecked(1, v);
9634 +}
9635 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
9636
9637 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
9638 @@ -372,21 +724,41 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
9639 return cmpxchg(&v->counter, old, new);
9640 }
9641
9642 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
9643 +{
9644 + return cmpxchg(&v->counter, old, new);
9645 +}
9646 +
9647 static inline long atomic64_xchg(atomic64_t *v, long new)
9648 {
9649 return xchg(&v->counter, new);
9650 }
9651
9652 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
9653 +{
9654 + return xchg(&v->counter, new);
9655 +}
9656 +
9657 static inline long atomic_cmpxchg(atomic_t *v, int old, int new)
9658 {
9659 return cmpxchg(&v->counter, old, new);
9660 }
9661
9662 +static inline long atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
9663 +{
9664 + return cmpxchg(&v->counter, old, new);
9665 +}
9666 +
9667 static inline long atomic_xchg(atomic_t *v, int new)
9668 {
9669 return xchg(&v->counter, new);
9670 }
9671
9672 +static inline long atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
9673 +{
9674 + return xchg(&v->counter, new);
9675 +}
9676 +
9677 /**
9678 * atomic_add_unless - add unless the number is a given value
9679 * @v: pointer of type atomic_t
9680 @@ -398,17 +770,30 @@ static inline long atomic_xchg(atomic_t *v, int new)
9681 */
9682 static inline int atomic_add_unless(atomic_t *v, int a, int u)
9683 {
9684 - int c, old;
9685 + int c, old, new;
9686 c = atomic_read(v);
9687 for (;;) {
9688 - if (unlikely(c == (u)))
9689 + if (unlikely(c == u))
9690 break;
9691 - old = atomic_cmpxchg((v), c, c + (a));
9692 +
9693 + asm volatile("addl %2,%0\n"
9694 +
9695 +#ifdef CONFIG_PAX_REFCOUNT
9696 + "jno 0f\n"
9697 + "subl %2,%0\n"
9698 + "int $4\n0:\n"
9699 + _ASM_EXTABLE(0b, 0b)
9700 +#endif
9701 +
9702 + : "=r" (new)
9703 + : "0" (c), "ir" (a));
9704 +
9705 + old = atomic_cmpxchg(v, c, new);
9706 if (likely(old == c))
9707 break;
9708 c = old;
9709 }
9710 - return c != (u);
9711 + return c != u;
9712 }
9713
9714 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
9715 @@ -424,17 +809,30 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
9716 */
9717 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
9718 {
9719 - long c, old;
9720 + long c, old, new;
9721 c = atomic64_read(v);
9722 for (;;) {
9723 - if (unlikely(c == (u)))
9724 + if (unlikely(c == u))
9725 break;
9726 - old = atomic64_cmpxchg((v), c, c + (a));
9727 +
9728 + asm volatile("addq %2,%0\n"
9729 +
9730 +#ifdef CONFIG_PAX_REFCOUNT
9731 + "jno 0f\n"
9732 + "subq %2,%0\n"
9733 + "int $4\n0:\n"
9734 + _ASM_EXTABLE(0b, 0b)
9735 +#endif
9736 +
9737 + : "=r" (new)
9738 + : "0" (c), "er" (a));
9739 +
9740 + old = atomic64_cmpxchg(v, c, new);
9741 if (likely(old == c))
9742 break;
9743 c = old;
9744 }
9745 - return c != (u);
9746 + return c != u;
9747 }
9748
9749 /**
9750 diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
9751 index 02b47a6..d5c4b15 100644
9752 --- a/arch/x86/include/asm/bitops.h
9753 +++ b/arch/x86/include/asm/bitops.h
9754 @@ -38,7 +38,7 @@
9755 * a mask operation on a byte.
9756 */
9757 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
9758 -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
9759 +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
9760 #define CONST_MASK(nr) (1 << ((nr) & 7))
9761
9762 /**
9763 diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
9764 index 7a10659..8bbf355 100644
9765 --- a/arch/x86/include/asm/boot.h
9766 +++ b/arch/x86/include/asm/boot.h
9767 @@ -11,10 +11,15 @@
9768 #include <asm/pgtable_types.h>
9769
9770 /* Physical address where kernel should be loaded. */
9771 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
9772 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
9773 + (CONFIG_PHYSICAL_ALIGN - 1)) \
9774 & ~(CONFIG_PHYSICAL_ALIGN - 1))
9775
9776 +#ifndef __ASSEMBLY__
9777 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
9778 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
9779 +#endif
9780 +
9781 /* Minimum kernel alignment, as a power of two */
9782 #ifdef CONFIG_X86_64
9783 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
9784 diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
9785 index 549860d..7d45f68 100644
9786 --- a/arch/x86/include/asm/cache.h
9787 +++ b/arch/x86/include/asm/cache.h
9788 @@ -5,9 +5,10 @@
9789
9790 /* L1 cache line size */
9791 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
9792 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9793 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9794
9795 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
9796 +#define __read_only __attribute__((__section__(".data.read_only")))
9797
9798 #ifdef CONFIG_X86_VSMP
9799 /* vSMP Internode cacheline shift */
9800 diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
9801 index b54f6af..5b376a6 100644
9802 --- a/arch/x86/include/asm/cacheflush.h
9803 +++ b/arch/x86/include/asm/cacheflush.h
9804 @@ -60,7 +60,7 @@ PAGEFLAG(WC, WC)
9805 static inline unsigned long get_page_memtype(struct page *pg)
9806 {
9807 if (!PageUncached(pg) && !PageWC(pg))
9808 - return -1;
9809 + return ~0UL;
9810 else if (!PageUncached(pg) && PageWC(pg))
9811 return _PAGE_CACHE_WC;
9812 else if (PageUncached(pg) && !PageWC(pg))
9813 @@ -85,7 +85,7 @@ static inline void set_page_memtype(struct page *pg, unsigned long memtype)
9814 SetPageWC(pg);
9815 break;
9816 default:
9817 - case -1:
9818 + case ~0UL:
9819 ClearPageUncached(pg);
9820 ClearPageWC(pg);
9821 break;
9822 diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
9823 index 0e63c9a..ab8d972 100644
9824 --- a/arch/x86/include/asm/calling.h
9825 +++ b/arch/x86/include/asm/calling.h
9826 @@ -52,32 +52,32 @@ For 32-bit we have the following conventions - kernel is built with
9827 * for assembly code:
9828 */
9829
9830 -#define R15 0
9831 -#define R14 8
9832 -#define R13 16
9833 -#define R12 24
9834 -#define RBP 32
9835 -#define RBX 40
9836 +#define R15 (0)
9837 +#define R14 (8)
9838 +#define R13 (16)
9839 +#define R12 (24)
9840 +#define RBP (32)
9841 +#define RBX (40)
9842
9843 /* arguments: interrupts/non tracing syscalls only save up to here: */
9844 -#define R11 48
9845 -#define R10 56
9846 -#define R9 64
9847 -#define R8 72
9848 -#define RAX 80
9849 -#define RCX 88
9850 -#define RDX 96
9851 -#define RSI 104
9852 -#define RDI 112
9853 -#define ORIG_RAX 120 /* + error_code */
9854 +#define R11 (48)
9855 +#define R10 (56)
9856 +#define R9 (64)
9857 +#define R8 (72)
9858 +#define RAX (80)
9859 +#define RCX (88)
9860 +#define RDX (96)
9861 +#define RSI (104)
9862 +#define RDI (112)
9863 +#define ORIG_RAX (120) /* + error_code */
9864 /* end of arguments */
9865
9866 /* cpu exception frame or undefined in case of fast syscall: */
9867 -#define RIP 128
9868 -#define CS 136
9869 -#define EFLAGS 144
9870 -#define RSP 152
9871 -#define SS 160
9872 +#define RIP (128)
9873 +#define CS (136)
9874 +#define EFLAGS (144)
9875 +#define RSP (152)
9876 +#define SS (160)
9877
9878 #define ARGOFFSET R11
9879 #define SWFRAME ORIG_RAX
9880 diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
9881 index 46fc474..b02b0f9 100644
9882 --- a/arch/x86/include/asm/checksum_32.h
9883 +++ b/arch/x86/include/asm/checksum_32.h
9884 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
9885 int len, __wsum sum,
9886 int *src_err_ptr, int *dst_err_ptr);
9887
9888 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
9889 + int len, __wsum sum,
9890 + int *src_err_ptr, int *dst_err_ptr);
9891 +
9892 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
9893 + int len, __wsum sum,
9894 + int *src_err_ptr, int *dst_err_ptr);
9895 +
9896 /*
9897 * Note: when you get a NULL pointer exception here this means someone
9898 * passed in an incorrect kernel address to one of these functions.
9899 @@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
9900 int *err_ptr)
9901 {
9902 might_sleep();
9903 - return csum_partial_copy_generic((__force void *)src, dst,
9904 + return csum_partial_copy_generic_from_user((__force void *)src, dst,
9905 len, sum, err_ptr, NULL);
9906 }
9907
9908 @@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
9909 {
9910 might_sleep();
9911 if (access_ok(VERIFY_WRITE, dst, len))
9912 - return csum_partial_copy_generic(src, (__force void *)dst,
9913 + return csum_partial_copy_generic_to_user(src, (__force void *)dst,
9914 len, sum, NULL, err_ptr);
9915
9916 if (len)
9917 diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
9918 index 617bd56..7b047a1 100644
9919 --- a/arch/x86/include/asm/desc.h
9920 +++ b/arch/x86/include/asm/desc.h
9921 @@ -4,6 +4,7 @@
9922 #include <asm/desc_defs.h>
9923 #include <asm/ldt.h>
9924 #include <asm/mmu.h>
9925 +#include <asm/pgtable.h>
9926 #include <linux/smp.h>
9927
9928 static inline void fill_ldt(struct desc_struct *desc,
9929 @@ -15,6 +16,7 @@ static inline void fill_ldt(struct desc_struct *desc,
9930 desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
9931 desc->type = (info->read_exec_only ^ 1) << 1;
9932 desc->type |= info->contents << 2;
9933 + desc->type |= info->seg_not_present ^ 1;
9934 desc->s = 1;
9935 desc->dpl = 0x3;
9936 desc->p = info->seg_not_present ^ 1;
9937 @@ -31,16 +33,12 @@ static inline void fill_ldt(struct desc_struct *desc,
9938 }
9939
9940 extern struct desc_ptr idt_descr;
9941 -extern gate_desc idt_table[];
9942 -
9943 -struct gdt_page {
9944 - struct desc_struct gdt[GDT_ENTRIES];
9945 -} __attribute__((aligned(PAGE_SIZE)));
9946 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
9947 +extern gate_desc idt_table[256];
9948
9949 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
9950 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
9951 {
9952 - return per_cpu(gdt_page, cpu).gdt;
9953 + return cpu_gdt_table[cpu];
9954 }
9955
9956 #ifdef CONFIG_X86_64
9957 @@ -65,9 +63,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
9958 unsigned long base, unsigned dpl, unsigned flags,
9959 unsigned short seg)
9960 {
9961 - gate->a = (seg << 16) | (base & 0xffff);
9962 - gate->b = (base & 0xffff0000) |
9963 - (((0x80 | type | (dpl << 5)) & 0xff) << 8);
9964 + gate->gate.offset_low = base;
9965 + gate->gate.seg = seg;
9966 + gate->gate.reserved = 0;
9967 + gate->gate.type = type;
9968 + gate->gate.s = 0;
9969 + gate->gate.dpl = dpl;
9970 + gate->gate.p = 1;
9971 + gate->gate.offset_high = base >> 16;
9972 }
9973
9974 #endif
9975 @@ -115,13 +118,17 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
9976 static inline void native_write_idt_entry(gate_desc *idt, int entry,
9977 const gate_desc *gate)
9978 {
9979 + pax_open_kernel();
9980 memcpy(&idt[entry], gate, sizeof(*gate));
9981 + pax_close_kernel();
9982 }
9983
9984 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry,
9985 const void *desc)
9986 {
9987 + pax_open_kernel();
9988 memcpy(&ldt[entry], desc, 8);
9989 + pax_close_kernel();
9990 }
9991
9992 static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
9993 @@ -139,7 +146,10 @@ static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
9994 size = sizeof(struct desc_struct);
9995 break;
9996 }
9997 +
9998 + pax_open_kernel();
9999 memcpy(&gdt[entry], desc, size);
10000 + pax_close_kernel();
10001 }
10002
10003 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
10004 @@ -211,7 +221,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
10005
10006 static inline void native_load_tr_desc(void)
10007 {
10008 + pax_open_kernel();
10009 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
10010 + pax_close_kernel();
10011 }
10012
10013 static inline void native_load_gdt(const struct desc_ptr *dtr)
10014 @@ -246,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
10015 unsigned int i;
10016 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
10017
10018 + pax_open_kernel();
10019 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
10020 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
10021 + pax_close_kernel();
10022 }
10023
10024 #define _LDT_empty(info) \
10025 @@ -309,7 +323,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
10026 desc->limit = (limit >> 16) & 0xf;
10027 }
10028
10029 -static inline void _set_gate(int gate, unsigned type, void *addr,
10030 +static inline void _set_gate(int gate, unsigned type, const void *addr,
10031 unsigned dpl, unsigned ist, unsigned seg)
10032 {
10033 gate_desc s;
10034 @@ -327,7 +341,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
10035 * Pentium F0 0F bugfix can have resulted in the mapped
10036 * IDT being write-protected.
10037 */
10038 -static inline void set_intr_gate(unsigned int n, void *addr)
10039 +static inline void set_intr_gate(unsigned int n, const void *addr)
10040 {
10041 BUG_ON((unsigned)n > 0xFF);
10042 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
10043 @@ -356,19 +370,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
10044 /*
10045 * This routine sets up an interrupt gate at directory privilege level 3.
10046 */
10047 -static inline void set_system_intr_gate(unsigned int n, void *addr)
10048 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
10049 {
10050 BUG_ON((unsigned)n > 0xFF);
10051 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
10052 }
10053
10054 -static inline void set_system_trap_gate(unsigned int n, void *addr)
10055 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
10056 {
10057 BUG_ON((unsigned)n > 0xFF);
10058 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
10059 }
10060
10061 -static inline void set_trap_gate(unsigned int n, void *addr)
10062 +static inline void set_trap_gate(unsigned int n, const void *addr)
10063 {
10064 BUG_ON((unsigned)n > 0xFF);
10065 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
10066 @@ -377,19 +391,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
10067 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
10068 {
10069 BUG_ON((unsigned)n > 0xFF);
10070 - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
10071 + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
10072 }
10073
10074 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
10075 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
10076 {
10077 BUG_ON((unsigned)n > 0xFF);
10078 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
10079 }
10080
10081 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
10082 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
10083 {
10084 BUG_ON((unsigned)n > 0xFF);
10085 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
10086 }
10087
10088 +#ifdef CONFIG_X86_32
10089 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
10090 +{
10091 + struct desc_struct d;
10092 +
10093 + if (likely(limit))
10094 + limit = (limit - 1UL) >> PAGE_SHIFT;
10095 + pack_descriptor(&d, base, limit, 0xFB, 0xC);
10096 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
10097 +}
10098 +#endif
10099 +
10100 #endif /* _ASM_X86_DESC_H */
10101 diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
10102 index 9d66848..6b4a691 100644
10103 --- a/arch/x86/include/asm/desc_defs.h
10104 +++ b/arch/x86/include/asm/desc_defs.h
10105 @@ -31,6 +31,12 @@ struct desc_struct {
10106 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
10107 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
10108 };
10109 + struct {
10110 + u16 offset_low;
10111 + u16 seg;
10112 + unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
10113 + unsigned offset_high: 16;
10114 + } gate;
10115 };
10116 } __attribute__((packed));
10117
10118 diff --git a/arch/x86/include/asm/device.h b/arch/x86/include/asm/device.h
10119 index cee34e9..a7c3fa2 100644
10120 --- a/arch/x86/include/asm/device.h
10121 +++ b/arch/x86/include/asm/device.h
10122 @@ -6,7 +6,7 @@ struct dev_archdata {
10123 void *acpi_handle;
10124 #endif
10125 #ifdef CONFIG_X86_64
10126 -struct dma_map_ops *dma_ops;
10127 + const struct dma_map_ops *dma_ops;
10128 #endif
10129 #ifdef CONFIG_DMAR
10130 void *iommu; /* hook for IOMMU specific extension */
10131 diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
10132 index 6a25d5d..786b202 100644
10133 --- a/arch/x86/include/asm/dma-mapping.h
10134 +++ b/arch/x86/include/asm/dma-mapping.h
10135 @@ -25,9 +25,9 @@ extern int iommu_merge;
10136 extern struct device x86_dma_fallback_dev;
10137 extern int panic_on_overflow;
10138
10139 -extern struct dma_map_ops *dma_ops;
10140 +extern const struct dma_map_ops *dma_ops;
10141
10142 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
10143 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
10144 {
10145 #ifdef CONFIG_X86_32
10146 return dma_ops;
10147 @@ -44,7 +44,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
10148 /* Make sure we keep the same behaviour */
10149 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
10150 {
10151 - struct dma_map_ops *ops = get_dma_ops(dev);
10152 + const struct dma_map_ops *ops = get_dma_ops(dev);
10153 if (ops->mapping_error)
10154 return ops->mapping_error(dev, dma_addr);
10155
10156 @@ -122,7 +122,7 @@ static inline void *
10157 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
10158 gfp_t gfp)
10159 {
10160 - struct dma_map_ops *ops = get_dma_ops(dev);
10161 + const struct dma_map_ops *ops = get_dma_ops(dev);
10162 void *memory;
10163
10164 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
10165 @@ -149,7 +149,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
10166 static inline void dma_free_coherent(struct device *dev, size_t size,
10167 void *vaddr, dma_addr_t bus)
10168 {
10169 - struct dma_map_ops *ops = get_dma_ops(dev);
10170 + const struct dma_map_ops *ops = get_dma_ops(dev);
10171
10172 WARN_ON(irqs_disabled()); /* for portability */
10173
10174 diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
10175 index 40b4e61..40d8133 100644
10176 --- a/arch/x86/include/asm/e820.h
10177 +++ b/arch/x86/include/asm/e820.h
10178 @@ -133,7 +133,7 @@ extern char *default_machine_specific_memory_setup(void);
10179 #define ISA_END_ADDRESS 0x100000
10180 #define is_ISA_range(s, e) ((s) >= ISA_START_ADDRESS && (e) < ISA_END_ADDRESS)
10181
10182 -#define BIOS_BEGIN 0x000a0000
10183 +#define BIOS_BEGIN 0x000c0000
10184 #define BIOS_END 0x00100000
10185
10186 #ifdef __KERNEL__
10187 diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
10188 index 8ac9d9a..0a6c96e 100644
10189 --- a/arch/x86/include/asm/elf.h
10190 +++ b/arch/x86/include/asm/elf.h
10191 @@ -257,7 +257,25 @@ extern int force_personality32;
10192 the loader. We need to make sure that it is out of the way of the program
10193 that it will "exec", and that there is sufficient room for the brk. */
10194
10195 +#ifdef CONFIG_PAX_SEGMEXEC
10196 +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
10197 +#else
10198 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
10199 +#endif
10200 +
10201 +#ifdef CONFIG_PAX_ASLR
10202 +#ifdef CONFIG_X86_32
10203 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL
10204 +
10205 +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
10206 +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
10207 +#else
10208 +#define PAX_ELF_ET_DYN_BASE 0x400000UL
10209 +
10210 +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
10211 +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
10212 +#endif
10213 +#endif
10214
10215 /* This yields a mask that user programs can use to figure out what
10216 instruction set this CPU supports. This could be done in user space,
10217 @@ -310,9 +328,7 @@ do { \
10218
10219 #define ARCH_DLINFO \
10220 do { \
10221 - if (vdso_enabled) \
10222 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
10223 - (unsigned long)current->mm->context.vdso); \
10224 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
10225 } while (0)
10226
10227 #define AT_SYSINFO 32
10228 @@ -323,7 +339,7 @@ do { \
10229
10230 #endif /* !CONFIG_X86_32 */
10231
10232 -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
10233 +#define VDSO_CURRENT_BASE (current->mm->context.vdso)
10234
10235 #define VDSO_ENTRY \
10236 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
10237 @@ -337,7 +353,4 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
10238 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
10239 #define compat_arch_setup_additional_pages syscall32_setup_pages
10240
10241 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
10242 -#define arch_randomize_brk arch_randomize_brk
10243 -
10244 #endif /* _ASM_X86_ELF_H */
10245 diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
10246 index cc70c1c..d96d011 100644
10247 --- a/arch/x86/include/asm/emergency-restart.h
10248 +++ b/arch/x86/include/asm/emergency-restart.h
10249 @@ -15,6 +15,6 @@ enum reboot_type {
10250
10251 extern enum reboot_type reboot_type;
10252
10253 -extern void machine_emergency_restart(void);
10254 +extern void machine_emergency_restart(void) __noreturn;
10255
10256 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
10257 diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
10258 index 1f11ce4..7caabd1 100644
10259 --- a/arch/x86/include/asm/futex.h
10260 +++ b/arch/x86/include/asm/futex.h
10261 @@ -12,16 +12,18 @@
10262 #include <asm/system.h>
10263
10264 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
10265 + typecheck(u32 __user *, uaddr); \
10266 asm volatile("1:\t" insn "\n" \
10267 "2:\t.section .fixup,\"ax\"\n" \
10268 "3:\tmov\t%3, %1\n" \
10269 "\tjmp\t2b\n" \
10270 "\t.previous\n" \
10271 _ASM_EXTABLE(1b, 3b) \
10272 - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
10273 + : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
10274 : "i" (-EFAULT), "0" (oparg), "1" (0))
10275
10276 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
10277 + typecheck(u32 __user *, uaddr); \
10278 asm volatile("1:\tmovl %2, %0\n" \
10279 "\tmovl\t%0, %3\n" \
10280 "\t" insn "\n" \
10281 @@ -34,10 +36,10 @@
10282 _ASM_EXTABLE(1b, 4b) \
10283 _ASM_EXTABLE(2b, 4b) \
10284 : "=&a" (oldval), "=&r" (ret), \
10285 - "+m" (*uaddr), "=&r" (tem) \
10286 + "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
10287 : "r" (oparg), "i" (-EFAULT), "1" (0))
10288
10289 -static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
10290 +static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
10291 {
10292 int op = (encoded_op >> 28) & 7;
10293 int cmp = (encoded_op >> 24) & 15;
10294 @@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
10295
10296 switch (op) {
10297 case FUTEX_OP_SET:
10298 - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
10299 + __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
10300 break;
10301 case FUTEX_OP_ADD:
10302 - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
10303 + __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
10304 uaddr, oparg);
10305 break;
10306 case FUTEX_OP_OR:
10307 @@ -109,7 +111,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
10308 return ret;
10309 }
10310
10311 -static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
10312 +static inline int futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval,
10313 int newval)
10314 {
10315
10316 @@ -119,16 +121,16 @@ static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
10317 return -ENOSYS;
10318 #endif
10319
10320 - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
10321 + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
10322 return -EFAULT;
10323
10324 - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
10325 + asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %1\n"
10326 "2:\t.section .fixup, \"ax\"\n"
10327 "3:\tmov %2, %0\n"
10328 "\tjmp 2b\n"
10329 "\t.previous\n"
10330 _ASM_EXTABLE(1b, 3b)
10331 - : "=a" (oldval), "+m" (*uaddr)
10332 + : "=a" (oldval), "+m" (*(u32 *)____m(uaddr))
10333 : "i" (-EFAULT), "r" (newval), "0" (oldval)
10334 : "memory"
10335 );
10336 diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
10337 index ba180d9..3bad351 100644
10338 --- a/arch/x86/include/asm/hw_irq.h
10339 +++ b/arch/x86/include/asm/hw_irq.h
10340 @@ -92,8 +92,8 @@ extern void setup_ioapic_dest(void);
10341 extern void enable_IO_APIC(void);
10342
10343 /* Statistics */
10344 -extern atomic_t irq_err_count;
10345 -extern atomic_t irq_mis_count;
10346 +extern atomic_unchecked_t irq_err_count;
10347 +extern atomic_unchecked_t irq_mis_count;
10348
10349 /* EISA */
10350 extern void eisa_set_level_irq(unsigned int irq);
10351 diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
10352 index 0b20bbb..4cb1396 100644
10353 --- a/arch/x86/include/asm/i387.h
10354 +++ b/arch/x86/include/asm/i387.h
10355 @@ -60,6 +60,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
10356 {
10357 int err;
10358
10359 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10360 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10361 + fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
10362 +#endif
10363 +
10364 asm volatile("1: rex64/fxrstor (%[fx])\n\t"
10365 "2:\n"
10366 ".section .fixup,\"ax\"\n"
10367 @@ -105,6 +110,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
10368 {
10369 int err;
10370
10371 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10372 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10373 + fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
10374 +#endif
10375 +
10376 asm volatile("1: rex64/fxsave (%[fx])\n\t"
10377 "2:\n"
10378 ".section .fixup,\"ax\"\n"
10379 @@ -195,13 +205,8 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
10380 }
10381
10382 /* We need a safe address that is cheap to find and that is already
10383 - in L1 during context switch. The best choices are unfortunately
10384 - different for UP and SMP */
10385 -#ifdef CONFIG_SMP
10386 -#define safe_address (__per_cpu_offset[0])
10387 -#else
10388 -#define safe_address (kstat_cpu(0).cpustat.user)
10389 -#endif
10390 + in L1 during context switch. */
10391 +#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
10392
10393 /*
10394 * These must be called with preempt disabled
10395 @@ -291,7 +296,7 @@ static inline void kernel_fpu_begin(void)
10396 struct thread_info *me = current_thread_info();
10397 preempt_disable();
10398 if (me->status & TS_USEDFPU)
10399 - __save_init_fpu(me->task);
10400 + __save_init_fpu(current);
10401 else
10402 clts();
10403 }
10404 diff --git a/arch/x86/include/asm/io_32.h b/arch/x86/include/asm/io_32.h
10405 index a299900..15c5410 100644
10406 --- a/arch/x86/include/asm/io_32.h
10407 +++ b/arch/x86/include/asm/io_32.h
10408 @@ -3,6 +3,7 @@
10409
10410 #include <linux/string.h>
10411 #include <linux/compiler.h>
10412 +#include <asm/processor.h>
10413
10414 /*
10415 * This file contains the definitions for the x86 IO instructions
10416 @@ -42,6 +43,17 @@
10417
10418 #ifdef __KERNEL__
10419
10420 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
10421 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
10422 +{
10423 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10424 +}
10425 +
10426 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
10427 +{
10428 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10429 +}
10430 +
10431 #include <asm-generic/iomap.h>
10432
10433 #include <linux/vmalloc.h>
10434 diff --git a/arch/x86/include/asm/io_64.h b/arch/x86/include/asm/io_64.h
10435 index 2440678..c158b88 100644
10436 --- a/arch/x86/include/asm/io_64.h
10437 +++ b/arch/x86/include/asm/io_64.h
10438 @@ -140,6 +140,17 @@ __OUTS(l)
10439
10440 #include <linux/vmalloc.h>
10441
10442 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
10443 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
10444 +{
10445 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10446 +}
10447 +
10448 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
10449 +{
10450 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10451 +}
10452 +
10453 #include <asm-generic/iomap.h>
10454
10455 void __memcpy_fromio(void *, unsigned long, unsigned);
10456 diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h
10457 index fd6d21b..8b13915 100644
10458 --- a/arch/x86/include/asm/iommu.h
10459 +++ b/arch/x86/include/asm/iommu.h
10460 @@ -3,7 +3,7 @@
10461
10462 extern void pci_iommu_shutdown(void);
10463 extern void no_iommu_init(void);
10464 -extern struct dma_map_ops nommu_dma_ops;
10465 +extern const struct dma_map_ops nommu_dma_ops;
10466 extern int force_iommu, no_iommu;
10467 extern int iommu_detected;
10468 extern int iommu_pass_through;
10469 diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
10470 index 9e2b952..557206e 100644
10471 --- a/arch/x86/include/asm/irqflags.h
10472 +++ b/arch/x86/include/asm/irqflags.h
10473 @@ -142,6 +142,11 @@ static inline unsigned long __raw_local_irq_save(void)
10474 sti; \
10475 sysexit
10476
10477 +#define GET_CR0_INTO_RDI mov %cr0, %rdi
10478 +#define SET_RDI_INTO_CR0 mov %rdi, %cr0
10479 +#define GET_CR3_INTO_RDI mov %cr3, %rdi
10480 +#define SET_RDI_INTO_CR3 mov %rdi, %cr3
10481 +
10482 #else
10483 #define INTERRUPT_RETURN iret
10484 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
10485 diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
10486 index 4fe681d..bb6d40c 100644
10487 --- a/arch/x86/include/asm/kprobes.h
10488 +++ b/arch/x86/include/asm/kprobes.h
10489 @@ -34,13 +34,8 @@ typedef u8 kprobe_opcode_t;
10490 #define BREAKPOINT_INSTRUCTION 0xcc
10491 #define RELATIVEJUMP_INSTRUCTION 0xe9
10492 #define MAX_INSN_SIZE 16
10493 -#define MAX_STACK_SIZE 64
10494 -#define MIN_STACK_SIZE(ADDR) \
10495 - (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
10496 - THREAD_SIZE - (unsigned long)(ADDR))) \
10497 - ? (MAX_STACK_SIZE) \
10498 - : (((unsigned long)current_thread_info()) + \
10499 - THREAD_SIZE - (unsigned long)(ADDR)))
10500 +#define MAX_STACK_SIZE 64UL
10501 +#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
10502
10503 #define flush_insn_slot(p) do { } while (0)
10504
10505 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
10506 index 08bc2ff..2e88d1f 100644
10507 --- a/arch/x86/include/asm/kvm_host.h
10508 +++ b/arch/x86/include/asm/kvm_host.h
10509 @@ -534,9 +534,9 @@ struct kvm_x86_ops {
10510 bool (*gb_page_enable)(void);
10511
10512 const struct trace_print_flags *exit_reasons_str;
10513 -};
10514 +} __do_const;
10515
10516 -extern struct kvm_x86_ops *kvm_x86_ops;
10517 +extern const struct kvm_x86_ops *kvm_x86_ops;
10518
10519 int kvm_mmu_module_init(void);
10520 void kvm_mmu_module_exit(void);
10521 diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
10522 index 47b9b6f..815aaa1 100644
10523 --- a/arch/x86/include/asm/local.h
10524 +++ b/arch/x86/include/asm/local.h
10525 @@ -18,26 +18,58 @@ typedef struct {
10526
10527 static inline void local_inc(local_t *l)
10528 {
10529 - asm volatile(_ASM_INC "%0"
10530 + asm volatile(_ASM_INC "%0\n"
10531 +
10532 +#ifdef CONFIG_PAX_REFCOUNT
10533 + "jno 0f\n"
10534 + _ASM_DEC "%0\n"
10535 + "int $4\n0:\n"
10536 + _ASM_EXTABLE(0b, 0b)
10537 +#endif
10538 +
10539 : "+m" (l->a.counter));
10540 }
10541
10542 static inline void local_dec(local_t *l)
10543 {
10544 - asm volatile(_ASM_DEC "%0"
10545 + asm volatile(_ASM_DEC "%0\n"
10546 +
10547 +#ifdef CONFIG_PAX_REFCOUNT
10548 + "jno 0f\n"
10549 + _ASM_INC "%0\n"
10550 + "int $4\n0:\n"
10551 + _ASM_EXTABLE(0b, 0b)
10552 +#endif
10553 +
10554 : "+m" (l->a.counter));
10555 }
10556
10557 static inline void local_add(long i, local_t *l)
10558 {
10559 - asm volatile(_ASM_ADD "%1,%0"
10560 + asm volatile(_ASM_ADD "%1,%0\n"
10561 +
10562 +#ifdef CONFIG_PAX_REFCOUNT
10563 + "jno 0f\n"
10564 + _ASM_SUB "%1,%0\n"
10565 + "int $4\n0:\n"
10566 + _ASM_EXTABLE(0b, 0b)
10567 +#endif
10568 +
10569 : "+m" (l->a.counter)
10570 : "ir" (i));
10571 }
10572
10573 static inline void local_sub(long i, local_t *l)
10574 {
10575 - asm volatile(_ASM_SUB "%1,%0"
10576 + asm volatile(_ASM_SUB "%1,%0\n"
10577 +
10578 +#ifdef CONFIG_PAX_REFCOUNT
10579 + "jno 0f\n"
10580 + _ASM_ADD "%1,%0\n"
10581 + "int $4\n0:\n"
10582 + _ASM_EXTABLE(0b, 0b)
10583 +#endif
10584 +
10585 : "+m" (l->a.counter)
10586 : "ir" (i));
10587 }
10588 @@ -55,7 +87,16 @@ static inline int local_sub_and_test(long i, local_t *l)
10589 {
10590 unsigned char c;
10591
10592 - asm volatile(_ASM_SUB "%2,%0; sete %1"
10593 + asm volatile(_ASM_SUB "%2,%0\n"
10594 +
10595 +#ifdef CONFIG_PAX_REFCOUNT
10596 + "jno 0f\n"
10597 + _ASM_ADD "%2,%0\n"
10598 + "int $4\n0:\n"
10599 + _ASM_EXTABLE(0b, 0b)
10600 +#endif
10601 +
10602 + "sete %1\n"
10603 : "+m" (l->a.counter), "=qm" (c)
10604 : "ir" (i) : "memory");
10605 return c;
10606 @@ -73,7 +114,16 @@ static inline int local_dec_and_test(local_t *l)
10607 {
10608 unsigned char c;
10609
10610 - asm volatile(_ASM_DEC "%0; sete %1"
10611 + asm volatile(_ASM_DEC "%0\n"
10612 +
10613 +#ifdef CONFIG_PAX_REFCOUNT
10614 + "jno 0f\n"
10615 + _ASM_INC "%0\n"
10616 + "int $4\n0:\n"
10617 + _ASM_EXTABLE(0b, 0b)
10618 +#endif
10619 +
10620 + "sete %1\n"
10621 : "+m" (l->a.counter), "=qm" (c)
10622 : : "memory");
10623 return c != 0;
10624 @@ -91,7 +141,16 @@ static inline int local_inc_and_test(local_t *l)
10625 {
10626 unsigned char c;
10627
10628 - asm volatile(_ASM_INC "%0; sete %1"
10629 + asm volatile(_ASM_INC "%0\n"
10630 +
10631 +#ifdef CONFIG_PAX_REFCOUNT
10632 + "jno 0f\n"
10633 + _ASM_DEC "%0\n"
10634 + "int $4\n0:\n"
10635 + _ASM_EXTABLE(0b, 0b)
10636 +#endif
10637 +
10638 + "sete %1\n"
10639 : "+m" (l->a.counter), "=qm" (c)
10640 : : "memory");
10641 return c != 0;
10642 @@ -110,7 +169,16 @@ static inline int local_add_negative(long i, local_t *l)
10643 {
10644 unsigned char c;
10645
10646 - asm volatile(_ASM_ADD "%2,%0; sets %1"
10647 + asm volatile(_ASM_ADD "%2,%0\n"
10648 +
10649 +#ifdef CONFIG_PAX_REFCOUNT
10650 + "jno 0f\n"
10651 + _ASM_SUB "%2,%0\n"
10652 + "int $4\n0:\n"
10653 + _ASM_EXTABLE(0b, 0b)
10654 +#endif
10655 +
10656 + "sets %1\n"
10657 : "+m" (l->a.counter), "=qm" (c)
10658 : "ir" (i) : "memory");
10659 return c;
10660 @@ -133,7 +201,15 @@ static inline long local_add_return(long i, local_t *l)
10661 #endif
10662 /* Modern 486+ processor */
10663 __i = i;
10664 - asm volatile(_ASM_XADD "%0, %1;"
10665 + asm volatile(_ASM_XADD "%0, %1\n"
10666 +
10667 +#ifdef CONFIG_PAX_REFCOUNT
10668 + "jno 0f\n"
10669 + _ASM_MOV "%0,%1\n"
10670 + "int $4\n0:\n"
10671 + _ASM_EXTABLE(0b, 0b)
10672 +#endif
10673 +
10674 : "+r" (i), "+m" (l->a.counter)
10675 : : "memory");
10676 return i + __i;
10677 diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
10678 index ef51b50..514ba37 100644
10679 --- a/arch/x86/include/asm/microcode.h
10680 +++ b/arch/x86/include/asm/microcode.h
10681 @@ -12,13 +12,13 @@ struct device;
10682 enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };
10683
10684 struct microcode_ops {
10685 - enum ucode_state (*request_microcode_user) (int cpu,
10686 + enum ucode_state (* const request_microcode_user) (int cpu,
10687 const void __user *buf, size_t size);
10688
10689 - enum ucode_state (*request_microcode_fw) (int cpu,
10690 + enum ucode_state (* const request_microcode_fw) (int cpu,
10691 struct device *device);
10692
10693 - void (*microcode_fini_cpu) (int cpu);
10694 + void (* const microcode_fini_cpu) (int cpu);
10695
10696 /*
10697 * The generic 'microcode_core' part guarantees that
10698 @@ -38,18 +38,18 @@ struct ucode_cpu_info {
10699 extern struct ucode_cpu_info ucode_cpu_info[];
10700
10701 #ifdef CONFIG_MICROCODE_INTEL
10702 -extern struct microcode_ops * __init init_intel_microcode(void);
10703 +extern const struct microcode_ops * __init init_intel_microcode(void);
10704 #else
10705 -static inline struct microcode_ops * __init init_intel_microcode(void)
10706 +static inline const struct microcode_ops * __init init_intel_microcode(void)
10707 {
10708 return NULL;
10709 }
10710 #endif /* CONFIG_MICROCODE_INTEL */
10711
10712 #ifdef CONFIG_MICROCODE_AMD
10713 -extern struct microcode_ops * __init init_amd_microcode(void);
10714 +extern const struct microcode_ops * __init init_amd_microcode(void);
10715 #else
10716 -static inline struct microcode_ops * __init init_amd_microcode(void)
10717 +static inline const struct microcode_ops * __init init_amd_microcode(void)
10718 {
10719 return NULL;
10720 }
10721 diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
10722 index 593e51d..fa69c9a 100644
10723 --- a/arch/x86/include/asm/mman.h
10724 +++ b/arch/x86/include/asm/mman.h
10725 @@ -5,4 +5,14 @@
10726
10727 #include <asm-generic/mman.h>
10728
10729 +#ifdef __KERNEL__
10730 +#ifndef __ASSEMBLY__
10731 +#ifdef CONFIG_X86_32
10732 +#define arch_mmap_check i386_mmap_check
10733 +int i386_mmap_check(unsigned long addr, unsigned long len,
10734 + unsigned long flags);
10735 +#endif
10736 +#endif
10737 +#endif
10738 +
10739 #endif /* _ASM_X86_MMAN_H */
10740 diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
10741 index 80a1dee..239c67d 100644
10742 --- a/arch/x86/include/asm/mmu.h
10743 +++ b/arch/x86/include/asm/mmu.h
10744 @@ -9,10 +9,23 @@
10745 * we put the segment information here.
10746 */
10747 typedef struct {
10748 - void *ldt;
10749 + struct desc_struct *ldt;
10750 int size;
10751 struct mutex lock;
10752 - void *vdso;
10753 + unsigned long vdso;
10754 +
10755 +#ifdef CONFIG_X86_32
10756 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
10757 + unsigned long user_cs_base;
10758 + unsigned long user_cs_limit;
10759 +
10760 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
10761 + cpumask_t cpu_user_cs_mask;
10762 +#endif
10763 +
10764 +#endif
10765 +#endif
10766 +
10767 } mm_context_t;
10768
10769 #ifdef CONFIG_SMP
10770 diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
10771 index 8b5393e..8143173 100644
10772 --- a/arch/x86/include/asm/mmu_context.h
10773 +++ b/arch/x86/include/asm/mmu_context.h
10774 @@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
10775
10776 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
10777 {
10778 +
10779 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10780 + unsigned int i;
10781 + pgd_t *pgd;
10782 +
10783 + pax_open_kernel();
10784 + pgd = get_cpu_pgd(smp_processor_id());
10785 + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
10786 + set_pgd_batched(pgd+i, native_make_pgd(0));
10787 + pax_close_kernel();
10788 +#endif
10789 +
10790 #ifdef CONFIG_SMP
10791 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
10792 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
10793 @@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10794 struct task_struct *tsk)
10795 {
10796 unsigned cpu = smp_processor_id();
10797 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)) && defined(CONFIG_SMP)
10798 + int tlbstate = TLBSTATE_OK;
10799 +#endif
10800
10801 if (likely(prev != next)) {
10802 #ifdef CONFIG_SMP
10803 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10804 + tlbstate = percpu_read(cpu_tlbstate.state);
10805 +#endif
10806 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
10807 percpu_write(cpu_tlbstate.active_mm, next);
10808 #endif
10809 cpumask_set_cpu(cpu, mm_cpumask(next));
10810
10811 /* Re-load page tables */
10812 +#ifdef CONFIG_PAX_PER_CPU_PGD
10813 + pax_open_kernel();
10814 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
10815 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
10816 + pax_close_kernel();
10817 + load_cr3(get_cpu_pgd(cpu));
10818 +#else
10819 load_cr3(next->pgd);
10820 +#endif
10821
10822 /* stop flush ipis for the previous mm */
10823 cpumask_clear_cpu(cpu, mm_cpumask(prev));
10824 @@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10825 */
10826 if (unlikely(prev->context.ldt != next->context.ldt))
10827 load_LDT_nolock(&next->context);
10828 - }
10829 +
10830 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
10831 + if (!nx_enabled) {
10832 + smp_mb__before_clear_bit();
10833 + cpu_clear(cpu, prev->context.cpu_user_cs_mask);
10834 + smp_mb__after_clear_bit();
10835 + cpu_set(cpu, next->context.cpu_user_cs_mask);
10836 + }
10837 +#endif
10838 +
10839 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10840 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
10841 + prev->context.user_cs_limit != next->context.user_cs_limit))
10842 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10843 #ifdef CONFIG_SMP
10844 + else if (unlikely(tlbstate != TLBSTATE_OK))
10845 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10846 +#endif
10847 +#endif
10848 +
10849 + }
10850 else {
10851 +
10852 +#ifdef CONFIG_PAX_PER_CPU_PGD
10853 + pax_open_kernel();
10854 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
10855 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
10856 + pax_close_kernel();
10857 + load_cr3(get_cpu_pgd(cpu));
10858 +#endif
10859 +
10860 +#ifdef CONFIG_SMP
10861 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
10862 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
10863
10864 @@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10865 * tlb flush IPI delivery. We must reload CR3
10866 * to make sure to use no freed page tables.
10867 */
10868 +
10869 +#ifndef CONFIG_PAX_PER_CPU_PGD
10870 load_cr3(next->pgd);
10871 +#endif
10872 +
10873 load_LDT_nolock(&next->context);
10874 +
10875 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
10876 + if (!nx_enabled)
10877 + cpu_set(cpu, next->context.cpu_user_cs_mask);
10878 +#endif
10879 +
10880 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10881 +#ifdef CONFIG_PAX_PAGEEXEC
10882 + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && nx_enabled))
10883 +#endif
10884 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10885 +#endif
10886 +
10887 }
10888 +#endif
10889 }
10890 -#endif
10891 }
10892
10893 #define activate_mm(prev, next) \
10894 diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
10895 index 3e2ce58..caaf478 100644
10896 --- a/arch/x86/include/asm/module.h
10897 +++ b/arch/x86/include/asm/module.h
10898 @@ -5,6 +5,7 @@
10899
10900 #ifdef CONFIG_X86_64
10901 /* X86_64 does not define MODULE_PROC_FAMILY */
10902 +#define MODULE_PROC_FAMILY ""
10903 #elif defined CONFIG_M386
10904 #define MODULE_PROC_FAMILY "386 "
10905 #elif defined CONFIG_M486
10906 @@ -59,13 +60,26 @@
10907 #error unknown processor family
10908 #endif
10909
10910 -#ifdef CONFIG_X86_32
10911 -# ifdef CONFIG_4KSTACKS
10912 -# define MODULE_STACKSIZE "4KSTACKS "
10913 -# else
10914 -# define MODULE_STACKSIZE ""
10915 -# endif
10916 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE
10917 +#if defined(CONFIG_X86_32) && defined(CONFIG_4KSTACKS)
10918 +#define MODULE_STACKSIZE "4KSTACKS "
10919 +#else
10920 +#define MODULE_STACKSIZE ""
10921 #endif
10922
10923 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
10924 +#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
10925 +#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
10926 +#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
10927 +#else
10928 +#define MODULE_PAX_KERNEXEC ""
10929 +#endif
10930 +
10931 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10932 +#define MODULE_PAX_UDEREF "UDEREF "
10933 +#else
10934 +#define MODULE_PAX_UDEREF ""
10935 +#endif
10936 +
10937 +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
10938 +
10939 #endif /* _ASM_X86_MODULE_H */
10940 diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
10941 index 7639dbf..e08a58c 100644
10942 --- a/arch/x86/include/asm/page_64_types.h
10943 +++ b/arch/x86/include/asm/page_64_types.h
10944 @@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
10945
10946 /* duplicated to the one in bootmem.h */
10947 extern unsigned long max_pfn;
10948 -extern unsigned long phys_base;
10949 +extern const unsigned long phys_base;
10950
10951 extern unsigned long __phys_addr(unsigned long);
10952 #define __phys_reloc_hide(x) (x)
10953 diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
10954 index efb3899..ef30687 100644
10955 --- a/arch/x86/include/asm/paravirt.h
10956 +++ b/arch/x86/include/asm/paravirt.h
10957 @@ -648,6 +648,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
10958 val);
10959 }
10960
10961 +static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
10962 +{
10963 + pgdval_t val = native_pgd_val(pgd);
10964 +
10965 + if (sizeof(pgdval_t) > sizeof(long))
10966 + PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
10967 + val, (u64)val >> 32);
10968 + else
10969 + PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
10970 + val);
10971 +}
10972 +
10973 static inline void pgd_clear(pgd_t *pgdp)
10974 {
10975 set_pgd(pgdp, __pgd(0));
10976 @@ -729,6 +741,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
10977 pv_mmu_ops.set_fixmap(idx, phys, flags);
10978 }
10979
10980 +#ifdef CONFIG_PAX_KERNEXEC
10981 +static inline unsigned long pax_open_kernel(void)
10982 +{
10983 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
10984 +}
10985 +
10986 +static inline unsigned long pax_close_kernel(void)
10987 +{
10988 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
10989 +}
10990 +#else
10991 +static inline unsigned long pax_open_kernel(void) { return 0; }
10992 +static inline unsigned long pax_close_kernel(void) { return 0; }
10993 +#endif
10994 +
10995 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
10996
10997 static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
10998 @@ -945,7 +972,7 @@ extern void default_banner(void);
10999
11000 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
11001 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
11002 -#define PARA_INDIRECT(addr) *%cs:addr
11003 +#define PARA_INDIRECT(addr) *%ss:addr
11004 #endif
11005
11006 #define INTERRUPT_RETURN \
11007 @@ -1022,6 +1049,21 @@ extern void default_banner(void);
11008 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
11009 CLBR_NONE, \
11010 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
11011 +
11012 +#define GET_CR0_INTO_RDI \
11013 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
11014 + mov %rax,%rdi
11015 +
11016 +#define SET_RDI_INTO_CR0 \
11017 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
11018 +
11019 +#define GET_CR3_INTO_RDI \
11020 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
11021 + mov %rax,%rdi
11022 +
11023 +#define SET_RDI_INTO_CR3 \
11024 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
11025 +
11026 #endif /* CONFIG_X86_32 */
11027
11028 #endif /* __ASSEMBLY__ */
11029 diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
11030 index 9357473..aeb2de5 100644
11031 --- a/arch/x86/include/asm/paravirt_types.h
11032 +++ b/arch/x86/include/asm/paravirt_types.h
11033 @@ -78,19 +78,19 @@ struct pv_init_ops {
11034 */
11035 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
11036 unsigned long addr, unsigned len);
11037 -};
11038 +} __no_const;
11039
11040
11041 struct pv_lazy_ops {
11042 /* Set deferred update mode, used for batching operations. */
11043 void (*enter)(void);
11044 void (*leave)(void);
11045 -};
11046 +} __no_const;
11047
11048 struct pv_time_ops {
11049 unsigned long long (*sched_clock)(void);
11050 unsigned long (*get_tsc_khz)(void);
11051 -};
11052 +} __no_const;
11053
11054 struct pv_cpu_ops {
11055 /* hooks for various privileged instructions */
11056 @@ -186,7 +186,7 @@ struct pv_cpu_ops {
11057
11058 void (*start_context_switch)(struct task_struct *prev);
11059 void (*end_context_switch)(struct task_struct *next);
11060 -};
11061 +} __no_const;
11062
11063 struct pv_irq_ops {
11064 /*
11065 @@ -217,7 +217,7 @@ struct pv_apic_ops {
11066 unsigned long start_eip,
11067 unsigned long start_esp);
11068 #endif
11069 -};
11070 +} __no_const;
11071
11072 struct pv_mmu_ops {
11073 unsigned long (*read_cr2)(void);
11074 @@ -301,6 +301,7 @@ struct pv_mmu_ops {
11075 struct paravirt_callee_save make_pud;
11076
11077 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
11078 + void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
11079 #endif /* PAGETABLE_LEVELS == 4 */
11080 #endif /* PAGETABLE_LEVELS >= 3 */
11081
11082 @@ -316,6 +317,12 @@ struct pv_mmu_ops {
11083 an mfn. We can tell which is which from the index. */
11084 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
11085 phys_addr_t phys, pgprot_t flags);
11086 +
11087 +#ifdef CONFIG_PAX_KERNEXEC
11088 + unsigned long (*pax_open_kernel)(void);
11089 + unsigned long (*pax_close_kernel)(void);
11090 +#endif
11091 +
11092 };
11093
11094 struct raw_spinlock;
11095 @@ -326,7 +333,7 @@ struct pv_lock_ops {
11096 void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
11097 int (*spin_trylock)(struct raw_spinlock *lock);
11098 void (*spin_unlock)(struct raw_spinlock *lock);
11099 -};
11100 +} __no_const;
11101
11102 /* This contains all the paravirt structures: we get a convenient
11103 * number for each function using the offset which we use to indicate
11104 diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
11105 index b399988..3f47c38 100644
11106 --- a/arch/x86/include/asm/pci_x86.h
11107 +++ b/arch/x86/include/asm/pci_x86.h
11108 @@ -89,16 +89,16 @@ extern int (*pcibios_enable_irq)(struct pci_dev *dev);
11109 extern void (*pcibios_disable_irq)(struct pci_dev *dev);
11110
11111 struct pci_raw_ops {
11112 - int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
11113 + int (* const read)(unsigned int domain, unsigned int bus, unsigned int devfn,
11114 int reg, int len, u32 *val);
11115 - int (*write)(unsigned int domain, unsigned int bus, unsigned int devfn,
11116 + int (* const write)(unsigned int domain, unsigned int bus, unsigned int devfn,
11117 int reg, int len, u32 val);
11118 };
11119
11120 -extern struct pci_raw_ops *raw_pci_ops;
11121 -extern struct pci_raw_ops *raw_pci_ext_ops;
11122 +extern const struct pci_raw_ops *raw_pci_ops;
11123 +extern const struct pci_raw_ops *raw_pci_ext_ops;
11124
11125 -extern struct pci_raw_ops pci_direct_conf1;
11126 +extern const struct pci_raw_ops pci_direct_conf1;
11127 extern bool port_cf9_safe;
11128
11129 /* arch_initcall level */
11130 diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
11131 index b65a36d..50345a4 100644
11132 --- a/arch/x86/include/asm/percpu.h
11133 +++ b/arch/x86/include/asm/percpu.h
11134 @@ -78,6 +78,7 @@ do { \
11135 if (0) { \
11136 T__ tmp__; \
11137 tmp__ = (val); \
11138 + (void)tmp__; \
11139 } \
11140 switch (sizeof(var)) { \
11141 case 1: \
11142 diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
11143 index 271de94..ef944d6 100644
11144 --- a/arch/x86/include/asm/pgalloc.h
11145 +++ b/arch/x86/include/asm/pgalloc.h
11146 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
11147 pmd_t *pmd, pte_t *pte)
11148 {
11149 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
11150 + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
11151 +}
11152 +
11153 +static inline void pmd_populate_user(struct mm_struct *mm,
11154 + pmd_t *pmd, pte_t *pte)
11155 +{
11156 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
11157 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
11158 }
11159
11160 diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
11161 index 2334982..70bc412 100644
11162 --- a/arch/x86/include/asm/pgtable-2level.h
11163 +++ b/arch/x86/include/asm/pgtable-2level.h
11164 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
11165
11166 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11167 {
11168 + pax_open_kernel();
11169 *pmdp = pmd;
11170 + pax_close_kernel();
11171 }
11172
11173 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11174 diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
11175 index 33927d2..ccde329 100644
11176 --- a/arch/x86/include/asm/pgtable-3level.h
11177 +++ b/arch/x86/include/asm/pgtable-3level.h
11178 @@ -38,12 +38,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11179
11180 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11181 {
11182 + pax_open_kernel();
11183 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
11184 + pax_close_kernel();
11185 }
11186
11187 static inline void native_set_pud(pud_t *pudp, pud_t pud)
11188 {
11189 + pax_open_kernel();
11190 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
11191 + pax_close_kernel();
11192 }
11193
11194 /*
11195 diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
11196 index af6fd36..867ff74 100644
11197 --- a/arch/x86/include/asm/pgtable.h
11198 +++ b/arch/x86/include/asm/pgtable.h
11199 @@ -39,6 +39,7 @@ extern struct list_head pgd_list;
11200
11201 #ifndef __PAGETABLE_PUD_FOLDED
11202 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
11203 +#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
11204 #define pgd_clear(pgd) native_pgd_clear(pgd)
11205 #endif
11206
11207 @@ -74,12 +75,51 @@ extern struct list_head pgd_list;
11208
11209 #define arch_end_context_switch(prev) do {} while(0)
11210
11211 +#define pax_open_kernel() native_pax_open_kernel()
11212 +#define pax_close_kernel() native_pax_close_kernel()
11213 #endif /* CONFIG_PARAVIRT */
11214
11215 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
11216 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
11217 +
11218 +#ifdef CONFIG_PAX_KERNEXEC
11219 +static inline unsigned long native_pax_open_kernel(void)
11220 +{
11221 + unsigned long cr0;
11222 +
11223 + preempt_disable();
11224 + barrier();
11225 + cr0 = read_cr0() ^ X86_CR0_WP;
11226 + BUG_ON(unlikely(cr0 & X86_CR0_WP));
11227 + write_cr0(cr0);
11228 + return cr0 ^ X86_CR0_WP;
11229 +}
11230 +
11231 +static inline unsigned long native_pax_close_kernel(void)
11232 +{
11233 + unsigned long cr0;
11234 +
11235 + cr0 = read_cr0() ^ X86_CR0_WP;
11236 + BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
11237 + write_cr0(cr0);
11238 + barrier();
11239 + preempt_enable_no_resched();
11240 + return cr0 ^ X86_CR0_WP;
11241 +}
11242 +#else
11243 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
11244 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
11245 +#endif
11246 +
11247 /*
11248 * The following only work if pte_present() is true.
11249 * Undefined behaviour if not..
11250 */
11251 +static inline int pte_user(pte_t pte)
11252 +{
11253 + return pte_val(pte) & _PAGE_USER;
11254 +}
11255 +
11256 static inline int pte_dirty(pte_t pte)
11257 {
11258 return pte_flags(pte) & _PAGE_DIRTY;
11259 @@ -167,9 +207,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
11260 return pte_clear_flags(pte, _PAGE_RW);
11261 }
11262
11263 +static inline pte_t pte_mkread(pte_t pte)
11264 +{
11265 + return __pte(pte_val(pte) | _PAGE_USER);
11266 +}
11267 +
11268 static inline pte_t pte_mkexec(pte_t pte)
11269 {
11270 - return pte_clear_flags(pte, _PAGE_NX);
11271 +#ifdef CONFIG_X86_PAE
11272 + if (__supported_pte_mask & _PAGE_NX)
11273 + return pte_clear_flags(pte, _PAGE_NX);
11274 + else
11275 +#endif
11276 + return pte_set_flags(pte, _PAGE_USER);
11277 +}
11278 +
11279 +static inline pte_t pte_exprotect(pte_t pte)
11280 +{
11281 +#ifdef CONFIG_X86_PAE
11282 + if (__supported_pte_mask & _PAGE_NX)
11283 + return pte_set_flags(pte, _PAGE_NX);
11284 + else
11285 +#endif
11286 + return pte_clear_flags(pte, _PAGE_USER);
11287 }
11288
11289 static inline pte_t pte_mkdirty(pte_t pte)
11290 @@ -302,6 +362,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
11291 #endif
11292
11293 #ifndef __ASSEMBLY__
11294 +
11295 +#ifdef CONFIG_PAX_PER_CPU_PGD
11296 +extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
11297 +static inline pgd_t *get_cpu_pgd(unsigned int cpu)
11298 +{
11299 + return cpu_pgd[cpu];
11300 +}
11301 +#endif
11302 +
11303 #include <linux/mm_types.h>
11304
11305 static inline int pte_none(pte_t pte)
11306 @@ -472,7 +541,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
11307
11308 static inline int pgd_bad(pgd_t pgd)
11309 {
11310 - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
11311 + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
11312 }
11313
11314 static inline int pgd_none(pgd_t pgd)
11315 @@ -495,7 +564,12 @@ static inline int pgd_none(pgd_t pgd)
11316 * pgd_offset() returns a (pgd_t *)
11317 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
11318 */
11319 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
11320 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
11321 +
11322 +#ifdef CONFIG_PAX_PER_CPU_PGD
11323 +#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
11324 +#endif
11325 +
11326 /*
11327 * a shortcut which implies the use of the kernel's pgd, instead
11328 * of a process's
11329 @@ -506,6 +580,20 @@ static inline int pgd_none(pgd_t pgd)
11330 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
11331 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
11332
11333 +#ifdef CONFIG_X86_32
11334 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
11335 +#else
11336 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
11337 +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
11338 +
11339 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11340 +#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
11341 +#else
11342 +#define PAX_USER_SHADOW_BASE (_AC(0,UL))
11343 +#endif
11344 +
11345 +#endif
11346 +
11347 #ifndef __ASSEMBLY__
11348
11349 extern int direct_gbpages;
11350 @@ -611,11 +699,23 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm,
11351 * dst and src can be on the same page, but the range must not overlap,
11352 * and must not cross a page boundary.
11353 */
11354 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
11355 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
11356 {
11357 - memcpy(dst, src, count * sizeof(pgd_t));
11358 + pax_open_kernel();
11359 + while (count--)
11360 + *dst++ = *src++;
11361 + pax_close_kernel();
11362 }
11363
11364 +#ifdef CONFIG_PAX_PER_CPU_PGD
11365 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
11366 +#endif
11367 +
11368 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11369 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
11370 +#else
11371 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
11372 +#endif
11373
11374 #include <asm-generic/pgtable.h>
11375 #endif /* __ASSEMBLY__ */
11376 diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
11377 index 750f1bf..971e839 100644
11378 --- a/arch/x86/include/asm/pgtable_32.h
11379 +++ b/arch/x86/include/asm/pgtable_32.h
11380 @@ -26,9 +26,6 @@
11381 struct mm_struct;
11382 struct vm_area_struct;
11383
11384 -extern pgd_t swapper_pg_dir[1024];
11385 -extern pgd_t trampoline_pg_dir[1024];
11386 -
11387 static inline void pgtable_cache_init(void) { }
11388 static inline void check_pgt_cache(void) { }
11389 void paging_init(void);
11390 @@ -49,6 +46,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11391 # include <asm/pgtable-2level.h>
11392 #endif
11393
11394 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
11395 +extern pgd_t trampoline_pg_dir[PTRS_PER_PGD];
11396 +#ifdef CONFIG_X86_PAE
11397 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
11398 +#endif
11399 +
11400 #if defined(CONFIG_HIGHPTE)
11401 #define __KM_PTE \
11402 (in_nmi() ? KM_NMI_PTE : \
11403 @@ -73,7 +76,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11404 /* Clear a kernel PTE and flush it from the TLB */
11405 #define kpte_clear_flush(ptep, vaddr) \
11406 do { \
11407 + pax_open_kernel(); \
11408 pte_clear(&init_mm, (vaddr), (ptep)); \
11409 + pax_close_kernel(); \
11410 __flush_tlb_one((vaddr)); \
11411 } while (0)
11412
11413 @@ -85,6 +90,9 @@ do { \
11414
11415 #endif /* !__ASSEMBLY__ */
11416
11417 +#define HAVE_ARCH_UNMAPPED_AREA
11418 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
11419 +
11420 /*
11421 * kern_addr_valid() is (1) for FLATMEM and (0) for
11422 * SPARSEMEM and DISCONTIGMEM
11423 diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
11424 index 5e67c15..12d5c47 100644
11425 --- a/arch/x86/include/asm/pgtable_32_types.h
11426 +++ b/arch/x86/include/asm/pgtable_32_types.h
11427 @@ -8,7 +8,7 @@
11428 */
11429 #ifdef CONFIG_X86_PAE
11430 # include <asm/pgtable-3level_types.h>
11431 -# define PMD_SIZE (1UL << PMD_SHIFT)
11432 +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
11433 # define PMD_MASK (~(PMD_SIZE - 1))
11434 #else
11435 # include <asm/pgtable-2level_types.h>
11436 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
11437 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
11438 #endif
11439
11440 +#ifdef CONFIG_PAX_KERNEXEC
11441 +#ifndef __ASSEMBLY__
11442 +extern unsigned char MODULES_EXEC_VADDR[];
11443 +extern unsigned char MODULES_EXEC_END[];
11444 +#endif
11445 +#include <asm/boot.h>
11446 +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
11447 +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
11448 +#else
11449 +#define ktla_ktva(addr) (addr)
11450 +#define ktva_ktla(addr) (addr)
11451 +#endif
11452 +
11453 #define MODULES_VADDR VMALLOC_START
11454 #define MODULES_END VMALLOC_END
11455 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
11456 diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
11457 index c57a301..6b414ff 100644
11458 --- a/arch/x86/include/asm/pgtable_64.h
11459 +++ b/arch/x86/include/asm/pgtable_64.h
11460 @@ -16,10 +16,14 @@
11461
11462 extern pud_t level3_kernel_pgt[512];
11463 extern pud_t level3_ident_pgt[512];
11464 +extern pud_t level3_vmalloc_start_pgt[512];
11465 +extern pud_t level3_vmalloc_end_pgt[512];
11466 +extern pud_t level3_vmemmap_pgt[512];
11467 +extern pud_t level2_vmemmap_pgt[512];
11468 extern pmd_t level2_kernel_pgt[512];
11469 extern pmd_t level2_fixmap_pgt[512];
11470 -extern pmd_t level2_ident_pgt[512];
11471 -extern pgd_t init_level4_pgt[];
11472 +extern pmd_t level2_ident_pgt[512*2];
11473 +extern pgd_t init_level4_pgt[512];
11474
11475 #define swapper_pg_dir init_level4_pgt
11476
11477 @@ -74,7 +78,9 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
11478
11479 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11480 {
11481 + pax_open_kernel();
11482 *pmdp = pmd;
11483 + pax_close_kernel();
11484 }
11485
11486 static inline void native_pmd_clear(pmd_t *pmd)
11487 @@ -94,6 +100,13 @@ static inline void native_pud_clear(pud_t *pud)
11488
11489 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
11490 {
11491 + pax_open_kernel();
11492 + *pgdp = pgd;
11493 + pax_close_kernel();
11494 +}
11495 +
11496 +static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
11497 +{
11498 *pgdp = pgd;
11499 }
11500
11501 diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
11502 index 766ea16..5b96cb3 100644
11503 --- a/arch/x86/include/asm/pgtable_64_types.h
11504 +++ b/arch/x86/include/asm/pgtable_64_types.h
11505 @@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
11506 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
11507 #define MODULES_END _AC(0xffffffffff000000, UL)
11508 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
11509 +#define MODULES_EXEC_VADDR MODULES_VADDR
11510 +#define MODULES_EXEC_END MODULES_END
11511 +
11512 +#define ktla_ktva(addr) (addr)
11513 +#define ktva_ktla(addr) (addr)
11514
11515 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
11516 diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
11517 index d1f4a76..2f46ba1 100644
11518 --- a/arch/x86/include/asm/pgtable_types.h
11519 +++ b/arch/x86/include/asm/pgtable_types.h
11520 @@ -16,12 +16,11 @@
11521 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
11522 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
11523 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
11524 -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
11525 +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
11526 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
11527 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
11528 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
11529 -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
11530 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
11531 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
11532 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
11533
11534 /* If _PAGE_BIT_PRESENT is clear, we use these: */
11535 @@ -39,7 +38,6 @@
11536 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
11537 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
11538 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
11539 -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
11540 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
11541 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
11542 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
11543 @@ -55,8 +53,10 @@
11544
11545 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
11546 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
11547 -#else
11548 +#elif defined(CONFIG_KMEMCHECK)
11549 #define _PAGE_NX (_AT(pteval_t, 0))
11550 +#else
11551 +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
11552 #endif
11553
11554 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
11555 @@ -93,6 +93,9 @@
11556 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
11557 _PAGE_ACCESSED)
11558
11559 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
11560 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
11561 +
11562 #define __PAGE_KERNEL_EXEC \
11563 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
11564 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
11565 @@ -103,8 +106,8 @@
11566 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
11567 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
11568 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
11569 -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
11570 -#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
11571 +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
11572 +#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
11573 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
11574 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
11575 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
11576 @@ -163,8 +166,8 @@
11577 * bits are combined, this will alow user to access the high address mapped
11578 * VDSO in the presence of CONFIG_COMPAT_VDSO
11579 */
11580 -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
11581 -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
11582 +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11583 +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11584 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
11585 #endif
11586
11587 @@ -202,7 +205,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
11588 {
11589 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
11590 }
11591 +#endif
11592
11593 +#if PAGETABLE_LEVELS == 3
11594 +#include <asm-generic/pgtable-nopud.h>
11595 +#endif
11596 +
11597 +#if PAGETABLE_LEVELS == 2
11598 +#include <asm-generic/pgtable-nopmd.h>
11599 +#endif
11600 +
11601 +#ifndef __ASSEMBLY__
11602 #if PAGETABLE_LEVELS > 3
11603 typedef struct { pudval_t pud; } pud_t;
11604
11605 @@ -216,8 +229,6 @@ static inline pudval_t native_pud_val(pud_t pud)
11606 return pud.pud;
11607 }
11608 #else
11609 -#include <asm-generic/pgtable-nopud.h>
11610 -
11611 static inline pudval_t native_pud_val(pud_t pud)
11612 {
11613 return native_pgd_val(pud.pgd);
11614 @@ -237,8 +248,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
11615 return pmd.pmd;
11616 }
11617 #else
11618 -#include <asm-generic/pgtable-nopmd.h>
11619 -
11620 static inline pmdval_t native_pmd_val(pmd_t pmd)
11621 {
11622 return native_pgd_val(pmd.pud.pgd);
11623 @@ -278,7 +287,16 @@ typedef struct page *pgtable_t;
11624
11625 extern pteval_t __supported_pte_mask;
11626 extern void set_nx(void);
11627 +
11628 +#ifdef CONFIG_X86_32
11629 +#ifdef CONFIG_X86_PAE
11630 extern int nx_enabled;
11631 +#else
11632 +#define nx_enabled (0)
11633 +#endif
11634 +#else
11635 +#define nx_enabled (1)
11636 +#endif
11637
11638 #define pgprot_writecombine pgprot_writecombine
11639 extern pgprot_t pgprot_writecombine(pgprot_t prot);
11640 diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
11641 index fa04dea..5f823fc 100644
11642 --- a/arch/x86/include/asm/processor.h
11643 +++ b/arch/x86/include/asm/processor.h
11644 @@ -272,7 +272,7 @@ struct tss_struct {
11645
11646 } ____cacheline_aligned;
11647
11648 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
11649 +extern struct tss_struct init_tss[NR_CPUS];
11650
11651 /*
11652 * Save the original ist values for checking stack pointers during debugging
11653 @@ -911,11 +911,18 @@ static inline void spin_lock_prefetch(const void *x)
11654 */
11655 #define TASK_SIZE PAGE_OFFSET
11656 #define TASK_SIZE_MAX TASK_SIZE
11657 +
11658 +#ifdef CONFIG_PAX_SEGMEXEC
11659 +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
11660 +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
11661 +#else
11662 #define STACK_TOP TASK_SIZE
11663 -#define STACK_TOP_MAX STACK_TOP
11664 +#endif
11665 +
11666 +#define STACK_TOP_MAX TASK_SIZE
11667
11668 #define INIT_THREAD { \
11669 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
11670 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11671 .vm86_info = NULL, \
11672 .sysenter_cs = __KERNEL_CS, \
11673 .io_bitmap_ptr = NULL, \
11674 @@ -929,7 +936,7 @@ static inline void spin_lock_prefetch(const void *x)
11675 */
11676 #define INIT_TSS { \
11677 .x86_tss = { \
11678 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
11679 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11680 .ss0 = __KERNEL_DS, \
11681 .ss1 = __KERNEL_CS, \
11682 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
11683 @@ -940,11 +947,7 @@ static inline void spin_lock_prefetch(const void *x)
11684 extern unsigned long thread_saved_pc(struct task_struct *tsk);
11685
11686 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
11687 -#define KSTK_TOP(info) \
11688 -({ \
11689 - unsigned long *__ptr = (unsigned long *)(info); \
11690 - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
11691 -})
11692 +#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
11693
11694 /*
11695 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
11696 @@ -959,7 +962,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11697 #define task_pt_regs(task) \
11698 ({ \
11699 struct pt_regs *__regs__; \
11700 - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
11701 + __regs__ = (struct pt_regs *)((task)->thread.sp0); \
11702 __regs__ - 1; \
11703 })
11704
11705 @@ -969,13 +972,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11706 /*
11707 * User space process size. 47bits minus one guard page.
11708 */
11709 -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
11710 +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
11711
11712 /* This decides where the kernel will search for a free chunk of vm
11713 * space during mmap's.
11714 */
11715 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
11716 - 0xc0000000 : 0xFFFFe000)
11717 + 0xc0000000 : 0xFFFFf000)
11718
11719 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
11720 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
11721 @@ -986,11 +989,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11722 #define STACK_TOP_MAX TASK_SIZE_MAX
11723
11724 #define INIT_THREAD { \
11725 - .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11726 + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11727 }
11728
11729 #define INIT_TSS { \
11730 - .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11731 + .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11732 }
11733
11734 /*
11735 @@ -1012,6 +1015,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
11736 */
11737 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
11738
11739 +#ifdef CONFIG_PAX_SEGMEXEC
11740 +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
11741 +#endif
11742 +
11743 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
11744
11745 /* Get/set a process' ability to use the timestamp counter instruction */
11746 diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
11747 index 0f0d908..f2e3da2 100644
11748 --- a/arch/x86/include/asm/ptrace.h
11749 +++ b/arch/x86/include/asm/ptrace.h
11750 @@ -151,28 +151,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
11751 }
11752
11753 /*
11754 - * user_mode_vm(regs) determines whether a register set came from user mode.
11755 + * user_mode(regs) determines whether a register set came from user mode.
11756 * This is true if V8086 mode was enabled OR if the register set was from
11757 * protected mode with RPL-3 CS value. This tricky test checks that with
11758 * one comparison. Many places in the kernel can bypass this full check
11759 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
11760 + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
11761 + * be used.
11762 */
11763 -static inline int user_mode(struct pt_regs *regs)
11764 +static inline int user_mode_novm(struct pt_regs *regs)
11765 {
11766 #ifdef CONFIG_X86_32
11767 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
11768 #else
11769 - return !!(regs->cs & 3);
11770 + return !!(regs->cs & SEGMENT_RPL_MASK);
11771 #endif
11772 }
11773
11774 -static inline int user_mode_vm(struct pt_regs *regs)
11775 +static inline int user_mode(struct pt_regs *regs)
11776 {
11777 #ifdef CONFIG_X86_32
11778 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
11779 USER_RPL;
11780 #else
11781 - return user_mode(regs);
11782 + return user_mode_novm(regs);
11783 #endif
11784 }
11785
11786 diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
11787 index 562d4fd..6e39df1 100644
11788 --- a/arch/x86/include/asm/reboot.h
11789 +++ b/arch/x86/include/asm/reboot.h
11790 @@ -6,19 +6,19 @@
11791 struct pt_regs;
11792
11793 struct machine_ops {
11794 - void (*restart)(char *cmd);
11795 - void (*halt)(void);
11796 - void (*power_off)(void);
11797 + void (* __noreturn restart)(char *cmd);
11798 + void (* __noreturn halt)(void);
11799 + void (* __noreturn power_off)(void);
11800 void (*shutdown)(void);
11801 void (*crash_shutdown)(struct pt_regs *);
11802 - void (*emergency_restart)(void);
11803 -};
11804 + void (* __noreturn emergency_restart)(void);
11805 +} __no_const;
11806
11807 extern struct machine_ops machine_ops;
11808
11809 void native_machine_crash_shutdown(struct pt_regs *regs);
11810 void native_machine_shutdown(void);
11811 -void machine_real_restart(const unsigned char *code, int length);
11812 +void machine_real_restart(const unsigned char *code, unsigned int length) __noreturn;
11813
11814 typedef void (*nmi_shootdown_cb)(int, struct die_args*);
11815 void nmi_shootdown_cpus(nmi_shootdown_cb callback);
11816 diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
11817 index 606ede1..dbfff37 100644
11818 --- a/arch/x86/include/asm/rwsem.h
11819 +++ b/arch/x86/include/asm/rwsem.h
11820 @@ -118,6 +118,14 @@ static inline void __down_read(struct rw_semaphore *sem)
11821 {
11822 asm volatile("# beginning down_read\n\t"
11823 LOCK_PREFIX _ASM_INC "(%1)\n\t"
11824 +
11825 +#ifdef CONFIG_PAX_REFCOUNT
11826 + "jno 0f\n"
11827 + LOCK_PREFIX _ASM_DEC "(%1)\n\t"
11828 + "int $4\n0:\n"
11829 + _ASM_EXTABLE(0b, 0b)
11830 +#endif
11831 +
11832 /* adds 0x00000001, returns the old value */
11833 " jns 1f\n"
11834 " call call_rwsem_down_read_failed\n"
11835 @@ -139,6 +147,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
11836 "1:\n\t"
11837 " mov %1,%2\n\t"
11838 " add %3,%2\n\t"
11839 +
11840 +#ifdef CONFIG_PAX_REFCOUNT
11841 + "jno 0f\n"
11842 + "sub %3,%2\n"
11843 + "int $4\n0:\n"
11844 + _ASM_EXTABLE(0b, 0b)
11845 +#endif
11846 +
11847 " jle 2f\n\t"
11848 LOCK_PREFIX " cmpxchg %2,%0\n\t"
11849 " jnz 1b\n\t"
11850 @@ -160,6 +176,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
11851 tmp = RWSEM_ACTIVE_WRITE_BIAS;
11852 asm volatile("# beginning down_write\n\t"
11853 LOCK_PREFIX " xadd %1,(%2)\n\t"
11854 +
11855 +#ifdef CONFIG_PAX_REFCOUNT
11856 + "jno 0f\n"
11857 + "mov %1,(%2)\n"
11858 + "int $4\n0:\n"
11859 + _ASM_EXTABLE(0b, 0b)
11860 +#endif
11861 +
11862 /* subtract 0x0000ffff, returns the old value */
11863 " test %1,%1\n\t"
11864 /* was the count 0 before? */
11865 @@ -198,6 +222,14 @@ static inline void __up_read(struct rw_semaphore *sem)
11866 rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS;
11867 asm volatile("# beginning __up_read\n\t"
11868 LOCK_PREFIX " xadd %1,(%2)\n\t"
11869 +
11870 +#ifdef CONFIG_PAX_REFCOUNT
11871 + "jno 0f\n"
11872 + "mov %1,(%2)\n"
11873 + "int $4\n0:\n"
11874 + _ASM_EXTABLE(0b, 0b)
11875 +#endif
11876 +
11877 /* subtracts 1, returns the old value */
11878 " jns 1f\n\t"
11879 " call call_rwsem_wake\n"
11880 @@ -216,6 +248,14 @@ static inline void __up_write(struct rw_semaphore *sem)
11881 rwsem_count_t tmp;
11882 asm volatile("# beginning __up_write\n\t"
11883 LOCK_PREFIX " xadd %1,(%2)\n\t"
11884 +
11885 +#ifdef CONFIG_PAX_REFCOUNT
11886 + "jno 0f\n"
11887 + "mov %1,(%2)\n"
11888 + "int $4\n0:\n"
11889 + _ASM_EXTABLE(0b, 0b)
11890 +#endif
11891 +
11892 /* tries to transition
11893 0xffff0001 -> 0x00000000 */
11894 " jz 1f\n"
11895 @@ -234,6 +274,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
11896 {
11897 asm volatile("# beginning __downgrade_write\n\t"
11898 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
11899 +
11900 +#ifdef CONFIG_PAX_REFCOUNT
11901 + "jno 0f\n"
11902 + LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
11903 + "int $4\n0:\n"
11904 + _ASM_EXTABLE(0b, 0b)
11905 +#endif
11906 +
11907 /*
11908 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
11909 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
11910 @@ -253,7 +301,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
11911 static inline void rwsem_atomic_add(rwsem_count_t delta,
11912 struct rw_semaphore *sem)
11913 {
11914 - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
11915 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
11916 +
11917 +#ifdef CONFIG_PAX_REFCOUNT
11918 + "jno 0f\n"
11919 + LOCK_PREFIX _ASM_SUB "%1,%0\n"
11920 + "int $4\n0:\n"
11921 + _ASM_EXTABLE(0b, 0b)
11922 +#endif
11923 +
11924 : "+m" (sem->count)
11925 : "er" (delta));
11926 }
11927 @@ -266,7 +322,15 @@ static inline rwsem_count_t rwsem_atomic_update(rwsem_count_t delta,
11928 {
11929 rwsem_count_t tmp = delta;
11930
11931 - asm volatile(LOCK_PREFIX "xadd %0,%1"
11932 + asm volatile(LOCK_PREFIX "xadd %0,%1\n"
11933 +
11934 +#ifdef CONFIG_PAX_REFCOUNT
11935 + "jno 0f\n"
11936 + "mov %0,%1\n"
11937 + "int $4\n0:\n"
11938 + _ASM_EXTABLE(0b, 0b)
11939 +#endif
11940 +
11941 : "+r" (tmp), "+m" (sem->count)
11942 : : "memory");
11943
11944 diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
11945 index 14e0ed8..7f7dd5e 100644
11946 --- a/arch/x86/include/asm/segment.h
11947 +++ b/arch/x86/include/asm/segment.h
11948 @@ -62,10 +62,15 @@
11949 * 26 - ESPFIX small SS
11950 * 27 - per-cpu [ offset to per-cpu data area ]
11951 * 28 - stack_canary-20 [ for stack protector ]
11952 - * 29 - unused
11953 - * 30 - unused
11954 + * 29 - PCI BIOS CS
11955 + * 30 - PCI BIOS DS
11956 * 31 - TSS for double fault handler
11957 */
11958 +#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
11959 +#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
11960 +#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
11961 +#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
11962 +
11963 #define GDT_ENTRY_TLS_MIN 6
11964 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
11965
11966 @@ -77,6 +82,8 @@
11967
11968 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0)
11969
11970 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
11971 +
11972 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1)
11973
11974 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4)
11975 @@ -88,7 +95,7 @@
11976 #define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14)
11977 #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
11978
11979 -#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
11980 +#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
11981 #ifdef CONFIG_SMP
11982 #define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8)
11983 #else
11984 @@ -102,6 +109,12 @@
11985 #define __KERNEL_STACK_CANARY 0
11986 #endif
11987
11988 +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE + 17)
11989 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
11990 +
11991 +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE + 18)
11992 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
11993 +
11994 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
11995
11996 /*
11997 @@ -139,7 +152,7 @@
11998 */
11999
12000 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
12001 -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
12002 +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
12003
12004
12005 #else
12006 @@ -163,6 +176,8 @@
12007 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
12008 #define __USER32_DS __USER_DS
12009
12010 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
12011 +
12012 #define GDT_ENTRY_TSS 8 /* needs two entries */
12013 #define GDT_ENTRY_LDT 10 /* needs two entries */
12014 #define GDT_ENTRY_TLS_MIN 12
12015 @@ -183,6 +198,7 @@
12016 #endif
12017
12018 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8)
12019 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS * 8)
12020 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8)
12021 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS* 8 + 3)
12022 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS* 8 + 3)
12023 diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
12024 index 4c2f63c..5685db2 100644
12025 --- a/arch/x86/include/asm/smp.h
12026 +++ b/arch/x86/include/asm/smp.h
12027 @@ -24,7 +24,7 @@ extern unsigned int num_processors;
12028 DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
12029 DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
12030 DECLARE_PER_CPU(u16, cpu_llc_id);
12031 -DECLARE_PER_CPU(int, cpu_number);
12032 +DECLARE_PER_CPU(unsigned int, cpu_number);
12033
12034 static inline struct cpumask *cpu_sibling_mask(int cpu)
12035 {
12036 @@ -40,10 +40,7 @@ DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
12037 DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
12038
12039 /* Static state in head.S used to set up a CPU */
12040 -extern struct {
12041 - void *sp;
12042 - unsigned short ss;
12043 -} stack_start;
12044 +extern unsigned long stack_start; /* Initial stack pointer address */
12045
12046 struct smp_ops {
12047 void (*smp_prepare_boot_cpu)(void);
12048 @@ -60,7 +57,7 @@ struct smp_ops {
12049
12050 void (*send_call_func_ipi)(const struct cpumask *mask);
12051 void (*send_call_func_single_ipi)(int cpu);
12052 -};
12053 +} __no_const;
12054
12055 /* Globals due to paravirt */
12056 extern void set_cpu_sibling_map(int cpu);
12057 @@ -175,14 +172,8 @@ extern unsigned disabled_cpus __cpuinitdata;
12058 extern int safe_smp_processor_id(void);
12059
12060 #elif defined(CONFIG_X86_64_SMP)
12061 -#define raw_smp_processor_id() (percpu_read(cpu_number))
12062 -
12063 -#define stack_smp_processor_id() \
12064 -({ \
12065 - struct thread_info *ti; \
12066 - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
12067 - ti->cpu; \
12068 -})
12069 +#define raw_smp_processor_id() (percpu_read(cpu_number))
12070 +#define stack_smp_processor_id() raw_smp_processor_id()
12071 #define safe_smp_processor_id() smp_processor_id()
12072
12073 #endif
12074 diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
12075 index 4e77853..4359783 100644
12076 --- a/arch/x86/include/asm/spinlock.h
12077 +++ b/arch/x86/include/asm/spinlock.h
12078 @@ -249,6 +249,14 @@ static inline int __raw_write_can_lock(raw_rwlock_t *lock)
12079 static inline void __raw_read_lock(raw_rwlock_t *rw)
12080 {
12081 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
12082 +
12083 +#ifdef CONFIG_PAX_REFCOUNT
12084 + "jno 0f\n"
12085 + LOCK_PREFIX " addl $1,(%0)\n"
12086 + "int $4\n0:\n"
12087 + _ASM_EXTABLE(0b, 0b)
12088 +#endif
12089 +
12090 "jns 1f\n"
12091 "call __read_lock_failed\n\t"
12092 "1:\n"
12093 @@ -258,6 +266,14 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
12094 static inline void __raw_write_lock(raw_rwlock_t *rw)
12095 {
12096 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
12097 +
12098 +#ifdef CONFIG_PAX_REFCOUNT
12099 + "jno 0f\n"
12100 + LOCK_PREFIX " addl %1,(%0)\n"
12101 + "int $4\n0:\n"
12102 + _ASM_EXTABLE(0b, 0b)
12103 +#endif
12104 +
12105 "jz 1f\n"
12106 "call __write_lock_failed\n\t"
12107 "1:\n"
12108 @@ -286,12 +302,29 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock)
12109
12110 static inline void __raw_read_unlock(raw_rwlock_t *rw)
12111 {
12112 - asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
12113 + asm volatile(LOCK_PREFIX "incl %0\n"
12114 +
12115 +#ifdef CONFIG_PAX_REFCOUNT
12116 + "jno 0f\n"
12117 + LOCK_PREFIX "decl %0\n"
12118 + "int $4\n0:\n"
12119 + _ASM_EXTABLE(0b, 0b)
12120 +#endif
12121 +
12122 + :"+m" (rw->lock) : : "memory");
12123 }
12124
12125 static inline void __raw_write_unlock(raw_rwlock_t *rw)
12126 {
12127 - asm volatile(LOCK_PREFIX "addl %1, %0"
12128 + asm volatile(LOCK_PREFIX "addl %1, %0\n"
12129 +
12130 +#ifdef CONFIG_PAX_REFCOUNT
12131 + "jno 0f\n"
12132 + LOCK_PREFIX "subl %1, %0\n"
12133 + "int $4\n0:\n"
12134 + _ASM_EXTABLE(0b, 0b)
12135 +#endif
12136 +
12137 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
12138 }
12139
12140 diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
12141 index 1575177..cb23f52 100644
12142 --- a/arch/x86/include/asm/stackprotector.h
12143 +++ b/arch/x86/include/asm/stackprotector.h
12144 @@ -48,7 +48,7 @@
12145 * head_32 for boot CPU and setup_per_cpu_areas() for others.
12146 */
12147 #define GDT_STACK_CANARY_INIT \
12148 - [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
12149 + [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
12150
12151 /*
12152 * Initialize the stackprotector canary value.
12153 @@ -113,7 +113,7 @@ static inline void setup_stack_canary_segment(int cpu)
12154
12155 static inline void load_stack_canary_segment(void)
12156 {
12157 -#ifdef CONFIG_X86_32
12158 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
12159 asm volatile ("mov %0, %%gs" : : "r" (0));
12160 #endif
12161 }
12162 diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
12163 index e0fbf29..858ef4a 100644
12164 --- a/arch/x86/include/asm/system.h
12165 +++ b/arch/x86/include/asm/system.h
12166 @@ -132,7 +132,7 @@ do { \
12167 "thread_return:\n\t" \
12168 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
12169 __switch_canary \
12170 - "movq %P[thread_info](%%rsi),%%r8\n\t" \
12171 + "movq "__percpu_arg([thread_info])",%%r8\n\t" \
12172 "movq %%rax,%%rdi\n\t" \
12173 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
12174 "jnz ret_from_fork\n\t" \
12175 @@ -143,7 +143,7 @@ do { \
12176 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
12177 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
12178 [_tif_fork] "i" (_TIF_FORK), \
12179 - [thread_info] "i" (offsetof(struct task_struct, stack)), \
12180 + [thread_info] "m" (per_cpu_var(current_tinfo)), \
12181 [current_task] "m" (per_cpu_var(current_task)) \
12182 __switch_canary_iparam \
12183 : "memory", "cc" __EXTRA_CLOBBER)
12184 @@ -200,7 +200,7 @@ static inline unsigned long get_limit(unsigned long segment)
12185 {
12186 unsigned long __limit;
12187 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
12188 - return __limit + 1;
12189 + return __limit;
12190 }
12191
12192 static inline void native_clts(void)
12193 @@ -340,12 +340,12 @@ void enable_hlt(void);
12194
12195 void cpu_idle_wait(void);
12196
12197 -extern unsigned long arch_align_stack(unsigned long sp);
12198 +#define arch_align_stack(x) ((x) & ~0xfUL)
12199 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
12200
12201 void default_idle(void);
12202
12203 -void stop_this_cpu(void *dummy);
12204 +void stop_this_cpu(void *dummy) __noreturn;
12205
12206 /*
12207 * Force strict CPU ordering.
12208 diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
12209 index 19c3ce4..8962535 100644
12210 --- a/arch/x86/include/asm/thread_info.h
12211 +++ b/arch/x86/include/asm/thread_info.h
12212 @@ -10,6 +10,7 @@
12213 #include <linux/compiler.h>
12214 #include <asm/page.h>
12215 #include <asm/types.h>
12216 +#include <asm/percpu.h>
12217
12218 /*
12219 * low level task data that entry.S needs immediate access to
12220 @@ -24,7 +25,6 @@ struct exec_domain;
12221 #include <asm/atomic.h>
12222
12223 struct thread_info {
12224 - struct task_struct *task; /* main task structure */
12225 struct exec_domain *exec_domain; /* execution domain */
12226 __u32 flags; /* low level flags */
12227 __u32 status; /* thread synchronous flags */
12228 @@ -34,18 +34,12 @@ struct thread_info {
12229 mm_segment_t addr_limit;
12230 struct restart_block restart_block;
12231 void __user *sysenter_return;
12232 -#ifdef CONFIG_X86_32
12233 - unsigned long previous_esp; /* ESP of the previous stack in
12234 - case of nested (IRQ) stacks
12235 - */
12236 - __u8 supervisor_stack[0];
12237 -#endif
12238 + unsigned long lowest_stack;
12239 int uaccess_err;
12240 };
12241
12242 -#define INIT_THREAD_INFO(tsk) \
12243 +#define INIT_THREAD_INFO \
12244 { \
12245 - .task = &tsk, \
12246 .exec_domain = &default_exec_domain, \
12247 .flags = 0, \
12248 .cpu = 0, \
12249 @@ -56,7 +50,7 @@ struct thread_info {
12250 }, \
12251 }
12252
12253 -#define init_thread_info (init_thread_union.thread_info)
12254 +#define init_thread_info (init_thread_union.stack)
12255 #define init_stack (init_thread_union.stack)
12256
12257 #else /* !__ASSEMBLY__ */
12258 @@ -163,45 +157,40 @@ struct thread_info {
12259 #define alloc_thread_info(tsk) \
12260 ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
12261
12262 -#ifdef CONFIG_X86_32
12263 -
12264 -#define STACK_WARN (THREAD_SIZE/8)
12265 -/*
12266 - * macros/functions for gaining access to the thread information structure
12267 - *
12268 - * preempt_count needs to be 1 initially, until the scheduler is functional.
12269 - */
12270 -#ifndef __ASSEMBLY__
12271 -
12272 -
12273 -/* how to get the current stack pointer from C */
12274 -register unsigned long current_stack_pointer asm("esp") __used;
12275 -
12276 -/* how to get the thread information struct from C */
12277 -static inline struct thread_info *current_thread_info(void)
12278 -{
12279 - return (struct thread_info *)
12280 - (current_stack_pointer & ~(THREAD_SIZE - 1));
12281 -}
12282 -
12283 -#else /* !__ASSEMBLY__ */
12284 -
12285 +#ifdef __ASSEMBLY__
12286 /* how to get the thread information struct from ASM */
12287 #define GET_THREAD_INFO(reg) \
12288 - movl $-THREAD_SIZE, reg; \
12289 - andl %esp, reg
12290 + mov PER_CPU_VAR(current_tinfo), reg
12291
12292 /* use this one if reg already contains %esp */
12293 -#define GET_THREAD_INFO_WITH_ESP(reg) \
12294 - andl $-THREAD_SIZE, reg
12295 +#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
12296 +#else
12297 +/* how to get the thread information struct from C */
12298 +DECLARE_PER_CPU(struct thread_info *, current_tinfo);
12299 +
12300 +static __always_inline struct thread_info *current_thread_info(void)
12301 +{
12302 + return percpu_read_stable(current_tinfo);
12303 +}
12304 +#endif
12305 +
12306 +#ifdef CONFIG_X86_32
12307 +
12308 +#define STACK_WARN (THREAD_SIZE/8)
12309 +/*
12310 + * macros/functions for gaining access to the thread information structure
12311 + *
12312 + * preempt_count needs to be 1 initially, until the scheduler is functional.
12313 + */
12314 +#ifndef __ASSEMBLY__
12315 +
12316 +/* how to get the current stack pointer from C */
12317 +register unsigned long current_stack_pointer asm("esp") __used;
12318
12319 #endif
12320
12321 #else /* X86_32 */
12322
12323 -#include <asm/percpu.h>
12324 -#define KERNEL_STACK_OFFSET (5*8)
12325 -
12326 /*
12327 * macros/functions for gaining access to the thread information structure
12328 * preempt_count needs to be 1 initially, until the scheduler is functional.
12329 @@ -209,21 +198,8 @@ static inline struct thread_info *current_thread_info(void)
12330 #ifndef __ASSEMBLY__
12331 DECLARE_PER_CPU(unsigned long, kernel_stack);
12332
12333 -static inline struct thread_info *current_thread_info(void)
12334 -{
12335 - struct thread_info *ti;
12336 - ti = (void *)(percpu_read_stable(kernel_stack) +
12337 - KERNEL_STACK_OFFSET - THREAD_SIZE);
12338 - return ti;
12339 -}
12340 -
12341 -#else /* !__ASSEMBLY__ */
12342 -
12343 -/* how to get the thread information struct from ASM */
12344 -#define GET_THREAD_INFO(reg) \
12345 - movq PER_CPU_VAR(kernel_stack),reg ; \
12346 - subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
12347 -
12348 +/* how to get the current stack pointer from C */
12349 +register unsigned long current_stack_pointer asm("rsp") __used;
12350 #endif
12351
12352 #endif /* !X86_32 */
12353 @@ -260,5 +236,16 @@ extern void arch_task_cache_init(void);
12354 extern void free_thread_info(struct thread_info *ti);
12355 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
12356 #define arch_task_cache_init arch_task_cache_init
12357 +
12358 +#define __HAVE_THREAD_FUNCTIONS
12359 +#define task_thread_info(task) (&(task)->tinfo)
12360 +#define task_stack_page(task) ((task)->stack)
12361 +#define setup_thread_stack(p, org) do {} while (0)
12362 +#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
12363 +
12364 +#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
12365 +extern struct task_struct *alloc_task_struct(void);
12366 +extern void free_task_struct(struct task_struct *);
12367 +
12368 #endif
12369 #endif /* _ASM_X86_THREAD_INFO_H */
12370 diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
12371 index 61c5874..8a046e9 100644
12372 --- a/arch/x86/include/asm/uaccess.h
12373 +++ b/arch/x86/include/asm/uaccess.h
12374 @@ -8,12 +8,15 @@
12375 #include <linux/thread_info.h>
12376 #include <linux/prefetch.h>
12377 #include <linux/string.h>
12378 +#include <linux/sched.h>
12379 #include <asm/asm.h>
12380 #include <asm/page.h>
12381
12382 #define VERIFY_READ 0
12383 #define VERIFY_WRITE 1
12384
12385 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
12386 +
12387 /*
12388 * The fs value determines whether argument validity checking should be
12389 * performed or not. If get_fs() == USER_DS, checking is performed, with
12390 @@ -29,7 +32,12 @@
12391
12392 #define get_ds() (KERNEL_DS)
12393 #define get_fs() (current_thread_info()->addr_limit)
12394 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12395 +void __set_fs(mm_segment_t x);
12396 +void set_fs(mm_segment_t x);
12397 +#else
12398 #define set_fs(x) (current_thread_info()->addr_limit = (x))
12399 +#endif
12400
12401 #define segment_eq(a, b) ((a).seg == (b).seg)
12402
12403 @@ -77,7 +85,33 @@
12404 * checks that the pointer is in the user space range - after calling
12405 * this function, memory access functions may still return -EFAULT.
12406 */
12407 -#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12408 +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12409 +#define access_ok(type, addr, size) \
12410 +({ \
12411 + long __size = size; \
12412 + unsigned long __addr = (unsigned long)addr; \
12413 + unsigned long __addr_ao = __addr & PAGE_MASK; \
12414 + unsigned long __end_ao = __addr + __size - 1; \
12415 + bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
12416 + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
12417 + while(__addr_ao <= __end_ao) { \
12418 + char __c_ao; \
12419 + __addr_ao += PAGE_SIZE; \
12420 + if (__size > PAGE_SIZE) \
12421 + cond_resched(); \
12422 + if (__get_user(__c_ao, (char __user *)__addr)) \
12423 + break; \
12424 + if (type != VERIFY_WRITE) { \
12425 + __addr = __addr_ao; \
12426 + continue; \
12427 + } \
12428 + if (__put_user(__c_ao, (char __user *)__addr)) \
12429 + break; \
12430 + __addr = __addr_ao; \
12431 + } \
12432 + } \
12433 + __ret_ao; \
12434 +})
12435
12436 /*
12437 * The exception table consists of pairs of addresses: the first is the
12438 @@ -183,12 +217,20 @@ extern int __get_user_bad(void);
12439 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
12440 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
12441
12442 -
12443 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12444 +#define __copyuser_seg "gs;"
12445 +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
12446 +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
12447 +#else
12448 +#define __copyuser_seg
12449 +#define __COPYUSER_SET_ES
12450 +#define __COPYUSER_RESTORE_ES
12451 +#endif
12452
12453 #ifdef CONFIG_X86_32
12454 #define __put_user_asm_u64(x, addr, err, errret) \
12455 - asm volatile("1: movl %%eax,0(%2)\n" \
12456 - "2: movl %%edx,4(%2)\n" \
12457 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
12458 + "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
12459 "3:\n" \
12460 ".section .fixup,\"ax\"\n" \
12461 "4: movl %3,%0\n" \
12462 @@ -200,8 +242,8 @@ extern int __get_user_bad(void);
12463 : "A" (x), "r" (addr), "i" (errret), "0" (err))
12464
12465 #define __put_user_asm_ex_u64(x, addr) \
12466 - asm volatile("1: movl %%eax,0(%1)\n" \
12467 - "2: movl %%edx,4(%1)\n" \
12468 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
12469 + "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
12470 "3:\n" \
12471 _ASM_EXTABLE(1b, 2b - 1b) \
12472 _ASM_EXTABLE(2b, 3b - 2b) \
12473 @@ -253,7 +295,7 @@ extern void __put_user_8(void);
12474 __typeof__(*(ptr)) __pu_val; \
12475 __chk_user_ptr(ptr); \
12476 might_fault(); \
12477 - __pu_val = x; \
12478 + __pu_val = (x); \
12479 switch (sizeof(*(ptr))) { \
12480 case 1: \
12481 __put_user_x(1, __pu_val, ptr, __ret_pu); \
12482 @@ -374,7 +416,7 @@ do { \
12483 } while (0)
12484
12485 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12486 - asm volatile("1: mov"itype" %2,%"rtype"1\n" \
12487 + asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
12488 "2:\n" \
12489 ".section .fixup,\"ax\"\n" \
12490 "3: mov %3,%0\n" \
12491 @@ -382,7 +424,7 @@ do { \
12492 " jmp 2b\n" \
12493 ".previous\n" \
12494 _ASM_EXTABLE(1b, 3b) \
12495 - : "=r" (err), ltype(x) \
12496 + : "=r" (err), ltype (x) \
12497 : "m" (__m(addr)), "i" (errret), "0" (err))
12498
12499 #define __get_user_size_ex(x, ptr, size) \
12500 @@ -407,7 +449,7 @@ do { \
12501 } while (0)
12502
12503 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
12504 - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
12505 + asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
12506 "2:\n" \
12507 _ASM_EXTABLE(1b, 2b - 1b) \
12508 : ltype(x) : "m" (__m(addr)))
12509 @@ -424,13 +466,24 @@ do { \
12510 int __gu_err; \
12511 unsigned long __gu_val; \
12512 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
12513 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
12514 + (x) = (__typeof__(*(ptr)))__gu_val; \
12515 __gu_err; \
12516 })
12517
12518 /* FIXME: this hack is definitely wrong -AK */
12519 struct __large_struct { unsigned long buf[100]; };
12520 -#define __m(x) (*(struct __large_struct __user *)(x))
12521 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12522 +#define ____m(x) \
12523 +({ \
12524 + unsigned long ____x = (unsigned long)(x); \
12525 + if (____x < PAX_USER_SHADOW_BASE) \
12526 + ____x += PAX_USER_SHADOW_BASE; \
12527 + (void __user *)____x; \
12528 +})
12529 +#else
12530 +#define ____m(x) (x)
12531 +#endif
12532 +#define __m(x) (*(struct __large_struct __user *)____m(x))
12533
12534 /*
12535 * Tell gcc we read from memory instead of writing: this is because
12536 @@ -438,7 +491,7 @@ struct __large_struct { unsigned long buf[100]; };
12537 * aliasing issues.
12538 */
12539 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12540 - asm volatile("1: mov"itype" %"rtype"1,%2\n" \
12541 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
12542 "2:\n" \
12543 ".section .fixup,\"ax\"\n" \
12544 "3: mov %3,%0\n" \
12545 @@ -446,10 +499,10 @@ struct __large_struct { unsigned long buf[100]; };
12546 ".previous\n" \
12547 _ASM_EXTABLE(1b, 3b) \
12548 : "=r"(err) \
12549 - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
12550 + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
12551
12552 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
12553 - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
12554 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
12555 "2:\n" \
12556 _ASM_EXTABLE(1b, 2b - 1b) \
12557 : : ltype(x), "m" (__m(addr)))
12558 @@ -488,8 +541,12 @@ struct __large_struct { unsigned long buf[100]; };
12559 * On error, the variable @x is set to zero.
12560 */
12561
12562 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12563 +#define __get_user(x, ptr) get_user((x), (ptr))
12564 +#else
12565 #define __get_user(x, ptr) \
12566 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
12567 +#endif
12568
12569 /**
12570 * __put_user: - Write a simple value into user space, with less checking.
12571 @@ -511,8 +568,12 @@ struct __large_struct { unsigned long buf[100]; };
12572 * Returns zero on success, or -EFAULT on error.
12573 */
12574
12575 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12576 +#define __put_user(x, ptr) put_user((x), (ptr))
12577 +#else
12578 #define __put_user(x, ptr) \
12579 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
12580 +#endif
12581
12582 #define __get_user_unaligned __get_user
12583 #define __put_user_unaligned __put_user
12584 @@ -530,7 +591,7 @@ struct __large_struct { unsigned long buf[100]; };
12585 #define get_user_ex(x, ptr) do { \
12586 unsigned long __gue_val; \
12587 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
12588 - (x) = (__force __typeof__(*(ptr)))__gue_val; \
12589 + (x) = (__typeof__(*(ptr)))__gue_val; \
12590 } while (0)
12591
12592 #ifdef CONFIG_X86_WP_WORKS_OK
12593 @@ -567,6 +628,7 @@ extern struct movsl_mask {
12594
12595 #define ARCH_HAS_NOCACHE_UACCESS 1
12596
12597 +#define ARCH_HAS_SORT_EXTABLE
12598 #ifdef CONFIG_X86_32
12599 # include "uaccess_32.h"
12600 #else
12601 diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
12602 index 632fb44..e30e334 100644
12603 --- a/arch/x86/include/asm/uaccess_32.h
12604 +++ b/arch/x86/include/asm/uaccess_32.h
12605 @@ -44,6 +44,11 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
12606 static __always_inline unsigned long __must_check
12607 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12608 {
12609 + pax_track_stack();
12610 +
12611 + if ((long)n < 0)
12612 + return n;
12613 +
12614 if (__builtin_constant_p(n)) {
12615 unsigned long ret;
12616
12617 @@ -62,6 +67,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12618 return ret;
12619 }
12620 }
12621 + if (!__builtin_constant_p(n))
12622 + check_object_size(from, n, true);
12623 return __copy_to_user_ll(to, from, n);
12624 }
12625
12626 @@ -83,12 +90,16 @@ static __always_inline unsigned long __must_check
12627 __copy_to_user(void __user *to, const void *from, unsigned long n)
12628 {
12629 might_fault();
12630 +
12631 return __copy_to_user_inatomic(to, from, n);
12632 }
12633
12634 static __always_inline unsigned long
12635 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
12636 {
12637 + if ((long)n < 0)
12638 + return n;
12639 +
12640 /* Avoid zeroing the tail if the copy fails..
12641 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
12642 * but as the zeroing behaviour is only significant when n is not
12643 @@ -138,6 +149,12 @@ static __always_inline unsigned long
12644 __copy_from_user(void *to, const void __user *from, unsigned long n)
12645 {
12646 might_fault();
12647 +
12648 + pax_track_stack();
12649 +
12650 + if ((long)n < 0)
12651 + return n;
12652 +
12653 if (__builtin_constant_p(n)) {
12654 unsigned long ret;
12655
12656 @@ -153,6 +170,8 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
12657 return ret;
12658 }
12659 }
12660 + if (!__builtin_constant_p(n))
12661 + check_object_size(to, n, false);
12662 return __copy_from_user_ll(to, from, n);
12663 }
12664
12665 @@ -160,6 +179,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
12666 const void __user *from, unsigned long n)
12667 {
12668 might_fault();
12669 +
12670 + if ((long)n < 0)
12671 + return n;
12672 +
12673 if (__builtin_constant_p(n)) {
12674 unsigned long ret;
12675
12676 @@ -182,14 +205,62 @@ static __always_inline unsigned long
12677 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
12678 unsigned long n)
12679 {
12680 - return __copy_from_user_ll_nocache_nozero(to, from, n);
12681 + if ((long)n < 0)
12682 + return n;
12683 +
12684 + return __copy_from_user_ll_nocache_nozero(to, from, n);
12685 +}
12686 +
12687 +/**
12688 + * copy_to_user: - Copy a block of data into user space.
12689 + * @to: Destination address, in user space.
12690 + * @from: Source address, in kernel space.
12691 + * @n: Number of bytes to copy.
12692 + *
12693 + * Context: User context only. This function may sleep.
12694 + *
12695 + * Copy data from kernel space to user space.
12696 + *
12697 + * Returns number of bytes that could not be copied.
12698 + * On success, this will be zero.
12699 + */
12700 +static __always_inline unsigned long __must_check
12701 +copy_to_user(void __user *to, const void *from, unsigned long n)
12702 +{
12703 + if (access_ok(VERIFY_WRITE, to, n))
12704 + n = __copy_to_user(to, from, n);
12705 + return n;
12706 +}
12707 +
12708 +/**
12709 + * copy_from_user: - Copy a block of data from user space.
12710 + * @to: Destination address, in kernel space.
12711 + * @from: Source address, in user space.
12712 + * @n: Number of bytes to copy.
12713 + *
12714 + * Context: User context only. This function may sleep.
12715 + *
12716 + * Copy data from user space to kernel space.
12717 + *
12718 + * Returns number of bytes that could not be copied.
12719 + * On success, this will be zero.
12720 + *
12721 + * If some data could not be copied, this function will pad the copied
12722 + * data to the requested size using zero bytes.
12723 + */
12724 +static __always_inline unsigned long __must_check
12725 +copy_from_user(void *to, const void __user *from, unsigned long n)
12726 +{
12727 + if (access_ok(VERIFY_READ, from, n))
12728 + n = __copy_from_user(to, from, n);
12729 + else if ((long)n > 0) {
12730 + if (!__builtin_constant_p(n))
12731 + check_object_size(to, n, false);
12732 + memset(to, 0, n);
12733 + }
12734 + return n;
12735 }
12736
12737 -unsigned long __must_check copy_to_user(void __user *to,
12738 - const void *from, unsigned long n);
12739 -unsigned long __must_check copy_from_user(void *to,
12740 - const void __user *from,
12741 - unsigned long n);
12742 long __must_check strncpy_from_user(char *dst, const char __user *src,
12743 long count);
12744 long __must_check __strncpy_from_user(char *dst,
12745 diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
12746 index db24b21..f595ae7 100644
12747 --- a/arch/x86/include/asm/uaccess_64.h
12748 +++ b/arch/x86/include/asm/uaccess_64.h
12749 @@ -9,6 +9,9 @@
12750 #include <linux/prefetch.h>
12751 #include <linux/lockdep.h>
12752 #include <asm/page.h>
12753 +#include <asm/pgtable.h>
12754 +
12755 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
12756
12757 /*
12758 * Copy To/From Userspace
12759 @@ -16,116 +19,205 @@
12760
12761 /* Handles exceptions in both to and from, but doesn't do access_ok */
12762 __must_check unsigned long
12763 -copy_user_generic(void *to, const void *from, unsigned len);
12764 +copy_user_generic(void *to, const void *from, unsigned long len);
12765
12766 __must_check unsigned long
12767 -copy_to_user(void __user *to, const void *from, unsigned len);
12768 -__must_check unsigned long
12769 -copy_from_user(void *to, const void __user *from, unsigned len);
12770 -__must_check unsigned long
12771 -copy_in_user(void __user *to, const void __user *from, unsigned len);
12772 +copy_in_user(void __user *to, const void __user *from, unsigned long len);
12773
12774 static __always_inline __must_check
12775 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
12776 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
12777 {
12778 - int ret = 0;
12779 + unsigned ret = 0;
12780
12781 might_fault();
12782 - if (!__builtin_constant_p(size))
12783 - return copy_user_generic(dst, (__force void *)src, size);
12784 +
12785 + if (size > INT_MAX)
12786 + return size;
12787 +
12788 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12789 + if (!__access_ok(VERIFY_READ, src, size))
12790 + return size;
12791 +#endif
12792 +
12793 + if (!__builtin_constant_p(size)) {
12794 + check_object_size(dst, size, false);
12795 +
12796 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12797 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12798 + src += PAX_USER_SHADOW_BASE;
12799 +#endif
12800 +
12801 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
12802 + }
12803 switch (size) {
12804 - case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
12805 + case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
12806 ret, "b", "b", "=q", 1);
12807 return ret;
12808 - case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
12809 + case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
12810 ret, "w", "w", "=r", 2);
12811 return ret;
12812 - case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
12813 + case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
12814 ret, "l", "k", "=r", 4);
12815 return ret;
12816 - case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
12817 + case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12818 ret, "q", "", "=r", 8);
12819 return ret;
12820 case 10:
12821 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
12822 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12823 ret, "q", "", "=r", 10);
12824 if (unlikely(ret))
12825 return ret;
12826 __get_user_asm(*(u16 *)(8 + (char *)dst),
12827 - (u16 __user *)(8 + (char __user *)src),
12828 + (const u16 __user *)(8 + (const char __user *)src),
12829 ret, "w", "w", "=r", 2);
12830 return ret;
12831 case 16:
12832 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
12833 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12834 ret, "q", "", "=r", 16);
12835 if (unlikely(ret))
12836 return ret;
12837 __get_user_asm(*(u64 *)(8 + (char *)dst),
12838 - (u64 __user *)(8 + (char __user *)src),
12839 + (const u64 __user *)(8 + (const char __user *)src),
12840 ret, "q", "", "=r", 8);
12841 return ret;
12842 default:
12843 - return copy_user_generic(dst, (__force void *)src, size);
12844 +
12845 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12846 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12847 + src += PAX_USER_SHADOW_BASE;
12848 +#endif
12849 +
12850 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
12851 }
12852 }
12853
12854 static __always_inline __must_check
12855 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
12856 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
12857 {
12858 - int ret = 0;
12859 + unsigned ret = 0;
12860
12861 might_fault();
12862 - if (!__builtin_constant_p(size))
12863 - return copy_user_generic((__force void *)dst, src, size);
12864 +
12865 + pax_track_stack();
12866 +
12867 + if (size > INT_MAX)
12868 + return size;
12869 +
12870 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12871 + if (!__access_ok(VERIFY_WRITE, dst, size))
12872 + return size;
12873 +#endif
12874 +
12875 + if (!__builtin_constant_p(size)) {
12876 + check_object_size(src, size, true);
12877 +
12878 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12879 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12880 + dst += PAX_USER_SHADOW_BASE;
12881 +#endif
12882 +
12883 + return copy_user_generic((__force_kernel void *)dst, src, size);
12884 + }
12885 switch (size) {
12886 - case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
12887 + case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
12888 ret, "b", "b", "iq", 1);
12889 return ret;
12890 - case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
12891 + case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
12892 ret, "w", "w", "ir", 2);
12893 return ret;
12894 - case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
12895 + case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
12896 ret, "l", "k", "ir", 4);
12897 return ret;
12898 - case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
12899 + case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
12900 ret, "q", "", "er", 8);
12901 return ret;
12902 case 10:
12903 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
12904 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
12905 ret, "q", "", "er", 10);
12906 if (unlikely(ret))
12907 return ret;
12908 asm("":::"memory");
12909 - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
12910 + __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
12911 ret, "w", "w", "ir", 2);
12912 return ret;
12913 case 16:
12914 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
12915 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
12916 ret, "q", "", "er", 16);
12917 if (unlikely(ret))
12918 return ret;
12919 asm("":::"memory");
12920 - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
12921 + __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
12922 ret, "q", "", "er", 8);
12923 return ret;
12924 default:
12925 - return copy_user_generic((__force void *)dst, src, size);
12926 +
12927 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12928 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12929 + dst += PAX_USER_SHADOW_BASE;
12930 +#endif
12931 +
12932 + return copy_user_generic((__force_kernel void *)dst, src, size);
12933 + }
12934 +}
12935 +
12936 +static __always_inline __must_check
12937 +unsigned long copy_to_user(void __user *to, const void *from, unsigned long len)
12938 +{
12939 + if (access_ok(VERIFY_WRITE, to, len))
12940 + len = __copy_to_user(to, from, len);
12941 + return len;
12942 +}
12943 +
12944 +static __always_inline __must_check
12945 +unsigned long copy_from_user(void *to, const void __user *from, unsigned long len)
12946 +{
12947 + might_fault();
12948 +
12949 + if (access_ok(VERIFY_READ, from, len))
12950 + len = __copy_from_user(to, from, len);
12951 + else if (len < INT_MAX) {
12952 + if (!__builtin_constant_p(len))
12953 + check_object_size(to, len, false);
12954 + memset(to, 0, len);
12955 }
12956 + return len;
12957 }
12958
12959 static __always_inline __must_check
12960 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12961 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
12962 {
12963 - int ret = 0;
12964 + unsigned ret = 0;
12965
12966 might_fault();
12967 - if (!__builtin_constant_p(size))
12968 - return copy_user_generic((__force void *)dst,
12969 - (__force void *)src, size);
12970 +
12971 + pax_track_stack();
12972 +
12973 + if (size > INT_MAX)
12974 + return size;
12975 +
12976 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12977 + if (!__access_ok(VERIFY_READ, src, size))
12978 + return size;
12979 + if (!__access_ok(VERIFY_WRITE, dst, size))
12980 + return size;
12981 +#endif
12982 +
12983 + if (!__builtin_constant_p(size)) {
12984 +
12985 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12986 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12987 + src += PAX_USER_SHADOW_BASE;
12988 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12989 + dst += PAX_USER_SHADOW_BASE;
12990 +#endif
12991 +
12992 + return copy_user_generic((__force_kernel void *)dst,
12993 + (__force_kernel const void *)src, size);
12994 + }
12995 switch (size) {
12996 case 1: {
12997 u8 tmp;
12998 - __get_user_asm(tmp, (u8 __user *)src,
12999 + __get_user_asm(tmp, (const u8 __user *)src,
13000 ret, "b", "b", "=q", 1);
13001 if (likely(!ret))
13002 __put_user_asm(tmp, (u8 __user *)dst,
13003 @@ -134,7 +226,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13004 }
13005 case 2: {
13006 u16 tmp;
13007 - __get_user_asm(tmp, (u16 __user *)src,
13008 + __get_user_asm(tmp, (const u16 __user *)src,
13009 ret, "w", "w", "=r", 2);
13010 if (likely(!ret))
13011 __put_user_asm(tmp, (u16 __user *)dst,
13012 @@ -144,7 +236,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13013
13014 case 4: {
13015 u32 tmp;
13016 - __get_user_asm(tmp, (u32 __user *)src,
13017 + __get_user_asm(tmp, (const u32 __user *)src,
13018 ret, "l", "k", "=r", 4);
13019 if (likely(!ret))
13020 __put_user_asm(tmp, (u32 __user *)dst,
13021 @@ -153,7 +245,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13022 }
13023 case 8: {
13024 u64 tmp;
13025 - __get_user_asm(tmp, (u64 __user *)src,
13026 + __get_user_asm(tmp, (const u64 __user *)src,
13027 ret, "q", "", "=r", 8);
13028 if (likely(!ret))
13029 __put_user_asm(tmp, (u64 __user *)dst,
13030 @@ -161,8 +253,16 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13031 return ret;
13032 }
13033 default:
13034 - return copy_user_generic((__force void *)dst,
13035 - (__force void *)src, size);
13036 +
13037 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13038 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13039 + src += PAX_USER_SHADOW_BASE;
13040 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13041 + dst += PAX_USER_SHADOW_BASE;
13042 +#endif
13043 +
13044 + return copy_user_generic((__force_kernel void *)dst,
13045 + (__force_kernel const void *)src, size);
13046 }
13047 }
13048
13049 @@ -176,33 +276,75 @@ __must_check long strlen_user(const char __user *str);
13050 __must_check unsigned long clear_user(void __user *mem, unsigned long len);
13051 __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
13052
13053 -__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
13054 - unsigned size);
13055 +static __must_check __always_inline unsigned long
13056 +__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
13057 +{
13058 + pax_track_stack();
13059 +
13060 + if (size > INT_MAX)
13061 + return size;
13062 +
13063 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13064 + if (!__access_ok(VERIFY_READ, src, size))
13065 + return size;
13066
13067 -static __must_check __always_inline int
13068 -__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
13069 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13070 + src += PAX_USER_SHADOW_BASE;
13071 +#endif
13072 +
13073 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
13074 +}
13075 +
13076 +static __must_check __always_inline unsigned long
13077 +__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
13078 {
13079 - return copy_user_generic((__force void *)dst, src, size);
13080 + if (size > INT_MAX)
13081 + return size;
13082 +
13083 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13084 + if (!__access_ok(VERIFY_WRITE, dst, size))
13085 + return size;
13086 +
13087 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13088 + dst += PAX_USER_SHADOW_BASE;
13089 +#endif
13090 +
13091 + return copy_user_generic((__force_kernel void *)dst, src, size);
13092 }
13093
13094 -extern long __copy_user_nocache(void *dst, const void __user *src,
13095 - unsigned size, int zerorest);
13096 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
13097 + unsigned long size, int zerorest);
13098
13099 -static inline int
13100 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
13101 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
13102 {
13103 might_sleep();
13104 +
13105 + if (size > INT_MAX)
13106 + return size;
13107 +
13108 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13109 + if (!__access_ok(VERIFY_READ, src, size))
13110 + return size;
13111 +#endif
13112 +
13113 return __copy_user_nocache(dst, src, size, 1);
13114 }
13115
13116 -static inline int
13117 -__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
13118 - unsigned size)
13119 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
13120 + unsigned long size)
13121 {
13122 + if (size > INT_MAX)
13123 + return size;
13124 +
13125 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13126 + if (!__access_ok(VERIFY_READ, src, size))
13127 + return size;
13128 +#endif
13129 +
13130 return __copy_user_nocache(dst, src, size, 0);
13131 }
13132
13133 -unsigned long
13134 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
13135 +extern unsigned long
13136 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest);
13137
13138 #endif /* _ASM_X86_UACCESS_64_H */
13139 diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
13140 index 9064052..786cfbc 100644
13141 --- a/arch/x86/include/asm/vdso.h
13142 +++ b/arch/x86/include/asm/vdso.h
13143 @@ -25,7 +25,7 @@ extern const char VDSO32_PRELINK[];
13144 #define VDSO32_SYMBOL(base, name) \
13145 ({ \
13146 extern const char VDSO32_##name[]; \
13147 - (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
13148 + (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
13149 })
13150 #endif
13151
13152 diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
13153 index 3d61e20..9507180 100644
13154 --- a/arch/x86/include/asm/vgtod.h
13155 +++ b/arch/x86/include/asm/vgtod.h
13156 @@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
13157 int sysctl_enabled;
13158 struct timezone sys_tz;
13159 struct { /* extract of a clocksource struct */
13160 + char name[8];
13161 cycle_t (*vread)(void);
13162 cycle_t cycle_last;
13163 cycle_t mask;
13164 diff --git a/arch/x86/include/asm/vmi.h b/arch/x86/include/asm/vmi.h
13165 index 61e08c0..b0da582 100644
13166 --- a/arch/x86/include/asm/vmi.h
13167 +++ b/arch/x86/include/asm/vmi.h
13168 @@ -191,6 +191,7 @@ struct vrom_header {
13169 u8 reserved[96]; /* Reserved for headers */
13170 char vmi_init[8]; /* VMI_Init jump point */
13171 char get_reloc[8]; /* VMI_GetRelocationInfo jump point */
13172 + char rom_data[8048]; /* rest of the option ROM */
13173 } __attribute__((packed));
13174
13175 struct pnp_header {
13176 diff --git a/arch/x86/include/asm/vmi_time.h b/arch/x86/include/asm/vmi_time.h
13177 index c6e0bee..fcb9f74 100644
13178 --- a/arch/x86/include/asm/vmi_time.h
13179 +++ b/arch/x86/include/asm/vmi_time.h
13180 @@ -43,7 +43,7 @@ extern struct vmi_timer_ops {
13181 int (*wallclock_updated)(void);
13182 void (*set_alarm)(u32 flags, u64 expiry, u64 period);
13183 void (*cancel_alarm)(u32 flags);
13184 -} vmi_timer_ops;
13185 +} __no_const vmi_timer_ops;
13186
13187 /* Prototypes */
13188 extern void __init vmi_time_init(void);
13189 diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
13190 index d0983d2..1f7c9e9 100644
13191 --- a/arch/x86/include/asm/vsyscall.h
13192 +++ b/arch/x86/include/asm/vsyscall.h
13193 @@ -15,9 +15,10 @@ enum vsyscall_num {
13194
13195 #ifdef __KERNEL__
13196 #include <linux/seqlock.h>
13197 +#include <linux/getcpu.h>
13198 +#include <linux/time.h>
13199
13200 #define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
13201 -#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
13202
13203 /* Definitions for CONFIG_GENERIC_TIME definitions */
13204 #define __section_vsyscall_gtod_data __attribute__ \
13205 @@ -31,7 +32,6 @@ enum vsyscall_num {
13206 #define VGETCPU_LSL 2
13207
13208 extern int __vgetcpu_mode;
13209 -extern volatile unsigned long __jiffies;
13210
13211 /* kernel space (writeable) */
13212 extern int vgetcpu_mode;
13213 @@ -39,6 +39,9 @@ extern struct timezone sys_tz;
13214
13215 extern void map_vsyscall(void);
13216
13217 +extern int vgettimeofday(struct timeval * tv, struct timezone * tz);
13218 +extern time_t vtime(time_t *t);
13219 +extern long vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache);
13220 #endif /* __KERNEL__ */
13221
13222 #endif /* _ASM_X86_VSYSCALL_H */
13223 diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
13224 index 2c756fd..3377e37 100644
13225 --- a/arch/x86/include/asm/x86_init.h
13226 +++ b/arch/x86/include/asm/x86_init.h
13227 @@ -28,7 +28,7 @@ struct x86_init_mpparse {
13228 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
13229 void (*find_smp_config)(unsigned int reserve);
13230 void (*get_smp_config)(unsigned int early);
13231 -};
13232 +} __no_const;
13233
13234 /**
13235 * struct x86_init_resources - platform specific resource related ops
13236 @@ -42,7 +42,7 @@ struct x86_init_resources {
13237 void (*probe_roms)(void);
13238 void (*reserve_resources)(void);
13239 char *(*memory_setup)(void);
13240 -};
13241 +} __no_const;
13242
13243 /**
13244 * struct x86_init_irqs - platform specific interrupt setup
13245 @@ -55,7 +55,7 @@ struct x86_init_irqs {
13246 void (*pre_vector_init)(void);
13247 void (*intr_init)(void);
13248 void (*trap_init)(void);
13249 -};
13250 +} __no_const;
13251
13252 /**
13253 * struct x86_init_oem - oem platform specific customizing functions
13254 @@ -65,7 +65,7 @@ struct x86_init_irqs {
13255 struct x86_init_oem {
13256 void (*arch_setup)(void);
13257 void (*banner)(void);
13258 -};
13259 +} __no_const;
13260
13261 /**
13262 * struct x86_init_paging - platform specific paging functions
13263 @@ -75,7 +75,7 @@ struct x86_init_oem {
13264 struct x86_init_paging {
13265 void (*pagetable_setup_start)(pgd_t *base);
13266 void (*pagetable_setup_done)(pgd_t *base);
13267 -};
13268 +} __no_const;
13269
13270 /**
13271 * struct x86_init_timers - platform specific timer setup
13272 @@ -88,7 +88,7 @@ struct x86_init_timers {
13273 void (*setup_percpu_clockev)(void);
13274 void (*tsc_pre_init)(void);
13275 void (*timer_init)(void);
13276 -};
13277 +} __no_const;
13278
13279 /**
13280 * struct x86_init_ops - functions for platform specific setup
13281 @@ -101,7 +101,7 @@ struct x86_init_ops {
13282 struct x86_init_oem oem;
13283 struct x86_init_paging paging;
13284 struct x86_init_timers timers;
13285 -};
13286 +} __no_const;
13287
13288 /**
13289 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
13290 @@ -109,7 +109,7 @@ struct x86_init_ops {
13291 */
13292 struct x86_cpuinit_ops {
13293 void (*setup_percpu_clockev)(void);
13294 -};
13295 +} __no_const;
13296
13297 /**
13298 * struct x86_platform_ops - platform specific runtime functions
13299 @@ -121,7 +121,7 @@ struct x86_platform_ops {
13300 unsigned long (*calibrate_tsc)(void);
13301 unsigned long (*get_wallclock)(void);
13302 int (*set_wallclock)(unsigned long nowtime);
13303 -};
13304 +} __no_const;
13305
13306 extern struct x86_init_ops x86_init;
13307 extern struct x86_cpuinit_ops x86_cpuinit;
13308 diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
13309 index 727acc1..554f3eb 100644
13310 --- a/arch/x86/include/asm/xsave.h
13311 +++ b/arch/x86/include/asm/xsave.h
13312 @@ -56,6 +56,12 @@ static inline int xrstor_checking(struct xsave_struct *fx)
13313 static inline int xsave_user(struct xsave_struct __user *buf)
13314 {
13315 int err;
13316 +
13317 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13318 + if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
13319 + buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
13320 +#endif
13321 +
13322 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
13323 "2:\n"
13324 ".section .fixup,\"ax\"\n"
13325 @@ -78,10 +84,15 @@ static inline int xsave_user(struct xsave_struct __user *buf)
13326 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
13327 {
13328 int err;
13329 - struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
13330 + struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
13331 u32 lmask = mask;
13332 u32 hmask = mask >> 32;
13333
13334 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13335 + if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
13336 + xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
13337 +#endif
13338 +
13339 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
13340 "2:\n"
13341 ".section .fixup,\"ax\"\n"
13342 diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
13343 index 6a564ac..9b1340c 100644
13344 --- a/arch/x86/kernel/acpi/realmode/Makefile
13345 +++ b/arch/x86/kernel/acpi/realmode/Makefile
13346 @@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
13347 $(call cc-option, -fno-stack-protector) \
13348 $(call cc-option, -mpreferred-stack-boundary=2)
13349 KBUILD_CFLAGS += $(call cc-option, -m32)
13350 +ifdef CONSTIFY_PLUGIN
13351 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
13352 +endif
13353 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
13354 GCOV_PROFILE := n
13355
13356 diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
13357 index 580b4e2..d4129e4 100644
13358 --- a/arch/x86/kernel/acpi/realmode/wakeup.S
13359 +++ b/arch/x86/kernel/acpi/realmode/wakeup.S
13360 @@ -91,6 +91,9 @@ _start:
13361 /* Do any other stuff... */
13362
13363 #ifndef CONFIG_64BIT
13364 + /* Recheck NX bit overrides (64bit path does this in trampoline) */
13365 + call verify_cpu
13366 +
13367 /* This could also be done in C code... */
13368 movl pmode_cr3, %eax
13369 movl %eax, %cr3
13370 @@ -104,7 +107,7 @@ _start:
13371 movl %eax, %ecx
13372 orl %edx, %ecx
13373 jz 1f
13374 - movl $0xc0000080, %ecx
13375 + mov $MSR_EFER, %ecx
13376 wrmsr
13377 1:
13378
13379 @@ -114,6 +117,7 @@ _start:
13380 movl pmode_cr0, %eax
13381 movl %eax, %cr0
13382 jmp pmode_return
13383 +# include "../../verify_cpu.S"
13384 #else
13385 pushw $0
13386 pushw trampoline_segment
13387 diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
13388 index ca93638..7042f24 100644
13389 --- a/arch/x86/kernel/acpi/sleep.c
13390 +++ b/arch/x86/kernel/acpi/sleep.c
13391 @@ -11,11 +11,12 @@
13392 #include <linux/cpumask.h>
13393 #include <asm/segment.h>
13394 #include <asm/desc.h>
13395 +#include <asm/e820.h>
13396
13397 #include "realmode/wakeup.h"
13398 #include "sleep.h"
13399
13400 -unsigned long acpi_wakeup_address;
13401 +unsigned long acpi_wakeup_address = 0x2000;
13402 unsigned long acpi_realmode_flags;
13403
13404 /* address in low memory of the wakeup routine. */
13405 @@ -98,9 +99,13 @@ int acpi_save_state_mem(void)
13406 #else /* CONFIG_64BIT */
13407 header->trampoline_segment = setup_trampoline() >> 4;
13408 #ifdef CONFIG_SMP
13409 - stack_start.sp = temp_stack + sizeof(temp_stack);
13410 + stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
13411 +
13412 + pax_open_kernel();
13413 early_gdt_descr.address =
13414 (unsigned long)get_cpu_gdt_table(smp_processor_id());
13415 + pax_close_kernel();
13416 +
13417 initial_gs = per_cpu_offset(smp_processor_id());
13418 #endif
13419 initial_code = (unsigned long)wakeup_long64;
13420 @@ -134,14 +139,8 @@ void __init acpi_reserve_bootmem(void)
13421 return;
13422 }
13423
13424 - acpi_realmode = (unsigned long)alloc_bootmem_low(WAKEUP_SIZE);
13425 -
13426 - if (!acpi_realmode) {
13427 - printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
13428 - return;
13429 - }
13430 -
13431 - acpi_wakeup_address = virt_to_phys((void *)acpi_realmode);
13432 + reserve_early(acpi_wakeup_address, acpi_wakeup_address + WAKEUP_SIZE, "ACPI Wakeup Code");
13433 + acpi_realmode = (unsigned long)__va(acpi_wakeup_address);;
13434 }
13435
13436
13437 diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
13438 index 8ded418..079961e 100644
13439 --- a/arch/x86/kernel/acpi/wakeup_32.S
13440 +++ b/arch/x86/kernel/acpi/wakeup_32.S
13441 @@ -30,13 +30,11 @@ wakeup_pmode_return:
13442 # and restore the stack ... but you need gdt for this to work
13443 movl saved_context_esp, %esp
13444
13445 - movl %cs:saved_magic, %eax
13446 - cmpl $0x12345678, %eax
13447 + cmpl $0x12345678, saved_magic
13448 jne bogus_magic
13449
13450 # jump to place where we left off
13451 - movl saved_eip, %eax
13452 - jmp *%eax
13453 + jmp *(saved_eip)
13454
13455 bogus_magic:
13456 jmp bogus_magic
13457 diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
13458 index de7353c..075da5f 100644
13459 --- a/arch/x86/kernel/alternative.c
13460 +++ b/arch/x86/kernel/alternative.c
13461 @@ -407,7 +407,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
13462
13463 BUG_ON(p->len > MAX_PATCH_LEN);
13464 /* prep the buffer with the original instructions */
13465 - memcpy(insnbuf, p->instr, p->len);
13466 + memcpy(insnbuf, ktla_ktva(p->instr), p->len);
13467 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
13468 (unsigned long)p->instr, p->len);
13469
13470 @@ -475,7 +475,7 @@ void __init alternative_instructions(void)
13471 if (smp_alt_once)
13472 free_init_pages("SMP alternatives",
13473 (unsigned long)__smp_locks,
13474 - (unsigned long)__smp_locks_end);
13475 + PAGE_ALIGN((unsigned long)__smp_locks_end));
13476
13477 restart_nmi();
13478 }
13479 @@ -492,13 +492,17 @@ void __init alternative_instructions(void)
13480 * instructions. And on the local CPU you need to be protected again NMI or MCE
13481 * handlers seeing an inconsistent instruction while you patch.
13482 */
13483 -static void *__init_or_module text_poke_early(void *addr, const void *opcode,
13484 +static void *__kprobes text_poke_early(void *addr, const void *opcode,
13485 size_t len)
13486 {
13487 unsigned long flags;
13488 local_irq_save(flags);
13489 - memcpy(addr, opcode, len);
13490 +
13491 + pax_open_kernel();
13492 + memcpy(ktla_ktva(addr), opcode, len);
13493 sync_core();
13494 + pax_close_kernel();
13495 +
13496 local_irq_restore(flags);
13497 /* Could also do a CLFLUSH here to speed up CPU recovery; but
13498 that causes hangs on some VIA CPUs. */
13499 @@ -520,35 +524,21 @@ static void *__init_or_module text_poke_early(void *addr, const void *opcode,
13500 */
13501 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
13502 {
13503 - unsigned long flags;
13504 - char *vaddr;
13505 + unsigned char *vaddr = ktla_ktva(addr);
13506 struct page *pages[2];
13507 - int i;
13508 + size_t i;
13509
13510 if (!core_kernel_text((unsigned long)addr)) {
13511 - pages[0] = vmalloc_to_page(addr);
13512 - pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
13513 + pages[0] = vmalloc_to_page(vaddr);
13514 + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
13515 } else {
13516 - pages[0] = virt_to_page(addr);
13517 + pages[0] = virt_to_page(vaddr);
13518 WARN_ON(!PageReserved(pages[0]));
13519 - pages[1] = virt_to_page(addr + PAGE_SIZE);
13520 + pages[1] = virt_to_page(vaddr + PAGE_SIZE);
13521 }
13522 BUG_ON(!pages[0]);
13523 - local_irq_save(flags);
13524 - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
13525 - if (pages[1])
13526 - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
13527 - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
13528 - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
13529 - clear_fixmap(FIX_TEXT_POKE0);
13530 - if (pages[1])
13531 - clear_fixmap(FIX_TEXT_POKE1);
13532 - local_flush_tlb();
13533 - sync_core();
13534 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
13535 - that causes hangs on some VIA CPUs. */
13536 + text_poke_early(addr, opcode, len);
13537 for (i = 0; i < len; i++)
13538 - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
13539 - local_irq_restore(flags);
13540 + BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
13541 return addr;
13542 }
13543 diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
13544 index 3a44b75..1601800 100644
13545 --- a/arch/x86/kernel/amd_iommu.c
13546 +++ b/arch/x86/kernel/amd_iommu.c
13547 @@ -2076,7 +2076,7 @@ static void prealloc_protection_domains(void)
13548 }
13549 }
13550
13551 -static struct dma_map_ops amd_iommu_dma_ops = {
13552 +static const struct dma_map_ops amd_iommu_dma_ops = {
13553 .alloc_coherent = alloc_coherent,
13554 .free_coherent = free_coherent,
13555 .map_page = map_page,
13556 diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
13557 index 1d2d670..8e3f477 100644
13558 --- a/arch/x86/kernel/apic/apic.c
13559 +++ b/arch/x86/kernel/apic/apic.c
13560 @@ -170,7 +170,7 @@ int first_system_vector = 0xfe;
13561 /*
13562 * Debug level, exported for io_apic.c
13563 */
13564 -unsigned int apic_verbosity;
13565 +int apic_verbosity;
13566
13567 int pic_mode;
13568
13569 @@ -1794,7 +1794,7 @@ void smp_error_interrupt(struct pt_regs *regs)
13570 apic_write(APIC_ESR, 0);
13571 v1 = apic_read(APIC_ESR);
13572 ack_APIC_irq();
13573 - atomic_inc(&irq_err_count);
13574 + atomic_inc_unchecked(&irq_err_count);
13575
13576 /*
13577 * Here is what the APIC error bits mean:
13578 @@ -2184,6 +2184,8 @@ static int __cpuinit apic_cluster_num(void)
13579 u16 *bios_cpu_apicid;
13580 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
13581
13582 + pax_track_stack();
13583 +
13584 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
13585 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
13586
13587 diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
13588 index 8928d97..f799cea 100644
13589 --- a/arch/x86/kernel/apic/io_apic.c
13590 +++ b/arch/x86/kernel/apic/io_apic.c
13591 @@ -716,7 +716,7 @@ struct IO_APIC_route_entry **alloc_ioapic_entries(void)
13592 ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
13593 GFP_ATOMIC);
13594 if (!ioapic_entries)
13595 - return 0;
13596 + return NULL;
13597
13598 for (apic = 0; apic < nr_ioapics; apic++) {
13599 ioapic_entries[apic] =
13600 @@ -733,7 +733,7 @@ nomem:
13601 kfree(ioapic_entries[apic]);
13602 kfree(ioapic_entries);
13603
13604 - return 0;
13605 + return NULL;
13606 }
13607
13608 /*
13609 @@ -1150,7 +1150,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
13610 }
13611 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
13612
13613 -void lock_vector_lock(void)
13614 +void lock_vector_lock(void) __acquires(vector_lock)
13615 {
13616 /* Used to the online set of cpus does not change
13617 * during assign_irq_vector.
13618 @@ -1158,7 +1158,7 @@ void lock_vector_lock(void)
13619 spin_lock(&vector_lock);
13620 }
13621
13622 -void unlock_vector_lock(void)
13623 +void unlock_vector_lock(void) __releases(vector_lock)
13624 {
13625 spin_unlock(&vector_lock);
13626 }
13627 @@ -2542,7 +2542,7 @@ static void ack_apic_edge(unsigned int irq)
13628 ack_APIC_irq();
13629 }
13630
13631 -atomic_t irq_mis_count;
13632 +atomic_unchecked_t irq_mis_count;
13633
13634 static void ack_apic_level(unsigned int irq)
13635 {
13636 @@ -2626,7 +2626,7 @@ static void ack_apic_level(unsigned int irq)
13637
13638 /* Tail end of version 0x11 I/O APIC bug workaround */
13639 if (!(v & (1 << (i & 0x1f)))) {
13640 - atomic_inc(&irq_mis_count);
13641 + atomic_inc_unchecked(&irq_mis_count);
13642 spin_lock(&ioapic_lock);
13643 __mask_and_edge_IO_APIC_irq(cfg);
13644 __unmask_and_level_IO_APIC_irq(cfg);
13645 diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
13646 index 151ace6..f317474 100644
13647 --- a/arch/x86/kernel/apm_32.c
13648 +++ b/arch/x86/kernel/apm_32.c
13649 @@ -410,7 +410,7 @@ static DEFINE_SPINLOCK(user_list_lock);
13650 * This is for buggy BIOS's that refer to (real mode) segment 0x40
13651 * even though they are called in protected mode.
13652 */
13653 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
13654 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
13655 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
13656
13657 static const char driver_version[] = "1.16ac"; /* no spaces */
13658 @@ -588,7 +588,10 @@ static long __apm_bios_call(void *_call)
13659 BUG_ON(cpu != 0);
13660 gdt = get_cpu_gdt_table(cpu);
13661 save_desc_40 = gdt[0x40 / 8];
13662 +
13663 + pax_open_kernel();
13664 gdt[0x40 / 8] = bad_bios_desc;
13665 + pax_close_kernel();
13666
13667 apm_irq_save(flags);
13668 APM_DO_SAVE_SEGS;
13669 @@ -597,7 +600,11 @@ static long __apm_bios_call(void *_call)
13670 &call->esi);
13671 APM_DO_RESTORE_SEGS;
13672 apm_irq_restore(flags);
13673 +
13674 + pax_open_kernel();
13675 gdt[0x40 / 8] = save_desc_40;
13676 + pax_close_kernel();
13677 +
13678 put_cpu();
13679
13680 return call->eax & 0xff;
13681 @@ -664,7 +671,10 @@ static long __apm_bios_call_simple(void *_call)
13682 BUG_ON(cpu != 0);
13683 gdt = get_cpu_gdt_table(cpu);
13684 save_desc_40 = gdt[0x40 / 8];
13685 +
13686 + pax_open_kernel();
13687 gdt[0x40 / 8] = bad_bios_desc;
13688 + pax_close_kernel();
13689
13690 apm_irq_save(flags);
13691 APM_DO_SAVE_SEGS;
13692 @@ -672,7 +682,11 @@ static long __apm_bios_call_simple(void *_call)
13693 &call->eax);
13694 APM_DO_RESTORE_SEGS;
13695 apm_irq_restore(flags);
13696 +
13697 + pax_open_kernel();
13698 gdt[0x40 / 8] = save_desc_40;
13699 + pax_close_kernel();
13700 +
13701 put_cpu();
13702 return error;
13703 }
13704 @@ -975,7 +989,7 @@ recalc:
13705
13706 static void apm_power_off(void)
13707 {
13708 - unsigned char po_bios_call[] = {
13709 + const unsigned char po_bios_call[] = {
13710 0xb8, 0x00, 0x10, /* movw $0x1000,ax */
13711 0x8e, 0xd0, /* movw ax,ss */
13712 0xbc, 0x00, 0xf0, /* movw $0xf000,sp */
13713 @@ -2357,12 +2371,15 @@ static int __init apm_init(void)
13714 * code to that CPU.
13715 */
13716 gdt = get_cpu_gdt_table(0);
13717 +
13718 + pax_open_kernel();
13719 set_desc_base(&gdt[APM_CS >> 3],
13720 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
13721 set_desc_base(&gdt[APM_CS_16 >> 3],
13722 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
13723 set_desc_base(&gdt[APM_DS >> 3],
13724 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
13725 + pax_close_kernel();
13726
13727 proc_create("apm", 0, NULL, &apm_file_ops);
13728
13729 diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
13730 index dfdbf64..9b2b6ce 100644
13731 --- a/arch/x86/kernel/asm-offsets_32.c
13732 +++ b/arch/x86/kernel/asm-offsets_32.c
13733 @@ -51,7 +51,6 @@ void foo(void)
13734 OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
13735 BLANK();
13736
13737 - OFFSET(TI_task, thread_info, task);
13738 OFFSET(TI_exec_domain, thread_info, exec_domain);
13739 OFFSET(TI_flags, thread_info, flags);
13740 OFFSET(TI_status, thread_info, status);
13741 @@ -60,6 +59,8 @@ void foo(void)
13742 OFFSET(TI_restart_block, thread_info, restart_block);
13743 OFFSET(TI_sysenter_return, thread_info, sysenter_return);
13744 OFFSET(TI_cpu, thread_info, cpu);
13745 + OFFSET(TI_lowest_stack, thread_info, lowest_stack);
13746 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
13747 BLANK();
13748
13749 OFFSET(GDS_size, desc_ptr, size);
13750 @@ -99,6 +100,7 @@ void foo(void)
13751
13752 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
13753 DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
13754 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
13755 DEFINE(PTRS_PER_PTE, PTRS_PER_PTE);
13756 DEFINE(PTRS_PER_PMD, PTRS_PER_PMD);
13757 DEFINE(PTRS_PER_PGD, PTRS_PER_PGD);
13758 @@ -115,6 +117,11 @@ void foo(void)
13759 OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
13760 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
13761 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
13762 +
13763 +#ifdef CONFIG_PAX_KERNEXEC
13764 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
13765 +#endif
13766 +
13767 #endif
13768
13769 #ifdef CONFIG_XEN
13770 diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
13771 index 4a6aeed..371de20 100644
13772 --- a/arch/x86/kernel/asm-offsets_64.c
13773 +++ b/arch/x86/kernel/asm-offsets_64.c
13774 @@ -44,6 +44,8 @@ int main(void)
13775 ENTRY(addr_limit);
13776 ENTRY(preempt_count);
13777 ENTRY(status);
13778 + ENTRY(lowest_stack);
13779 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
13780 #ifdef CONFIG_IA32_EMULATION
13781 ENTRY(sysenter_return);
13782 #endif
13783 @@ -63,6 +65,18 @@ int main(void)
13784 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
13785 OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
13786 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
13787 +
13788 +#ifdef CONFIG_PAX_KERNEXEC
13789 + OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
13790 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
13791 +#endif
13792 +
13793 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13794 + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
13795 + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
13796 + OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
13797 +#endif
13798 +
13799 #endif
13800
13801
13802 @@ -115,6 +129,7 @@ int main(void)
13803 ENTRY(cr8);
13804 BLANK();
13805 #undef ENTRY
13806 + DEFINE(TSS_size, sizeof(struct tss_struct));
13807 DEFINE(TSS_ist, offsetof(struct tss_struct, x86_tss.ist));
13808 BLANK();
13809 DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
13810 @@ -130,6 +145,7 @@ int main(void)
13811
13812 BLANK();
13813 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
13814 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
13815 #ifdef CONFIG_XEN
13816 BLANK();
13817 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
13818 diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
13819 index ff502cc..dc5133e 100644
13820 --- a/arch/x86/kernel/cpu/Makefile
13821 +++ b/arch/x86/kernel/cpu/Makefile
13822 @@ -7,10 +7,6 @@ ifdef CONFIG_FUNCTION_TRACER
13823 CFLAGS_REMOVE_common.o = -pg
13824 endif
13825
13826 -# Make sure load_percpu_segment has no stackprotector
13827 -nostackp := $(call cc-option, -fno-stack-protector)
13828 -CFLAGS_common.o := $(nostackp)
13829 -
13830 obj-y := intel_cacheinfo.o addon_cpuid_features.o
13831 obj-y += proc.o capflags.o powerflags.o common.o
13832 obj-y += vmware.o hypervisor.o sched.o
13833 diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
13834 index 6e082dc..a0b5f36 100644
13835 --- a/arch/x86/kernel/cpu/amd.c
13836 +++ b/arch/x86/kernel/cpu/amd.c
13837 @@ -602,7 +602,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
13838 unsigned int size)
13839 {
13840 /* AMD errata T13 (order #21922) */
13841 - if ((c->x86 == 6)) {
13842 + if (c->x86 == 6) {
13843 /* Duron Rev A0 */
13844 if (c->x86_model == 3 && c->x86_mask == 0)
13845 size = 64;
13846 diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
13847 index 4e34d10..ba6bc97 100644
13848 --- a/arch/x86/kernel/cpu/common.c
13849 +++ b/arch/x86/kernel/cpu/common.c
13850 @@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
13851
13852 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
13853
13854 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
13855 -#ifdef CONFIG_X86_64
13856 - /*
13857 - * We need valid kernel segments for data and code in long mode too
13858 - * IRET will check the segment types kkeil 2000/10/28
13859 - * Also sysret mandates a special GDT layout
13860 - *
13861 - * TLS descriptors are currently at a different place compared to i386.
13862 - * Hopefully nobody expects them at a fixed place (Wine?)
13863 - */
13864 - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
13865 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
13866 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
13867 - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
13868 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
13869 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
13870 -#else
13871 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
13872 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13873 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
13874 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
13875 - /*
13876 - * Segments used for calling PnP BIOS have byte granularity.
13877 - * They code segments and data segments have fixed 64k limits,
13878 - * the transfer segment sizes are set at run time.
13879 - */
13880 - /* 32-bit code */
13881 - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
13882 - /* 16-bit code */
13883 - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
13884 - /* 16-bit data */
13885 - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
13886 - /* 16-bit data */
13887 - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
13888 - /* 16-bit data */
13889 - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
13890 - /*
13891 - * The APM segments have byte granularity and their bases
13892 - * are set at run time. All have 64k limits.
13893 - */
13894 - /* 32-bit code */
13895 - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
13896 - /* 16-bit code */
13897 - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
13898 - /* data */
13899 - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
13900 -
13901 - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13902 - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13903 - GDT_STACK_CANARY_INIT
13904 -#endif
13905 -} };
13906 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
13907 -
13908 static int __init x86_xsave_setup(char *s)
13909 {
13910 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
13911 @@ -344,7 +290,7 @@ void switch_to_new_gdt(int cpu)
13912 {
13913 struct desc_ptr gdt_descr;
13914
13915 - gdt_descr.address = (long)get_cpu_gdt_table(cpu);
13916 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
13917 gdt_descr.size = GDT_SIZE - 1;
13918 load_gdt(&gdt_descr);
13919 /* Reload the per-cpu base */
13920 @@ -798,6 +744,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
13921 /* Filter out anything that depends on CPUID levels we don't have */
13922 filter_cpuid_features(c, true);
13923
13924 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
13925 + setup_clear_cpu_cap(X86_FEATURE_SEP);
13926 +#endif
13927 +
13928 /* If the model name is still unset, do table lookup. */
13929 if (!c->x86_model_id[0]) {
13930 const char *p;
13931 @@ -980,6 +930,9 @@ static __init int setup_disablecpuid(char *arg)
13932 }
13933 __setup("clearcpuid=", setup_disablecpuid);
13934
13935 +DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
13936 +EXPORT_PER_CPU_SYMBOL(current_tinfo);
13937 +
13938 #ifdef CONFIG_X86_64
13939 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
13940
13941 @@ -995,7 +948,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
13942 EXPORT_PER_CPU_SYMBOL(current_task);
13943
13944 DEFINE_PER_CPU(unsigned long, kernel_stack) =
13945 - (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
13946 + (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
13947 EXPORT_PER_CPU_SYMBOL(kernel_stack);
13948
13949 DEFINE_PER_CPU(char *, irq_stack_ptr) =
13950 @@ -1060,7 +1013,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
13951 {
13952 memset(regs, 0, sizeof(struct pt_regs));
13953 regs->fs = __KERNEL_PERCPU;
13954 - regs->gs = __KERNEL_STACK_CANARY;
13955 + savesegment(gs, regs->gs);
13956
13957 return regs;
13958 }
13959 @@ -1101,7 +1054,7 @@ void __cpuinit cpu_init(void)
13960 int i;
13961
13962 cpu = stack_smp_processor_id();
13963 - t = &per_cpu(init_tss, cpu);
13964 + t = init_tss + cpu;
13965 orig_ist = &per_cpu(orig_ist, cpu);
13966
13967 #ifdef CONFIG_NUMA
13968 @@ -1127,7 +1080,7 @@ void __cpuinit cpu_init(void)
13969 switch_to_new_gdt(cpu);
13970 loadsegment(fs, 0);
13971
13972 - load_idt((const struct desc_ptr *)&idt_descr);
13973 + load_idt(&idt_descr);
13974
13975 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
13976 syscall_init();
13977 @@ -1136,7 +1089,6 @@ void __cpuinit cpu_init(void)
13978 wrmsrl(MSR_KERNEL_GS_BASE, 0);
13979 barrier();
13980
13981 - check_efer();
13982 if (cpu != 0)
13983 enable_x2apic();
13984
13985 @@ -1199,7 +1151,7 @@ void __cpuinit cpu_init(void)
13986 {
13987 int cpu = smp_processor_id();
13988 struct task_struct *curr = current;
13989 - struct tss_struct *t = &per_cpu(init_tss, cpu);
13990 + struct tss_struct *t = init_tss + cpu;
13991 struct thread_struct *thread = &curr->thread;
13992
13993 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
13994 diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
13995 index 6a77cca..4f4fca0 100644
13996 --- a/arch/x86/kernel/cpu/intel.c
13997 +++ b/arch/x86/kernel/cpu/intel.c
13998 @@ -162,7 +162,7 @@ static void __cpuinit trap_init_f00f_bug(void)
13999 * Update the IDT descriptor and reload the IDT so that
14000 * it uses the read-only mapped virtual address.
14001 */
14002 - idt_descr.address = fix_to_virt(FIX_F00F_IDT);
14003 + idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
14004 load_idt(&idt_descr);
14005 }
14006 #endif
14007 diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
14008 index 417990f..96dc36b 100644
14009 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c
14010 +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
14011 @@ -921,7 +921,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
14012 return ret;
14013 }
14014
14015 -static struct sysfs_ops sysfs_ops = {
14016 +static const struct sysfs_ops sysfs_ops = {
14017 .show = show,
14018 .store = store,
14019 };
14020 diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
14021 index 472763d..9831e11 100644
14022 --- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
14023 +++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
14024 @@ -211,7 +211,9 @@ static ssize_t mce_write(struct file *filp, const char __user *ubuf,
14025 static int inject_init(void)
14026 {
14027 printk(KERN_INFO "Machine check injector initialized\n");
14028 - mce_chrdev_ops.write = mce_write;
14029 + pax_open_kernel();
14030 + *(void **)&mce_chrdev_ops.write = mce_write;
14031 + pax_close_kernel();
14032 register_die_notifier(&mce_raise_nb);
14033 return 0;
14034 }
14035 diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
14036 index 0f16a2b..21740f5 100644
14037 --- a/arch/x86/kernel/cpu/mcheck/mce.c
14038 +++ b/arch/x86/kernel/cpu/mcheck/mce.c
14039 @@ -43,6 +43,7 @@
14040 #include <asm/ipi.h>
14041 #include <asm/mce.h>
14042 #include <asm/msr.h>
14043 +#include <asm/local.h>
14044
14045 #include "mce-internal.h"
14046
14047 @@ -187,7 +188,7 @@ static void print_mce(struct mce *m)
14048 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
14049 m->cs, m->ip);
14050
14051 - if (m->cs == __KERNEL_CS)
14052 + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
14053 print_symbol("{%s}", m->ip);
14054 pr_cont("\n");
14055 }
14056 @@ -221,10 +222,10 @@ static void print_mce_tail(void)
14057
14058 #define PANIC_TIMEOUT 5 /* 5 seconds */
14059
14060 -static atomic_t mce_paniced;
14061 +static atomic_unchecked_t mce_paniced;
14062
14063 static int fake_panic;
14064 -static atomic_t mce_fake_paniced;
14065 +static atomic_unchecked_t mce_fake_paniced;
14066
14067 /* Panic in progress. Enable interrupts and wait for final IPI */
14068 static void wait_for_panic(void)
14069 @@ -248,7 +249,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
14070 /*
14071 * Make sure only one CPU runs in machine check panic
14072 */
14073 - if (atomic_inc_return(&mce_paniced) > 1)
14074 + if (atomic_inc_return_unchecked(&mce_paniced) > 1)
14075 wait_for_panic();
14076 barrier();
14077
14078 @@ -256,7 +257,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
14079 console_verbose();
14080 } else {
14081 /* Don't log too much for fake panic */
14082 - if (atomic_inc_return(&mce_fake_paniced) > 1)
14083 + if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
14084 return;
14085 }
14086 print_mce_head();
14087 @@ -616,7 +617,7 @@ static int mce_timed_out(u64 *t)
14088 * might have been modified by someone else.
14089 */
14090 rmb();
14091 - if (atomic_read(&mce_paniced))
14092 + if (atomic_read_unchecked(&mce_paniced))
14093 wait_for_panic();
14094 if (!monarch_timeout)
14095 goto out;
14096 @@ -1394,7 +1395,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
14097 }
14098
14099 /* Call the installed machine check handler for this CPU setup. */
14100 -void (*machine_check_vector)(struct pt_regs *, long error_code) =
14101 +void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
14102 unexpected_machine_check;
14103
14104 /*
14105 @@ -1416,7 +1417,9 @@ void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
14106 return;
14107 }
14108
14109 + pax_open_kernel();
14110 machine_check_vector = do_machine_check;
14111 + pax_close_kernel();
14112
14113 mce_init();
14114 mce_cpu_features(c);
14115 @@ -1429,14 +1432,14 @@ void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
14116 */
14117
14118 static DEFINE_SPINLOCK(mce_state_lock);
14119 -static int open_count; /* #times opened */
14120 +static local_t open_count; /* #times opened */
14121 static int open_exclu; /* already open exclusive? */
14122
14123 static int mce_open(struct inode *inode, struct file *file)
14124 {
14125 spin_lock(&mce_state_lock);
14126
14127 - if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
14128 + if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
14129 spin_unlock(&mce_state_lock);
14130
14131 return -EBUSY;
14132 @@ -1444,7 +1447,7 @@ static int mce_open(struct inode *inode, struct file *file)
14133
14134 if (file->f_flags & O_EXCL)
14135 open_exclu = 1;
14136 - open_count++;
14137 + local_inc(&open_count);
14138
14139 spin_unlock(&mce_state_lock);
14140
14141 @@ -1455,7 +1458,7 @@ static int mce_release(struct inode *inode, struct file *file)
14142 {
14143 spin_lock(&mce_state_lock);
14144
14145 - open_count--;
14146 + local_dec(&open_count);
14147 open_exclu = 0;
14148
14149 spin_unlock(&mce_state_lock);
14150 @@ -2082,7 +2085,7 @@ struct dentry *mce_get_debugfs_dir(void)
14151 static void mce_reset(void)
14152 {
14153 cpu_missing = 0;
14154 - atomic_set(&mce_fake_paniced, 0);
14155 + atomic_set_unchecked(&mce_fake_paniced, 0);
14156 atomic_set(&mce_executing, 0);
14157 atomic_set(&mce_callin, 0);
14158 atomic_set(&global_nwo, 0);
14159 diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
14160 index ef3cd31..9d2f6ab 100644
14161 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
14162 +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
14163 @@ -385,7 +385,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
14164 return ret;
14165 }
14166
14167 -static struct sysfs_ops threshold_ops = {
14168 +static const struct sysfs_ops threshold_ops = {
14169 .show = show,
14170 .store = store,
14171 };
14172 diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
14173 index 5c0e653..0882b0a 100644
14174 --- a/arch/x86/kernel/cpu/mcheck/p5.c
14175 +++ b/arch/x86/kernel/cpu/mcheck/p5.c
14176 @@ -12,6 +12,7 @@
14177 #include <asm/system.h>
14178 #include <asm/mce.h>
14179 #include <asm/msr.h>
14180 +#include <asm/pgtable.h>
14181
14182 /* By default disabled */
14183 int mce_p5_enabled __read_mostly;
14184 @@ -50,7 +51,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
14185 if (!cpu_has(c, X86_FEATURE_MCE))
14186 return;
14187
14188 + pax_open_kernel();
14189 machine_check_vector = pentium_machine_check;
14190 + pax_close_kernel();
14191 /* Make sure the vector pointer is visible before we enable MCEs: */
14192 wmb();
14193
14194 diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
14195 index 54060f5..c1a7577 100644
14196 --- a/arch/x86/kernel/cpu/mcheck/winchip.c
14197 +++ b/arch/x86/kernel/cpu/mcheck/winchip.c
14198 @@ -11,6 +11,7 @@
14199 #include <asm/system.h>
14200 #include <asm/mce.h>
14201 #include <asm/msr.h>
14202 +#include <asm/pgtable.h>
14203
14204 /* Machine check handler for WinChip C6: */
14205 static void winchip_machine_check(struct pt_regs *regs, long error_code)
14206 @@ -24,7 +25,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
14207 {
14208 u32 lo, hi;
14209
14210 + pax_open_kernel();
14211 machine_check_vector = winchip_machine_check;
14212 + pax_close_kernel();
14213 /* Make sure the vector pointer is visible before we enable MCEs: */
14214 wmb();
14215
14216 diff --git a/arch/x86/kernel/cpu/mtrr/amd.c b/arch/x86/kernel/cpu/mtrr/amd.c
14217 index 33af141..92ba9cd 100644
14218 --- a/arch/x86/kernel/cpu/mtrr/amd.c
14219 +++ b/arch/x86/kernel/cpu/mtrr/amd.c
14220 @@ -108,7 +108,7 @@ amd_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
14221 return 0;
14222 }
14223
14224 -static struct mtrr_ops amd_mtrr_ops = {
14225 +static const struct mtrr_ops amd_mtrr_ops = {
14226 .vendor = X86_VENDOR_AMD,
14227 .set = amd_set_mtrr,
14228 .get = amd_get_mtrr,
14229 diff --git a/arch/x86/kernel/cpu/mtrr/centaur.c b/arch/x86/kernel/cpu/mtrr/centaur.c
14230 index de89f14..316fe3e 100644
14231 --- a/arch/x86/kernel/cpu/mtrr/centaur.c
14232 +++ b/arch/x86/kernel/cpu/mtrr/centaur.c
14233 @@ -110,7 +110,7 @@ centaur_validate_add_page(unsigned long base, unsigned long size, unsigned int t
14234 return 0;
14235 }
14236
14237 -static struct mtrr_ops centaur_mtrr_ops = {
14238 +static const struct mtrr_ops centaur_mtrr_ops = {
14239 .vendor = X86_VENDOR_CENTAUR,
14240 .set = centaur_set_mcr,
14241 .get = centaur_get_mcr,
14242 diff --git a/arch/x86/kernel/cpu/mtrr/cyrix.c b/arch/x86/kernel/cpu/mtrr/cyrix.c
14243 index 228d982..68a3343 100644
14244 --- a/arch/x86/kernel/cpu/mtrr/cyrix.c
14245 +++ b/arch/x86/kernel/cpu/mtrr/cyrix.c
14246 @@ -265,7 +265,7 @@ static void cyrix_set_all(void)
14247 post_set();
14248 }
14249
14250 -static struct mtrr_ops cyrix_mtrr_ops = {
14251 +static const struct mtrr_ops cyrix_mtrr_ops = {
14252 .vendor = X86_VENDOR_CYRIX,
14253 .set_all = cyrix_set_all,
14254 .set = cyrix_set_arr,
14255 diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
14256 index 55da0c5..4d75584 100644
14257 --- a/arch/x86/kernel/cpu/mtrr/generic.c
14258 +++ b/arch/x86/kernel/cpu/mtrr/generic.c
14259 @@ -752,7 +752,7 @@ int positive_have_wrcomb(void)
14260 /*
14261 * Generic structure...
14262 */
14263 -struct mtrr_ops generic_mtrr_ops = {
14264 +const struct mtrr_ops generic_mtrr_ops = {
14265 .use_intel_if = 1,
14266 .set_all = generic_set_all,
14267 .get = generic_get_mtrr,
14268 diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
14269 index fd60f09..c94ef52 100644
14270 --- a/arch/x86/kernel/cpu/mtrr/main.c
14271 +++ b/arch/x86/kernel/cpu/mtrr/main.c
14272 @@ -60,14 +60,14 @@ static DEFINE_MUTEX(mtrr_mutex);
14273 u64 size_or_mask, size_and_mask;
14274 static bool mtrr_aps_delayed_init;
14275
14276 -static struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
14277 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
14278
14279 -struct mtrr_ops *mtrr_if;
14280 +const struct mtrr_ops *mtrr_if;
14281
14282 static void set_mtrr(unsigned int reg, unsigned long base,
14283 unsigned long size, mtrr_type type);
14284
14285 -void set_mtrr_ops(struct mtrr_ops *ops)
14286 +void set_mtrr_ops(const struct mtrr_ops *ops)
14287 {
14288 if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
14289 mtrr_ops[ops->vendor] = ops;
14290 diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
14291 index a501dee..816c719 100644
14292 --- a/arch/x86/kernel/cpu/mtrr/mtrr.h
14293 +++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
14294 @@ -25,14 +25,14 @@ struct mtrr_ops {
14295 int (*validate_add_page)(unsigned long base, unsigned long size,
14296 unsigned int type);
14297 int (*have_wrcomb)(void);
14298 -};
14299 +} __do_const;
14300
14301 extern int generic_get_free_region(unsigned long base, unsigned long size,
14302 int replace_reg);
14303 extern int generic_validate_add_page(unsigned long base, unsigned long size,
14304 unsigned int type);
14305
14306 -extern struct mtrr_ops generic_mtrr_ops;
14307 +extern const struct mtrr_ops generic_mtrr_ops;
14308
14309 extern int positive_have_wrcomb(void);
14310
14311 @@ -53,10 +53,10 @@ void fill_mtrr_var_range(unsigned int index,
14312 u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi);
14313 void get_mtrr_state(void);
14314
14315 -extern void set_mtrr_ops(struct mtrr_ops *ops);
14316 +extern void set_mtrr_ops(const struct mtrr_ops *ops);
14317
14318 extern u64 size_or_mask, size_and_mask;
14319 -extern struct mtrr_ops *mtrr_if;
14320 +extern const struct mtrr_ops *mtrr_if;
14321
14322 #define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
14323 #define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1)
14324 diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
14325 index 0ff02ca..fc49a60 100644
14326 --- a/arch/x86/kernel/cpu/perf_event.c
14327 +++ b/arch/x86/kernel/cpu/perf_event.c
14328 @@ -723,10 +723,10 @@ x86_perf_event_update(struct perf_event *event,
14329 * count to the generic event atomically:
14330 */
14331 again:
14332 - prev_raw_count = atomic64_read(&hwc->prev_count);
14333 + prev_raw_count = atomic64_read_unchecked(&hwc->prev_count);
14334 rdmsrl(hwc->event_base + idx, new_raw_count);
14335
14336 - if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
14337 + if (atomic64_cmpxchg_unchecked(&hwc->prev_count, prev_raw_count,
14338 new_raw_count) != prev_raw_count)
14339 goto again;
14340
14341 @@ -741,7 +741,7 @@ again:
14342 delta = (new_raw_count << shift) - (prev_raw_count << shift);
14343 delta >>= shift;
14344
14345 - atomic64_add(delta, &event->count);
14346 + atomic64_add_unchecked(delta, &event->count);
14347 atomic64_sub(delta, &hwc->period_left);
14348
14349 return new_raw_count;
14350 @@ -1353,7 +1353,7 @@ x86_perf_event_set_period(struct perf_event *event,
14351 * The hw event starts counting from this event offset,
14352 * mark it to be able to extra future deltas:
14353 */
14354 - atomic64_set(&hwc->prev_count, (u64)-left);
14355 + atomic64_set_unchecked(&hwc->prev_count, (u64)-left);
14356
14357 err = checking_wrmsrl(hwc->event_base + idx,
14358 (u64)(-left) & x86_pmu.event_mask);
14359 @@ -2357,7 +2357,7 @@ perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
14360 break;
14361
14362 callchain_store(entry, frame.return_address);
14363 - fp = frame.next_frame;
14364 + fp = (__force const void __user *)frame.next_frame;
14365 }
14366 }
14367
14368 diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
14369 index 898df97..9e82503 100644
14370 --- a/arch/x86/kernel/cpu/perfctr-watchdog.c
14371 +++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
14372 @@ -30,11 +30,11 @@ struct nmi_watchdog_ctlblk {
14373
14374 /* Interface defining a CPU specific perfctr watchdog */
14375 struct wd_ops {
14376 - int (*reserve)(void);
14377 - void (*unreserve)(void);
14378 - int (*setup)(unsigned nmi_hz);
14379 - void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
14380 - void (*stop)(void);
14381 + int (* const reserve)(void);
14382 + void (* const unreserve)(void);
14383 + int (* const setup)(unsigned nmi_hz);
14384 + void (* const rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
14385 + void (* const stop)(void);
14386 unsigned perfctr;
14387 unsigned evntsel;
14388 u64 checkbit;
14389 @@ -645,6 +645,7 @@ static const struct wd_ops p4_wd_ops = {
14390 #define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
14391 #define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
14392
14393 +/* cannot be const */
14394 static struct wd_ops intel_arch_wd_ops;
14395
14396 static int setup_intel_arch_watchdog(unsigned nmi_hz)
14397 @@ -697,6 +698,7 @@ static int setup_intel_arch_watchdog(unsigned nmi_hz)
14398 return 1;
14399 }
14400
14401 +/* cannot be const */
14402 static struct wd_ops intel_arch_wd_ops __read_mostly = {
14403 .reserve = single_msr_reserve,
14404 .unreserve = single_msr_unreserve,
14405 diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
14406 index ff95824..2ffdcb5 100644
14407 --- a/arch/x86/kernel/crash.c
14408 +++ b/arch/x86/kernel/crash.c
14409 @@ -41,7 +41,7 @@ static void kdump_nmi_callback(int cpu, struct die_args *args)
14410 regs = args->regs;
14411
14412 #ifdef CONFIG_X86_32
14413 - if (!user_mode_vm(regs)) {
14414 + if (!user_mode(regs)) {
14415 crash_fixup_ss_esp(&fixed_regs, regs);
14416 regs = &fixed_regs;
14417 }
14418 diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
14419 index 37250fe..bf2ec74 100644
14420 --- a/arch/x86/kernel/doublefault_32.c
14421 +++ b/arch/x86/kernel/doublefault_32.c
14422 @@ -11,7 +11,7 @@
14423
14424 #define DOUBLEFAULT_STACKSIZE (1024)
14425 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
14426 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
14427 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
14428
14429 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
14430
14431 @@ -21,7 +21,7 @@ static void doublefault_fn(void)
14432 unsigned long gdt, tss;
14433
14434 store_gdt(&gdt_desc);
14435 - gdt = gdt_desc.address;
14436 + gdt = (unsigned long)gdt_desc.address;
14437
14438 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
14439
14440 @@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
14441 /* 0x2 bit is always set */
14442 .flags = X86_EFLAGS_SF | 0x2,
14443 .sp = STACK_START,
14444 - .es = __USER_DS,
14445 + .es = __KERNEL_DS,
14446 .cs = __KERNEL_CS,
14447 .ss = __KERNEL_DS,
14448 - .ds = __USER_DS,
14449 + .ds = __KERNEL_DS,
14450 .fs = __KERNEL_PERCPU,
14451
14452 .__cr3 = __pa_nodebug(swapper_pg_dir),
14453 diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
14454 index 2d8a371..4fa6ae6 100644
14455 --- a/arch/x86/kernel/dumpstack.c
14456 +++ b/arch/x86/kernel/dumpstack.c
14457 @@ -2,6 +2,9 @@
14458 * Copyright (C) 1991, 1992 Linus Torvalds
14459 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
14460 */
14461 +#ifdef CONFIG_GRKERNSEC_HIDESYM
14462 +#define __INCLUDED_BY_HIDESYM 1
14463 +#endif
14464 #include <linux/kallsyms.h>
14465 #include <linux/kprobes.h>
14466 #include <linux/uaccess.h>
14467 @@ -28,7 +31,7 @@ static int die_counter;
14468
14469 void printk_address(unsigned long address, int reliable)
14470 {
14471 - printk(" [<%p>] %s%pS\n", (void *) address,
14472 + printk(" [<%p>] %s%pA\n", (void *) address,
14473 reliable ? "" : "? ", (void *) address);
14474 }
14475
14476 @@ -36,9 +39,8 @@ void printk_address(unsigned long address, int reliable)
14477 static void
14478 print_ftrace_graph_addr(unsigned long addr, void *data,
14479 const struct stacktrace_ops *ops,
14480 - struct thread_info *tinfo, int *graph)
14481 + struct task_struct *task, int *graph)
14482 {
14483 - struct task_struct *task = tinfo->task;
14484 unsigned long ret_addr;
14485 int index = task->curr_ret_stack;
14486
14487 @@ -59,7 +61,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
14488 static inline void
14489 print_ftrace_graph_addr(unsigned long addr, void *data,
14490 const struct stacktrace_ops *ops,
14491 - struct thread_info *tinfo, int *graph)
14492 + struct task_struct *task, int *graph)
14493 { }
14494 #endif
14495
14496 @@ -70,10 +72,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
14497 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
14498 */
14499
14500 -static inline int valid_stack_ptr(struct thread_info *tinfo,
14501 - void *p, unsigned int size, void *end)
14502 +static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
14503 {
14504 - void *t = tinfo;
14505 if (end) {
14506 if (p < end && p >= (end-THREAD_SIZE))
14507 return 1;
14508 @@ -84,14 +84,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
14509 }
14510
14511 unsigned long
14512 -print_context_stack(struct thread_info *tinfo,
14513 +print_context_stack(struct task_struct *task, void *stack_start,
14514 unsigned long *stack, unsigned long bp,
14515 const struct stacktrace_ops *ops, void *data,
14516 unsigned long *end, int *graph)
14517 {
14518 struct stack_frame *frame = (struct stack_frame *)bp;
14519
14520 - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
14521 + while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
14522 unsigned long addr;
14523
14524 addr = *stack;
14525 @@ -103,7 +103,7 @@ print_context_stack(struct thread_info *tinfo,
14526 } else {
14527 ops->address(data, addr, 0);
14528 }
14529 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
14530 + print_ftrace_graph_addr(addr, data, ops, task, graph);
14531 }
14532 stack++;
14533 }
14534 @@ -180,7 +180,7 @@ void dump_stack(void)
14535 #endif
14536
14537 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
14538 - current->pid, current->comm, print_tainted(),
14539 + task_pid_nr(current), current->comm, print_tainted(),
14540 init_utsname()->release,
14541 (int)strcspn(init_utsname()->version, " "),
14542 init_utsname()->version);
14543 @@ -220,6 +220,8 @@ unsigned __kprobes long oops_begin(void)
14544 return flags;
14545 }
14546
14547 +extern void gr_handle_kernel_exploit(void);
14548 +
14549 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
14550 {
14551 if (regs && kexec_should_crash(current))
14552 @@ -241,7 +243,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
14553 panic("Fatal exception in interrupt");
14554 if (panic_on_oops)
14555 panic("Fatal exception");
14556 - do_exit(signr);
14557 +
14558 + gr_handle_kernel_exploit();
14559 +
14560 + do_group_exit(signr);
14561 }
14562
14563 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
14564 @@ -295,7 +300,7 @@ void die(const char *str, struct pt_regs *regs, long err)
14565 unsigned long flags = oops_begin();
14566 int sig = SIGSEGV;
14567
14568 - if (!user_mode_vm(regs))
14569 + if (!user_mode(regs))
14570 report_bug(regs->ip, regs);
14571
14572 if (__die(str, regs, err))
14573 diff --git a/arch/x86/kernel/dumpstack.h b/arch/x86/kernel/dumpstack.h
14574 index 81086c2..13e8b17 100644
14575 --- a/arch/x86/kernel/dumpstack.h
14576 +++ b/arch/x86/kernel/dumpstack.h
14577 @@ -15,7 +15,7 @@
14578 #endif
14579
14580 extern unsigned long
14581 -print_context_stack(struct thread_info *tinfo,
14582 +print_context_stack(struct task_struct *task, void *stack_start,
14583 unsigned long *stack, unsigned long bp,
14584 const struct stacktrace_ops *ops, void *data,
14585 unsigned long *end, int *graph);
14586 diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
14587 index f7dd2a7..504f53b 100644
14588 --- a/arch/x86/kernel/dumpstack_32.c
14589 +++ b/arch/x86/kernel/dumpstack_32.c
14590 @@ -53,16 +53,12 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14591 #endif
14592
14593 for (;;) {
14594 - struct thread_info *context;
14595 + void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14596 + bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14597
14598 - context = (struct thread_info *)
14599 - ((unsigned long)stack & (~(THREAD_SIZE - 1)));
14600 - bp = print_context_stack(context, stack, bp, ops,
14601 - data, NULL, &graph);
14602 -
14603 - stack = (unsigned long *)context->previous_esp;
14604 - if (!stack)
14605 + if (stack_start == task_stack_page(task))
14606 break;
14607 + stack = *(unsigned long **)stack_start;
14608 if (ops->stack(data, "IRQ") < 0)
14609 break;
14610 touch_nmi_watchdog();
14611 @@ -112,11 +108,12 @@ void show_registers(struct pt_regs *regs)
14612 * When in-kernel, we also print out the stack and code at the
14613 * time of the fault..
14614 */
14615 - if (!user_mode_vm(regs)) {
14616 + if (!user_mode(regs)) {
14617 unsigned int code_prologue = code_bytes * 43 / 64;
14618 unsigned int code_len = code_bytes;
14619 unsigned char c;
14620 u8 *ip;
14621 + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
14622
14623 printk(KERN_EMERG "Stack:\n");
14624 show_stack_log_lvl(NULL, regs, &regs->sp,
14625 @@ -124,10 +121,10 @@ void show_registers(struct pt_regs *regs)
14626
14627 printk(KERN_EMERG "Code: ");
14628
14629 - ip = (u8 *)regs->ip - code_prologue;
14630 + ip = (u8 *)regs->ip - code_prologue + cs_base;
14631 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
14632 /* try starting at IP */
14633 - ip = (u8 *)regs->ip;
14634 + ip = (u8 *)regs->ip + cs_base;
14635 code_len = code_len - code_prologue + 1;
14636 }
14637 for (i = 0; i < code_len; i++, ip++) {
14638 @@ -136,7 +133,7 @@ void show_registers(struct pt_regs *regs)
14639 printk(" Bad EIP value.");
14640 break;
14641 }
14642 - if (ip == (u8 *)regs->ip)
14643 + if (ip == (u8 *)regs->ip + cs_base)
14644 printk("<%02x> ", c);
14645 else
14646 printk("%02x ", c);
14647 @@ -145,10 +142,23 @@ void show_registers(struct pt_regs *regs)
14648 printk("\n");
14649 }
14650
14651 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14652 +void pax_check_alloca(unsigned long size)
14653 +{
14654 + unsigned long sp = (unsigned long)&sp, stack_left;
14655 +
14656 + /* all kernel stacks are of the same size */
14657 + stack_left = sp & (THREAD_SIZE - 1);
14658 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14659 +}
14660 +EXPORT_SYMBOL(pax_check_alloca);
14661 +#endif
14662 +
14663 int is_valid_bugaddr(unsigned long ip)
14664 {
14665 unsigned short ud2;
14666
14667 + ip = ktla_ktva(ip);
14668 if (ip < PAGE_OFFSET)
14669 return 0;
14670 if (probe_kernel_address((unsigned short *)ip, ud2))
14671 diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
14672 index a071e6b..36cd585 100644
14673 --- a/arch/x86/kernel/dumpstack_64.c
14674 +++ b/arch/x86/kernel/dumpstack_64.c
14675 @@ -116,8 +116,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14676 unsigned long *irq_stack_end =
14677 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
14678 unsigned used = 0;
14679 - struct thread_info *tinfo;
14680 int graph = 0;
14681 + void *stack_start;
14682
14683 if (!task)
14684 task = current;
14685 @@ -146,10 +146,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14686 * current stack address. If the stacks consist of nested
14687 * exceptions
14688 */
14689 - tinfo = task_thread_info(task);
14690 for (;;) {
14691 char *id;
14692 unsigned long *estack_end;
14693 +
14694 estack_end = in_exception_stack(cpu, (unsigned long)stack,
14695 &used, &id);
14696
14697 @@ -157,7 +157,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14698 if (ops->stack(data, id) < 0)
14699 break;
14700
14701 - bp = print_context_stack(tinfo, stack, bp, ops,
14702 + bp = print_context_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
14703 data, estack_end, &graph);
14704 ops->stack(data, "<EOE>");
14705 /*
14706 @@ -176,7 +176,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14707 if (stack >= irq_stack && stack < irq_stack_end) {
14708 if (ops->stack(data, "IRQ") < 0)
14709 break;
14710 - bp = print_context_stack(tinfo, stack, bp,
14711 + bp = print_context_stack(task, irq_stack, stack, bp,
14712 ops, data, irq_stack_end, &graph);
14713 /*
14714 * We link to the next stack (which would be
14715 @@ -195,7 +195,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14716 /*
14717 * This handles the process stack:
14718 */
14719 - bp = print_context_stack(tinfo, stack, bp, ops, data, NULL, &graph);
14720 + stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14721 + bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14722 put_cpu();
14723 }
14724 EXPORT_SYMBOL(dump_trace);
14725 @@ -304,3 +305,50 @@ int is_valid_bugaddr(unsigned long ip)
14726 return ud2 == 0x0b0f;
14727 }
14728
14729 +
14730 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14731 +void pax_check_alloca(unsigned long size)
14732 +{
14733 + unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
14734 + unsigned cpu, used;
14735 + char *id;
14736 +
14737 + /* check the process stack first */
14738 + stack_start = (unsigned long)task_stack_page(current);
14739 + stack_end = stack_start + THREAD_SIZE;
14740 + if (likely(stack_start <= sp && sp < stack_end)) {
14741 + unsigned long stack_left = sp & (THREAD_SIZE - 1);
14742 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14743 + return;
14744 + }
14745 +
14746 + cpu = get_cpu();
14747 +
14748 + /* check the irq stacks */
14749 + stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
14750 + stack_start = stack_end - IRQ_STACK_SIZE;
14751 + if (stack_start <= sp && sp < stack_end) {
14752 + unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
14753 + put_cpu();
14754 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14755 + return;
14756 + }
14757 +
14758 + /* check the exception stacks */
14759 + used = 0;
14760 + stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
14761 + stack_start = stack_end - EXCEPTION_STKSZ;
14762 + if (stack_end && stack_start <= sp && sp < stack_end) {
14763 + unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
14764 + put_cpu();
14765 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14766 + return;
14767 + }
14768 +
14769 + put_cpu();
14770 +
14771 + /* unknown stack */
14772 + BUG();
14773 +}
14774 +EXPORT_SYMBOL(pax_check_alloca);
14775 +#endif
14776 diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
14777 index a89739a..95e0c48 100644
14778 --- a/arch/x86/kernel/e820.c
14779 +++ b/arch/x86/kernel/e820.c
14780 @@ -733,7 +733,7 @@ struct early_res {
14781 };
14782 static struct early_res early_res[MAX_EARLY_RES] __initdata = {
14783 { 0, PAGE_SIZE, "BIOS data page" }, /* BIOS data page */
14784 - {}
14785 + { 0, 0, {0}, 0 }
14786 };
14787
14788 static int __init find_overlapped_early(u64 start, u64 end)
14789 diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
14790 index b9c830c..1e41a96 100644
14791 --- a/arch/x86/kernel/early_printk.c
14792 +++ b/arch/x86/kernel/early_printk.c
14793 @@ -7,6 +7,7 @@
14794 #include <linux/pci_regs.h>
14795 #include <linux/pci_ids.h>
14796 #include <linux/errno.h>
14797 +#include <linux/sched.h>
14798 #include <asm/io.h>
14799 #include <asm/processor.h>
14800 #include <asm/fcntl.h>
14801 @@ -170,6 +171,8 @@ asmlinkage void early_printk(const char *fmt, ...)
14802 int n;
14803 va_list ap;
14804
14805 + pax_track_stack();
14806 +
14807 va_start(ap, fmt);
14808 n = vscnprintf(buf, sizeof(buf), fmt, ap);
14809 early_console->write(early_console, buf, n);
14810 diff --git a/arch/x86/kernel/efi_32.c b/arch/x86/kernel/efi_32.c
14811 index 5cab48e..b025f9b 100644
14812 --- a/arch/x86/kernel/efi_32.c
14813 +++ b/arch/x86/kernel/efi_32.c
14814 @@ -38,70 +38,56 @@
14815 */
14816
14817 static unsigned long efi_rt_eflags;
14818 -static pgd_t efi_bak_pg_dir_pointer[2];
14819 +static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
14820
14821 -void efi_call_phys_prelog(void)
14822 +void __init efi_call_phys_prelog(void)
14823 {
14824 - unsigned long cr4;
14825 - unsigned long temp;
14826 struct desc_ptr gdt_descr;
14827
14828 +#ifdef CONFIG_PAX_KERNEXEC
14829 + struct desc_struct d;
14830 +#endif
14831 +
14832 local_irq_save(efi_rt_eflags);
14833
14834 - /*
14835 - * If I don't have PAE, I should just duplicate two entries in page
14836 - * directory. If I have PAE, I just need to duplicate one entry in
14837 - * page directory.
14838 - */
14839 - cr4 = read_cr4_safe();
14840 -
14841 - if (cr4 & X86_CR4_PAE) {
14842 - efi_bak_pg_dir_pointer[0].pgd =
14843 - swapper_pg_dir[pgd_index(0)].pgd;
14844 - swapper_pg_dir[0].pgd =
14845 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
14846 - } else {
14847 - efi_bak_pg_dir_pointer[0].pgd =
14848 - swapper_pg_dir[pgd_index(0)].pgd;
14849 - efi_bak_pg_dir_pointer[1].pgd =
14850 - swapper_pg_dir[pgd_index(0x400000)].pgd;
14851 - swapper_pg_dir[pgd_index(0)].pgd =
14852 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
14853 - temp = PAGE_OFFSET + 0x400000;
14854 - swapper_pg_dir[pgd_index(0x400000)].pgd =
14855 - swapper_pg_dir[pgd_index(temp)].pgd;
14856 - }
14857 + clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
14858 + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
14859 + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
14860
14861 /*
14862 * After the lock is released, the original page table is restored.
14863 */
14864 __flush_tlb_all();
14865
14866 +#ifdef CONFIG_PAX_KERNEXEC
14867 + pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
14868 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
14869 + pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
14870 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
14871 +#endif
14872 +
14873 gdt_descr.address = __pa(get_cpu_gdt_table(0));
14874 gdt_descr.size = GDT_SIZE - 1;
14875 load_gdt(&gdt_descr);
14876 }
14877
14878 -void efi_call_phys_epilog(void)
14879 +void __init efi_call_phys_epilog(void)
14880 {
14881 - unsigned long cr4;
14882 struct desc_ptr gdt_descr;
14883
14884 +#ifdef CONFIG_PAX_KERNEXEC
14885 + struct desc_struct d;
14886 +
14887 + memset(&d, 0, sizeof d);
14888 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
14889 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
14890 +#endif
14891 +
14892 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
14893 gdt_descr.size = GDT_SIZE - 1;
14894 load_gdt(&gdt_descr);
14895
14896 - cr4 = read_cr4_safe();
14897 -
14898 - if (cr4 & X86_CR4_PAE) {
14899 - swapper_pg_dir[pgd_index(0)].pgd =
14900 - efi_bak_pg_dir_pointer[0].pgd;
14901 - } else {
14902 - swapper_pg_dir[pgd_index(0)].pgd =
14903 - efi_bak_pg_dir_pointer[0].pgd;
14904 - swapper_pg_dir[pgd_index(0x400000)].pgd =
14905 - efi_bak_pg_dir_pointer[1].pgd;
14906 - }
14907 + clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
14908
14909 /*
14910 * After the lock is released, the original page table is restored.
14911 diff --git a/arch/x86/kernel/efi_stub_32.S b/arch/x86/kernel/efi_stub_32.S
14912 index fbe66e6..c5c0dd2 100644
14913 --- a/arch/x86/kernel/efi_stub_32.S
14914 +++ b/arch/x86/kernel/efi_stub_32.S
14915 @@ -6,7 +6,9 @@
14916 */
14917
14918 #include <linux/linkage.h>
14919 +#include <linux/init.h>
14920 #include <asm/page_types.h>
14921 +#include <asm/segment.h>
14922
14923 /*
14924 * efi_call_phys(void *, ...) is a function with variable parameters.
14925 @@ -20,7 +22,7 @@
14926 * service functions will comply with gcc calling convention, too.
14927 */
14928
14929 -.text
14930 +__INIT
14931 ENTRY(efi_call_phys)
14932 /*
14933 * 0. The function can only be called in Linux kernel. So CS has been
14934 @@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
14935 * The mapping of lower virtual memory has been created in prelog and
14936 * epilog.
14937 */
14938 - movl $1f, %edx
14939 - subl $__PAGE_OFFSET, %edx
14940 - jmp *%edx
14941 + movl $(__KERNEXEC_EFI_DS), %edx
14942 + mov %edx, %ds
14943 + mov %edx, %es
14944 + mov %edx, %ss
14945 + ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
14946 1:
14947
14948 /*
14949 @@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
14950 * parameter 2, ..., param n. To make things easy, we save the return
14951 * address of efi_call_phys in a global variable.
14952 */
14953 - popl %edx
14954 - movl %edx, saved_return_addr
14955 - /* get the function pointer into ECX*/
14956 - popl %ecx
14957 - movl %ecx, efi_rt_function_ptr
14958 - movl $2f, %edx
14959 - subl $__PAGE_OFFSET, %edx
14960 - pushl %edx
14961 + popl (saved_return_addr)
14962 + popl (efi_rt_function_ptr)
14963
14964 /*
14965 * 3. Clear PG bit in %CR0.
14966 @@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
14967 /*
14968 * 5. Call the physical function.
14969 */
14970 - jmp *%ecx
14971 + call *(efi_rt_function_ptr-__PAGE_OFFSET)
14972
14973 -2:
14974 /*
14975 * 6. After EFI runtime service returns, control will return to
14976 * following instruction. We'd better readjust stack pointer first.
14977 @@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
14978 movl %cr0, %edx
14979 orl $0x80000000, %edx
14980 movl %edx, %cr0
14981 - jmp 1f
14982 -1:
14983 +
14984 /*
14985 * 8. Now restore the virtual mode from flat mode by
14986 * adding EIP with PAGE_OFFSET.
14987 */
14988 - movl $1f, %edx
14989 - jmp *%edx
14990 + ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
14991 1:
14992 + movl $(__KERNEL_DS), %edx
14993 + mov %edx, %ds
14994 + mov %edx, %es
14995 + mov %edx, %ss
14996
14997 /*
14998 * 9. Balance the stack. And because EAX contain the return value,
14999 * we'd better not clobber it.
15000 */
15001 - leal efi_rt_function_ptr, %edx
15002 - movl (%edx), %ecx
15003 - pushl %ecx
15004 + pushl (efi_rt_function_ptr)
15005
15006 /*
15007 - * 10. Push the saved return address onto the stack and return.
15008 + * 10. Return to the saved return address.
15009 */
15010 - leal saved_return_addr, %edx
15011 - movl (%edx), %ecx
15012 - pushl %ecx
15013 - ret
15014 + jmpl *(saved_return_addr)
15015 ENDPROC(efi_call_phys)
15016 .previous
15017
15018 -.data
15019 +__INITDATA
15020 saved_return_addr:
15021 .long 0
15022 efi_rt_function_ptr:
15023 diff --git a/arch/x86/kernel/efi_stub_64.S b/arch/x86/kernel/efi_stub_64.S
15024 index 4c07cca..2c8427d 100644
15025 --- a/arch/x86/kernel/efi_stub_64.S
15026 +++ b/arch/x86/kernel/efi_stub_64.S
15027 @@ -7,6 +7,7 @@
15028 */
15029
15030 #include <linux/linkage.h>
15031 +#include <asm/alternative-asm.h>
15032
15033 #define SAVE_XMM \
15034 mov %rsp, %rax; \
15035 @@ -40,6 +41,7 @@ ENTRY(efi_call0)
15036 call *%rdi
15037 addq $32, %rsp
15038 RESTORE_XMM
15039 + pax_force_retaddr 0, 1
15040 ret
15041 ENDPROC(efi_call0)
15042
15043 @@ -50,6 +52,7 @@ ENTRY(efi_call1)
15044 call *%rdi
15045 addq $32, %rsp
15046 RESTORE_XMM
15047 + pax_force_retaddr 0, 1
15048 ret
15049 ENDPROC(efi_call1)
15050
15051 @@ -60,6 +63,7 @@ ENTRY(efi_call2)
15052 call *%rdi
15053 addq $32, %rsp
15054 RESTORE_XMM
15055 + pax_force_retaddr 0, 1
15056 ret
15057 ENDPROC(efi_call2)
15058
15059 @@ -71,6 +75,7 @@ ENTRY(efi_call3)
15060 call *%rdi
15061 addq $32, %rsp
15062 RESTORE_XMM
15063 + pax_force_retaddr 0, 1
15064 ret
15065 ENDPROC(efi_call3)
15066
15067 @@ -83,6 +88,7 @@ ENTRY(efi_call4)
15068 call *%rdi
15069 addq $32, %rsp
15070 RESTORE_XMM
15071 + pax_force_retaddr 0, 1
15072 ret
15073 ENDPROC(efi_call4)
15074
15075 @@ -96,6 +102,7 @@ ENTRY(efi_call5)
15076 call *%rdi
15077 addq $48, %rsp
15078 RESTORE_XMM
15079 + pax_force_retaddr 0, 1
15080 ret
15081 ENDPROC(efi_call5)
15082
15083 @@ -112,5 +119,6 @@ ENTRY(efi_call6)
15084 call *%rdi
15085 addq $48, %rsp
15086 RESTORE_XMM
15087 + pax_force_retaddr 0, 1
15088 ret
15089 ENDPROC(efi_call6)
15090 diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
15091 index c097e7d..c689cf4 100644
15092 --- a/arch/x86/kernel/entry_32.S
15093 +++ b/arch/x86/kernel/entry_32.S
15094 @@ -185,13 +185,146 @@
15095 /*CFI_REL_OFFSET gs, PT_GS*/
15096 .endm
15097 .macro SET_KERNEL_GS reg
15098 +
15099 +#ifdef CONFIG_CC_STACKPROTECTOR
15100 movl $(__KERNEL_STACK_CANARY), \reg
15101 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
15102 + movl $(__USER_DS), \reg
15103 +#else
15104 + xorl \reg, \reg
15105 +#endif
15106 +
15107 movl \reg, %gs
15108 .endm
15109
15110 #endif /* CONFIG_X86_32_LAZY_GS */
15111
15112 -.macro SAVE_ALL
15113 +.macro pax_enter_kernel
15114 +#ifdef CONFIG_PAX_KERNEXEC
15115 + call pax_enter_kernel
15116 +#endif
15117 +.endm
15118 +
15119 +.macro pax_exit_kernel
15120 +#ifdef CONFIG_PAX_KERNEXEC
15121 + call pax_exit_kernel
15122 +#endif
15123 +.endm
15124 +
15125 +#ifdef CONFIG_PAX_KERNEXEC
15126 +ENTRY(pax_enter_kernel)
15127 +#ifdef CONFIG_PARAVIRT
15128 + pushl %eax
15129 + pushl %ecx
15130 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
15131 + mov %eax, %esi
15132 +#else
15133 + mov %cr0, %esi
15134 +#endif
15135 + bts $16, %esi
15136 + jnc 1f
15137 + mov %cs, %esi
15138 + cmp $__KERNEL_CS, %esi
15139 + jz 3f
15140 + ljmp $__KERNEL_CS, $3f
15141 +1: ljmp $__KERNEXEC_KERNEL_CS, $2f
15142 +2:
15143 +#ifdef CONFIG_PARAVIRT
15144 + mov %esi, %eax
15145 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
15146 +#else
15147 + mov %esi, %cr0
15148 +#endif
15149 +3:
15150 +#ifdef CONFIG_PARAVIRT
15151 + popl %ecx
15152 + popl %eax
15153 +#endif
15154 + ret
15155 +ENDPROC(pax_enter_kernel)
15156 +
15157 +ENTRY(pax_exit_kernel)
15158 +#ifdef CONFIG_PARAVIRT
15159 + pushl %eax
15160 + pushl %ecx
15161 +#endif
15162 + mov %cs, %esi
15163 + cmp $__KERNEXEC_KERNEL_CS, %esi
15164 + jnz 2f
15165 +#ifdef CONFIG_PARAVIRT
15166 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
15167 + mov %eax, %esi
15168 +#else
15169 + mov %cr0, %esi
15170 +#endif
15171 + btr $16, %esi
15172 + ljmp $__KERNEL_CS, $1f
15173 +1:
15174 +#ifdef CONFIG_PARAVIRT
15175 + mov %esi, %eax
15176 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
15177 +#else
15178 + mov %esi, %cr0
15179 +#endif
15180 +2:
15181 +#ifdef CONFIG_PARAVIRT
15182 + popl %ecx
15183 + popl %eax
15184 +#endif
15185 + ret
15186 +ENDPROC(pax_exit_kernel)
15187 +#endif
15188 +
15189 +.macro pax_erase_kstack
15190 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15191 + call pax_erase_kstack
15192 +#endif
15193 +.endm
15194 +
15195 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15196 +/*
15197 + * ebp: thread_info
15198 + * ecx, edx: can be clobbered
15199 + */
15200 +ENTRY(pax_erase_kstack)
15201 + pushl %edi
15202 + pushl %eax
15203 +
15204 + mov TI_lowest_stack(%ebp), %edi
15205 + mov $-0xBEEF, %eax
15206 + std
15207 +
15208 +1: mov %edi, %ecx
15209 + and $THREAD_SIZE_asm - 1, %ecx
15210 + shr $2, %ecx
15211 + repne scasl
15212 + jecxz 2f
15213 +
15214 + cmp $2*16, %ecx
15215 + jc 2f
15216 +
15217 + mov $2*16, %ecx
15218 + repe scasl
15219 + jecxz 2f
15220 + jne 1b
15221 +
15222 +2: cld
15223 + mov %esp, %ecx
15224 + sub %edi, %ecx
15225 + shr $2, %ecx
15226 + rep stosl
15227 +
15228 + mov TI_task_thread_sp0(%ebp), %edi
15229 + sub $128, %edi
15230 + mov %edi, TI_lowest_stack(%ebp)
15231 +
15232 + popl %eax
15233 + popl %edi
15234 + ret
15235 +ENDPROC(pax_erase_kstack)
15236 +#endif
15237 +
15238 +.macro __SAVE_ALL _DS
15239 cld
15240 PUSH_GS
15241 pushl %fs
15242 @@ -224,7 +357,7 @@
15243 pushl %ebx
15244 CFI_ADJUST_CFA_OFFSET 4
15245 CFI_REL_OFFSET ebx, 0
15246 - movl $(__USER_DS), %edx
15247 + movl $\_DS, %edx
15248 movl %edx, %ds
15249 movl %edx, %es
15250 movl $(__KERNEL_PERCPU), %edx
15251 @@ -232,6 +365,15 @@
15252 SET_KERNEL_GS %edx
15253 .endm
15254
15255 +.macro SAVE_ALL
15256 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
15257 + __SAVE_ALL __KERNEL_DS
15258 + pax_enter_kernel
15259 +#else
15260 + __SAVE_ALL __USER_DS
15261 +#endif
15262 +.endm
15263 +
15264 .macro RESTORE_INT_REGS
15265 popl %ebx
15266 CFI_ADJUST_CFA_OFFSET -4
15267 @@ -331,7 +473,7 @@ ENTRY(ret_from_fork)
15268 CFI_ADJUST_CFA_OFFSET -4
15269 jmp syscall_exit
15270 CFI_ENDPROC
15271 -END(ret_from_fork)
15272 +ENDPROC(ret_from_fork)
15273
15274 /*
15275 * Return to user mode is not as complex as all this looks,
15276 @@ -352,7 +494,15 @@ check_userspace:
15277 movb PT_CS(%esp), %al
15278 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
15279 cmpl $USER_RPL, %eax
15280 +
15281 +#ifdef CONFIG_PAX_KERNEXEC
15282 + jae resume_userspace
15283 +
15284 + PAX_EXIT_KERNEL
15285 + jmp resume_kernel
15286 +#else
15287 jb resume_kernel # not returning to v8086 or userspace
15288 +#endif
15289
15290 ENTRY(resume_userspace)
15291 LOCKDEP_SYS_EXIT
15292 @@ -364,8 +514,8 @@ ENTRY(resume_userspace)
15293 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
15294 # int/exception return?
15295 jne work_pending
15296 - jmp restore_all
15297 -END(ret_from_exception)
15298 + jmp restore_all_pax
15299 +ENDPROC(ret_from_exception)
15300
15301 #ifdef CONFIG_PREEMPT
15302 ENTRY(resume_kernel)
15303 @@ -380,7 +530,7 @@ need_resched:
15304 jz restore_all
15305 call preempt_schedule_irq
15306 jmp need_resched
15307 -END(resume_kernel)
15308 +ENDPROC(resume_kernel)
15309 #endif
15310 CFI_ENDPROC
15311
15312 @@ -414,25 +564,36 @@ sysenter_past_esp:
15313 /*CFI_REL_OFFSET cs, 0*/
15314 /*
15315 * Push current_thread_info()->sysenter_return to the stack.
15316 - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
15317 - * pushed above; +8 corresponds to copy_thread's esp0 setting.
15318 */
15319 - pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
15320 + pushl $0
15321 CFI_ADJUST_CFA_OFFSET 4
15322 CFI_REL_OFFSET eip, 0
15323
15324 pushl %eax
15325 CFI_ADJUST_CFA_OFFSET 4
15326 SAVE_ALL
15327 + GET_THREAD_INFO(%ebp)
15328 + movl TI_sysenter_return(%ebp),%ebp
15329 + movl %ebp,PT_EIP(%esp)
15330 ENABLE_INTERRUPTS(CLBR_NONE)
15331
15332 /*
15333 * Load the potential sixth argument from user stack.
15334 * Careful about security.
15335 */
15336 + movl PT_OLDESP(%esp),%ebp
15337 +
15338 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15339 + mov PT_OLDSS(%esp),%ds
15340 +1: movl %ds:(%ebp),%ebp
15341 + push %ss
15342 + pop %ds
15343 +#else
15344 cmpl $__PAGE_OFFSET-3,%ebp
15345 jae syscall_fault
15346 1: movl (%ebp),%ebp
15347 +#endif
15348 +
15349 movl %ebp,PT_EBP(%esp)
15350 .section __ex_table,"a"
15351 .align 4
15352 @@ -455,12 +616,24 @@ sysenter_do_call:
15353 testl $_TIF_ALLWORK_MASK, %ecx
15354 jne sysexit_audit
15355 sysenter_exit:
15356 +
15357 +#ifdef CONFIG_PAX_RANDKSTACK
15358 + pushl_cfi %eax
15359 + movl %esp, %eax
15360 + call pax_randomize_kstack
15361 + popl_cfi %eax
15362 +#endif
15363 +
15364 + pax_erase_kstack
15365 +
15366 /* if something modifies registers it must also disable sysexit */
15367 movl PT_EIP(%esp), %edx
15368 movl PT_OLDESP(%esp), %ecx
15369 xorl %ebp,%ebp
15370 TRACE_IRQS_ON
15371 1: mov PT_FS(%esp), %fs
15372 +2: mov PT_DS(%esp), %ds
15373 +3: mov PT_ES(%esp), %es
15374 PTGS_TO_GS
15375 ENABLE_INTERRUPTS_SYSEXIT
15376
15377 @@ -477,6 +650,9 @@ sysenter_audit:
15378 movl %eax,%edx /* 2nd arg: syscall number */
15379 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
15380 call audit_syscall_entry
15381 +
15382 + pax_erase_kstack
15383 +
15384 pushl %ebx
15385 CFI_ADJUST_CFA_OFFSET 4
15386 movl PT_EAX(%esp),%eax /* reload syscall number */
15387 @@ -504,11 +680,17 @@ sysexit_audit:
15388
15389 CFI_ENDPROC
15390 .pushsection .fixup,"ax"
15391 -2: movl $0,PT_FS(%esp)
15392 +4: movl $0,PT_FS(%esp)
15393 + jmp 1b
15394 +5: movl $0,PT_DS(%esp)
15395 + jmp 1b
15396 +6: movl $0,PT_ES(%esp)
15397 jmp 1b
15398 .section __ex_table,"a"
15399 .align 4
15400 - .long 1b,2b
15401 + .long 1b,4b
15402 + .long 2b,5b
15403 + .long 3b,6b
15404 .popsection
15405 PTGS_TO_GS_EX
15406 ENDPROC(ia32_sysenter_target)
15407 @@ -538,6 +720,15 @@ syscall_exit:
15408 testl $_TIF_ALLWORK_MASK, %ecx # current->work
15409 jne syscall_exit_work
15410
15411 +restore_all_pax:
15412 +
15413 +#ifdef CONFIG_PAX_RANDKSTACK
15414 + movl %esp, %eax
15415 + call pax_randomize_kstack
15416 +#endif
15417 +
15418 + pax_erase_kstack
15419 +
15420 restore_all:
15421 TRACE_IRQS_IRET
15422 restore_all_notrace:
15423 @@ -602,10 +793,29 @@ ldt_ss:
15424 mov PT_OLDESP(%esp), %eax /* load userspace esp */
15425 mov %dx, %ax /* eax: new kernel esp */
15426 sub %eax, %edx /* offset (low word is 0) */
15427 - PER_CPU(gdt_page, %ebx)
15428 +#ifdef CONFIG_SMP
15429 + movl PER_CPU_VAR(cpu_number), %ebx
15430 + shll $PAGE_SHIFT_asm, %ebx
15431 + addl $cpu_gdt_table, %ebx
15432 +#else
15433 + movl $cpu_gdt_table, %ebx
15434 +#endif
15435 shr $16, %edx
15436 +
15437 +#ifdef CONFIG_PAX_KERNEXEC
15438 + mov %cr0, %esi
15439 + btr $16, %esi
15440 + mov %esi, %cr0
15441 +#endif
15442 +
15443 mov %dl, GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx) /* bits 16..23 */
15444 mov %dh, GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx) /* bits 24..31 */
15445 +
15446 +#ifdef CONFIG_PAX_KERNEXEC
15447 + bts $16, %esi
15448 + mov %esi, %cr0
15449 +#endif
15450 +
15451 pushl $__ESPFIX_SS
15452 CFI_ADJUST_CFA_OFFSET 4
15453 push %eax /* new kernel esp */
15454 @@ -636,36 +846,30 @@ work_resched:
15455 movl TI_flags(%ebp), %ecx
15456 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
15457 # than syscall tracing?
15458 - jz restore_all
15459 + jz restore_all_pax
15460 testb $_TIF_NEED_RESCHED, %cl
15461 jnz work_resched
15462
15463 work_notifysig: # deal with pending signals and
15464 # notify-resume requests
15465 + movl %esp, %eax
15466 #ifdef CONFIG_VM86
15467 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
15468 - movl %esp, %eax
15469 - jne work_notifysig_v86 # returning to kernel-space or
15470 + jz 1f # returning to kernel-space or
15471 # vm86-space
15472 - xorl %edx, %edx
15473 - call do_notify_resume
15474 - jmp resume_userspace_sig
15475
15476 - ALIGN
15477 -work_notifysig_v86:
15478 pushl %ecx # save ti_flags for do_notify_resume
15479 CFI_ADJUST_CFA_OFFSET 4
15480 call save_v86_state # %eax contains pt_regs pointer
15481 popl %ecx
15482 CFI_ADJUST_CFA_OFFSET -4
15483 movl %eax, %esp
15484 -#else
15485 - movl %esp, %eax
15486 +1:
15487 #endif
15488 xorl %edx, %edx
15489 call do_notify_resume
15490 jmp resume_userspace_sig
15491 -END(work_pending)
15492 +ENDPROC(work_pending)
15493
15494 # perform syscall exit tracing
15495 ALIGN
15496 @@ -673,11 +877,14 @@ syscall_trace_entry:
15497 movl $-ENOSYS,PT_EAX(%esp)
15498 movl %esp, %eax
15499 call syscall_trace_enter
15500 +
15501 + pax_erase_kstack
15502 +
15503 /* What it returned is what we'll actually use. */
15504 cmpl $(nr_syscalls), %eax
15505 jnae syscall_call
15506 jmp syscall_exit
15507 -END(syscall_trace_entry)
15508 +ENDPROC(syscall_trace_entry)
15509
15510 # perform syscall exit tracing
15511 ALIGN
15512 @@ -690,20 +897,24 @@ syscall_exit_work:
15513 movl %esp, %eax
15514 call syscall_trace_leave
15515 jmp resume_userspace
15516 -END(syscall_exit_work)
15517 +ENDPROC(syscall_exit_work)
15518 CFI_ENDPROC
15519
15520 RING0_INT_FRAME # can't unwind into user space anyway
15521 syscall_fault:
15522 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15523 + push %ss
15524 + pop %ds
15525 +#endif
15526 GET_THREAD_INFO(%ebp)
15527 movl $-EFAULT,PT_EAX(%esp)
15528 jmp resume_userspace
15529 -END(syscall_fault)
15530 +ENDPROC(syscall_fault)
15531
15532 syscall_badsys:
15533 movl $-ENOSYS,PT_EAX(%esp)
15534 jmp resume_userspace
15535 -END(syscall_badsys)
15536 +ENDPROC(syscall_badsys)
15537 CFI_ENDPROC
15538
15539 /*
15540 @@ -726,6 +937,33 @@ PTREGSCALL(rt_sigreturn)
15541 PTREGSCALL(vm86)
15542 PTREGSCALL(vm86old)
15543
15544 + ALIGN;
15545 +ENTRY(kernel_execve)
15546 + push %ebp
15547 + sub $PT_OLDSS+4,%esp
15548 + push %edi
15549 + push %ecx
15550 + push %eax
15551 + lea 3*4(%esp),%edi
15552 + mov $PT_OLDSS/4+1,%ecx
15553 + xorl %eax,%eax
15554 + rep stosl
15555 + pop %eax
15556 + pop %ecx
15557 + pop %edi
15558 + movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
15559 + mov %eax,PT_EBX(%esp)
15560 + mov %edx,PT_ECX(%esp)
15561 + mov %ecx,PT_EDX(%esp)
15562 + mov %esp,%eax
15563 + call sys_execve
15564 + GET_THREAD_INFO(%ebp)
15565 + test %eax,%eax
15566 + jz syscall_exit
15567 + add $PT_OLDSS+4,%esp
15568 + pop %ebp
15569 + ret
15570 +
15571 .macro FIXUP_ESPFIX_STACK
15572 /*
15573 * Switch back for ESPFIX stack to the normal zerobased stack
15574 @@ -735,7 +973,13 @@ PTREGSCALL(vm86old)
15575 * normal stack and adjusts ESP with the matching offset.
15576 */
15577 /* fixup the stack */
15578 - PER_CPU(gdt_page, %ebx)
15579 +#ifdef CONFIG_SMP
15580 + movl PER_CPU_VAR(cpu_number), %ebx
15581 + shll $PAGE_SHIFT_asm, %ebx
15582 + addl $cpu_gdt_table, %ebx
15583 +#else
15584 + movl $cpu_gdt_table, %ebx
15585 +#endif
15586 mov GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx), %al /* bits 16..23 */
15587 mov GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx), %ah /* bits 24..31 */
15588 shl $16, %eax
15589 @@ -793,7 +1037,7 @@ vector=vector+1
15590 .endr
15591 2: jmp common_interrupt
15592 .endr
15593 -END(irq_entries_start)
15594 +ENDPROC(irq_entries_start)
15595
15596 .previous
15597 END(interrupt)
15598 @@ -840,7 +1084,7 @@ ENTRY(coprocessor_error)
15599 CFI_ADJUST_CFA_OFFSET 4
15600 jmp error_code
15601 CFI_ENDPROC
15602 -END(coprocessor_error)
15603 +ENDPROC(coprocessor_error)
15604
15605 ENTRY(simd_coprocessor_error)
15606 RING0_INT_FRAME
15607 @@ -850,7 +1094,7 @@ ENTRY(simd_coprocessor_error)
15608 CFI_ADJUST_CFA_OFFSET 4
15609 jmp error_code
15610 CFI_ENDPROC
15611 -END(simd_coprocessor_error)
15612 +ENDPROC(simd_coprocessor_error)
15613
15614 ENTRY(device_not_available)
15615 RING0_INT_FRAME
15616 @@ -860,7 +1104,7 @@ ENTRY(device_not_available)
15617 CFI_ADJUST_CFA_OFFSET 4
15618 jmp error_code
15619 CFI_ENDPROC
15620 -END(device_not_available)
15621 +ENDPROC(device_not_available)
15622
15623 #ifdef CONFIG_PARAVIRT
15624 ENTRY(native_iret)
15625 @@ -869,12 +1113,12 @@ ENTRY(native_iret)
15626 .align 4
15627 .long native_iret, iret_exc
15628 .previous
15629 -END(native_iret)
15630 +ENDPROC(native_iret)
15631
15632 ENTRY(native_irq_enable_sysexit)
15633 sti
15634 sysexit
15635 -END(native_irq_enable_sysexit)
15636 +ENDPROC(native_irq_enable_sysexit)
15637 #endif
15638
15639 ENTRY(overflow)
15640 @@ -885,7 +1129,7 @@ ENTRY(overflow)
15641 CFI_ADJUST_CFA_OFFSET 4
15642 jmp error_code
15643 CFI_ENDPROC
15644 -END(overflow)
15645 +ENDPROC(overflow)
15646
15647 ENTRY(bounds)
15648 RING0_INT_FRAME
15649 @@ -895,7 +1139,7 @@ ENTRY(bounds)
15650 CFI_ADJUST_CFA_OFFSET 4
15651 jmp error_code
15652 CFI_ENDPROC
15653 -END(bounds)
15654 +ENDPROC(bounds)
15655
15656 ENTRY(invalid_op)
15657 RING0_INT_FRAME
15658 @@ -905,7 +1149,7 @@ ENTRY(invalid_op)
15659 CFI_ADJUST_CFA_OFFSET 4
15660 jmp error_code
15661 CFI_ENDPROC
15662 -END(invalid_op)
15663 +ENDPROC(invalid_op)
15664
15665 ENTRY(coprocessor_segment_overrun)
15666 RING0_INT_FRAME
15667 @@ -915,7 +1159,7 @@ ENTRY(coprocessor_segment_overrun)
15668 CFI_ADJUST_CFA_OFFSET 4
15669 jmp error_code
15670 CFI_ENDPROC
15671 -END(coprocessor_segment_overrun)
15672 +ENDPROC(coprocessor_segment_overrun)
15673
15674 ENTRY(invalid_TSS)
15675 RING0_EC_FRAME
15676 @@ -923,7 +1167,7 @@ ENTRY(invalid_TSS)
15677 CFI_ADJUST_CFA_OFFSET 4
15678 jmp error_code
15679 CFI_ENDPROC
15680 -END(invalid_TSS)
15681 +ENDPROC(invalid_TSS)
15682
15683 ENTRY(segment_not_present)
15684 RING0_EC_FRAME
15685 @@ -931,7 +1175,7 @@ ENTRY(segment_not_present)
15686 CFI_ADJUST_CFA_OFFSET 4
15687 jmp error_code
15688 CFI_ENDPROC
15689 -END(segment_not_present)
15690 +ENDPROC(segment_not_present)
15691
15692 ENTRY(stack_segment)
15693 RING0_EC_FRAME
15694 @@ -939,7 +1183,7 @@ ENTRY(stack_segment)
15695 CFI_ADJUST_CFA_OFFSET 4
15696 jmp error_code
15697 CFI_ENDPROC
15698 -END(stack_segment)
15699 +ENDPROC(stack_segment)
15700
15701 ENTRY(alignment_check)
15702 RING0_EC_FRAME
15703 @@ -947,7 +1191,7 @@ ENTRY(alignment_check)
15704 CFI_ADJUST_CFA_OFFSET 4
15705 jmp error_code
15706 CFI_ENDPROC
15707 -END(alignment_check)
15708 +ENDPROC(alignment_check)
15709
15710 ENTRY(divide_error)
15711 RING0_INT_FRAME
15712 @@ -957,7 +1201,7 @@ ENTRY(divide_error)
15713 CFI_ADJUST_CFA_OFFSET 4
15714 jmp error_code
15715 CFI_ENDPROC
15716 -END(divide_error)
15717 +ENDPROC(divide_error)
15718
15719 #ifdef CONFIG_X86_MCE
15720 ENTRY(machine_check)
15721 @@ -968,7 +1212,7 @@ ENTRY(machine_check)
15722 CFI_ADJUST_CFA_OFFSET 4
15723 jmp error_code
15724 CFI_ENDPROC
15725 -END(machine_check)
15726 +ENDPROC(machine_check)
15727 #endif
15728
15729 ENTRY(spurious_interrupt_bug)
15730 @@ -979,7 +1223,7 @@ ENTRY(spurious_interrupt_bug)
15731 CFI_ADJUST_CFA_OFFSET 4
15732 jmp error_code
15733 CFI_ENDPROC
15734 -END(spurious_interrupt_bug)
15735 +ENDPROC(spurious_interrupt_bug)
15736
15737 ENTRY(kernel_thread_helper)
15738 pushl $0 # fake return address for unwinder
15739 @@ -1095,7 +1339,7 @@ ENDPROC(xen_failsafe_callback)
15740
15741 ENTRY(mcount)
15742 ret
15743 -END(mcount)
15744 +ENDPROC(mcount)
15745
15746 ENTRY(ftrace_caller)
15747 cmpl $0, function_trace_stop
15748 @@ -1124,7 +1368,7 @@ ftrace_graph_call:
15749 .globl ftrace_stub
15750 ftrace_stub:
15751 ret
15752 -END(ftrace_caller)
15753 +ENDPROC(ftrace_caller)
15754
15755 #else /* ! CONFIG_DYNAMIC_FTRACE */
15756
15757 @@ -1160,7 +1404,7 @@ trace:
15758 popl %ecx
15759 popl %eax
15760 jmp ftrace_stub
15761 -END(mcount)
15762 +ENDPROC(mcount)
15763 #endif /* CONFIG_DYNAMIC_FTRACE */
15764 #endif /* CONFIG_FUNCTION_TRACER */
15765
15766 @@ -1181,7 +1425,7 @@ ENTRY(ftrace_graph_caller)
15767 popl %ecx
15768 popl %eax
15769 ret
15770 -END(ftrace_graph_caller)
15771 +ENDPROC(ftrace_graph_caller)
15772
15773 .globl return_to_handler
15774 return_to_handler:
15775 @@ -1198,7 +1442,6 @@ return_to_handler:
15776 ret
15777 #endif
15778
15779 -.section .rodata,"a"
15780 #include "syscall_table_32.S"
15781
15782 syscall_table_size=(.-sys_call_table)
15783 @@ -1255,15 +1498,18 @@ error_code:
15784 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
15785 REG_TO_PTGS %ecx
15786 SET_KERNEL_GS %ecx
15787 - movl $(__USER_DS), %ecx
15788 + movl $(__KERNEL_DS), %ecx
15789 movl %ecx, %ds
15790 movl %ecx, %es
15791 +
15792 + pax_enter_kernel
15793 +
15794 TRACE_IRQS_OFF
15795 movl %esp,%eax # pt_regs pointer
15796 call *%edi
15797 jmp ret_from_exception
15798 CFI_ENDPROC
15799 -END(page_fault)
15800 +ENDPROC(page_fault)
15801
15802 /*
15803 * Debug traps and NMI can happen at the one SYSENTER instruction
15804 @@ -1309,7 +1555,7 @@ debug_stack_correct:
15805 call do_debug
15806 jmp ret_from_exception
15807 CFI_ENDPROC
15808 -END(debug)
15809 +ENDPROC(debug)
15810
15811 /*
15812 * NMI is doubly nasty. It can happen _while_ we're handling
15813 @@ -1351,6 +1597,9 @@ nmi_stack_correct:
15814 xorl %edx,%edx # zero error code
15815 movl %esp,%eax # pt_regs pointer
15816 call do_nmi
15817 +
15818 + pax_exit_kernel
15819 +
15820 jmp restore_all_notrace
15821 CFI_ENDPROC
15822
15823 @@ -1391,12 +1640,15 @@ nmi_espfix_stack:
15824 FIXUP_ESPFIX_STACK # %eax == %esp
15825 xorl %edx,%edx # zero error code
15826 call do_nmi
15827 +
15828 + pax_exit_kernel
15829 +
15830 RESTORE_REGS
15831 lss 12+4(%esp), %esp # back to espfix stack
15832 CFI_ADJUST_CFA_OFFSET -24
15833 jmp irq_return
15834 CFI_ENDPROC
15835 -END(nmi)
15836 +ENDPROC(nmi)
15837
15838 ENTRY(int3)
15839 RING0_INT_FRAME
15840 @@ -1409,7 +1661,7 @@ ENTRY(int3)
15841 call do_int3
15842 jmp ret_from_exception
15843 CFI_ENDPROC
15844 -END(int3)
15845 +ENDPROC(int3)
15846
15847 ENTRY(general_protection)
15848 RING0_EC_FRAME
15849 @@ -1417,7 +1669,7 @@ ENTRY(general_protection)
15850 CFI_ADJUST_CFA_OFFSET 4
15851 jmp error_code
15852 CFI_ENDPROC
15853 -END(general_protection)
15854 +ENDPROC(general_protection)
15855
15856 /*
15857 * End of kprobes section
15858 diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
15859 index 34a56a9..87790b4 100644
15860 --- a/arch/x86/kernel/entry_64.S
15861 +++ b/arch/x86/kernel/entry_64.S
15862 @@ -53,6 +53,8 @@
15863 #include <asm/paravirt.h>
15864 #include <asm/ftrace.h>
15865 #include <asm/percpu.h>
15866 +#include <asm/pgtable.h>
15867 +#include <asm/alternative-asm.h>
15868
15869 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
15870 #include <linux/elf-em.h>
15871 @@ -64,8 +66,9 @@
15872 #ifdef CONFIG_FUNCTION_TRACER
15873 #ifdef CONFIG_DYNAMIC_FTRACE
15874 ENTRY(mcount)
15875 + pax_force_retaddr
15876 retq
15877 -END(mcount)
15878 +ENDPROC(mcount)
15879
15880 ENTRY(ftrace_caller)
15881 cmpl $0, function_trace_stop
15882 @@ -88,8 +91,9 @@ GLOBAL(ftrace_graph_call)
15883 #endif
15884
15885 GLOBAL(ftrace_stub)
15886 + pax_force_retaddr
15887 retq
15888 -END(ftrace_caller)
15889 +ENDPROC(ftrace_caller)
15890
15891 #else /* ! CONFIG_DYNAMIC_FTRACE */
15892 ENTRY(mcount)
15893 @@ -108,6 +112,7 @@ ENTRY(mcount)
15894 #endif
15895
15896 GLOBAL(ftrace_stub)
15897 + pax_force_retaddr
15898 retq
15899
15900 trace:
15901 @@ -117,12 +122,13 @@ trace:
15902 movq 8(%rbp), %rsi
15903 subq $MCOUNT_INSN_SIZE, %rdi
15904
15905 + pax_force_fptr ftrace_trace_function
15906 call *ftrace_trace_function
15907
15908 MCOUNT_RESTORE_FRAME
15909
15910 jmp ftrace_stub
15911 -END(mcount)
15912 +ENDPROC(mcount)
15913 #endif /* CONFIG_DYNAMIC_FTRACE */
15914 #endif /* CONFIG_FUNCTION_TRACER */
15915
15916 @@ -142,8 +148,9 @@ ENTRY(ftrace_graph_caller)
15917
15918 MCOUNT_RESTORE_FRAME
15919
15920 + pax_force_retaddr
15921 retq
15922 -END(ftrace_graph_caller)
15923 +ENDPROC(ftrace_graph_caller)
15924
15925 GLOBAL(return_to_handler)
15926 subq $24, %rsp
15927 @@ -159,6 +166,7 @@ GLOBAL(return_to_handler)
15928 movq 8(%rsp), %rdx
15929 movq (%rsp), %rax
15930 addq $16, %rsp
15931 + pax_force_retaddr
15932 retq
15933 #endif
15934
15935 @@ -174,6 +182,282 @@ ENTRY(native_usergs_sysret64)
15936 ENDPROC(native_usergs_sysret64)
15937 #endif /* CONFIG_PARAVIRT */
15938
15939 + .macro ljmpq sel, off
15940 +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
15941 + .byte 0x48; ljmp *1234f(%rip)
15942 + .pushsection .rodata
15943 + .align 16
15944 + 1234: .quad \off; .word \sel
15945 + .popsection
15946 +#else
15947 + pushq $\sel
15948 + pushq $\off
15949 + lretq
15950 +#endif
15951 + .endm
15952 +
15953 + .macro pax_enter_kernel
15954 + pax_set_fptr_mask
15955 +#ifdef CONFIG_PAX_KERNEXEC
15956 + call pax_enter_kernel
15957 +#endif
15958 + .endm
15959 +
15960 + .macro pax_exit_kernel
15961 +#ifdef CONFIG_PAX_KERNEXEC
15962 + call pax_exit_kernel
15963 +#endif
15964 + .endm
15965 +
15966 +#ifdef CONFIG_PAX_KERNEXEC
15967 +ENTRY(pax_enter_kernel)
15968 + pushq %rdi
15969 +
15970 +#ifdef CONFIG_PARAVIRT
15971 + PV_SAVE_REGS(CLBR_RDI)
15972 +#endif
15973 +
15974 + GET_CR0_INTO_RDI
15975 + bts $16,%rdi
15976 + jnc 3f
15977 + mov %cs,%edi
15978 + cmp $__KERNEL_CS,%edi
15979 + jnz 2f
15980 +1:
15981 +
15982 +#ifdef CONFIG_PARAVIRT
15983 + PV_RESTORE_REGS(CLBR_RDI)
15984 +#endif
15985 +
15986 + popq %rdi
15987 + pax_force_retaddr
15988 + retq
15989 +
15990 +2: ljmpq __KERNEL_CS,1f
15991 +3: ljmpq __KERNEXEC_KERNEL_CS,4f
15992 +4: SET_RDI_INTO_CR0
15993 + jmp 1b
15994 +ENDPROC(pax_enter_kernel)
15995 +
15996 +ENTRY(pax_exit_kernel)
15997 + pushq %rdi
15998 +
15999 +#ifdef CONFIG_PARAVIRT
16000 + PV_SAVE_REGS(CLBR_RDI)
16001 +#endif
16002 +
16003 + mov %cs,%rdi
16004 + cmp $__KERNEXEC_KERNEL_CS,%edi
16005 + jz 2f
16006 +1:
16007 +
16008 +#ifdef CONFIG_PARAVIRT
16009 + PV_RESTORE_REGS(CLBR_RDI);
16010 +#endif
16011 +
16012 + popq %rdi
16013 + pax_force_retaddr
16014 + retq
16015 +
16016 +2: GET_CR0_INTO_RDI
16017 + btr $16,%rdi
16018 + ljmpq __KERNEL_CS,3f
16019 +3: SET_RDI_INTO_CR0
16020 + jmp 1b
16021 +#ifdef CONFIG_PARAVIRT
16022 + PV_RESTORE_REGS(CLBR_RDI);
16023 +#endif
16024 +
16025 + popq %rdi
16026 + pax_force_retaddr
16027 + retq
16028 +ENDPROC(pax_exit_kernel)
16029 +#endif
16030 +
16031 + .macro pax_enter_kernel_user
16032 + pax_set_fptr_mask
16033 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16034 + call pax_enter_kernel_user
16035 +#endif
16036 + .endm
16037 +
16038 + .macro pax_exit_kernel_user
16039 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16040 + call pax_exit_kernel_user
16041 +#endif
16042 +#ifdef CONFIG_PAX_RANDKSTACK
16043 + pushq %rax
16044 + call pax_randomize_kstack
16045 + popq %rax
16046 +#endif
16047 + .endm
16048 +
16049 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16050 +ENTRY(pax_enter_kernel_user)
16051 + pushq %rdi
16052 + pushq %rbx
16053 +
16054 +#ifdef CONFIG_PARAVIRT
16055 + PV_SAVE_REGS(CLBR_RDI)
16056 +#endif
16057 +
16058 + GET_CR3_INTO_RDI
16059 + mov %rdi,%rbx
16060 + add $__START_KERNEL_map,%rbx
16061 + sub phys_base(%rip),%rbx
16062 +
16063 +#ifdef CONFIG_PARAVIRT
16064 + pushq %rdi
16065 + cmpl $0, pv_info+PARAVIRT_enabled
16066 + jz 1f
16067 + i = 0
16068 + .rept USER_PGD_PTRS
16069 + mov i*8(%rbx),%rsi
16070 + mov $0,%sil
16071 + lea i*8(%rbx),%rdi
16072 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
16073 + i = i + 1
16074 + .endr
16075 + jmp 2f
16076 +1:
16077 +#endif
16078 +
16079 + i = 0
16080 + .rept USER_PGD_PTRS
16081 + movb $0,i*8(%rbx)
16082 + i = i + 1
16083 + .endr
16084 +
16085 +#ifdef CONFIG_PARAVIRT
16086 +2: popq %rdi
16087 +#endif
16088 + SET_RDI_INTO_CR3
16089 +
16090 +#ifdef CONFIG_PAX_KERNEXEC
16091 + GET_CR0_INTO_RDI
16092 + bts $16,%rdi
16093 + SET_RDI_INTO_CR0
16094 +#endif
16095 +
16096 +#ifdef CONFIG_PARAVIRT
16097 + PV_RESTORE_REGS(CLBR_RDI)
16098 +#endif
16099 +
16100 + popq %rbx
16101 + popq %rdi
16102 + pax_force_retaddr
16103 + retq
16104 +ENDPROC(pax_enter_kernel_user)
16105 +
16106 +ENTRY(pax_exit_kernel_user)
16107 + push %rdi
16108 +
16109 +#ifdef CONFIG_PARAVIRT
16110 + pushq %rbx
16111 + PV_SAVE_REGS(CLBR_RDI)
16112 +#endif
16113 +
16114 +#ifdef CONFIG_PAX_KERNEXEC
16115 + GET_CR0_INTO_RDI
16116 + btr $16,%rdi
16117 + SET_RDI_INTO_CR0
16118 +#endif
16119 +
16120 + GET_CR3_INTO_RDI
16121 + add $__START_KERNEL_map,%rdi
16122 + sub phys_base(%rip),%rdi
16123 +
16124 +#ifdef CONFIG_PARAVIRT
16125 + cmpl $0, pv_info+PARAVIRT_enabled
16126 + jz 1f
16127 + mov %rdi,%rbx
16128 + i = 0
16129 + .rept USER_PGD_PTRS
16130 + mov i*8(%rbx),%rsi
16131 + mov $0x67,%sil
16132 + lea i*8(%rbx),%rdi
16133 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
16134 + i = i + 1
16135 + .endr
16136 + jmp 2f
16137 +1:
16138 +#endif
16139 +
16140 + i = 0
16141 + .rept USER_PGD_PTRS
16142 + movb $0x67,i*8(%rdi)
16143 + i = i + 1
16144 + .endr
16145 +
16146 +#ifdef CONFIG_PARAVIRT
16147 +2: PV_RESTORE_REGS(CLBR_RDI)
16148 + popq %rbx
16149 +#endif
16150 +
16151 + popq %rdi
16152 + pax_force_retaddr
16153 + retq
16154 +ENDPROC(pax_exit_kernel_user)
16155 +#endif
16156 +
16157 +.macro pax_erase_kstack
16158 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
16159 + call pax_erase_kstack
16160 +#endif
16161 +.endm
16162 +
16163 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
16164 +/*
16165 + * r11: thread_info
16166 + * rcx, rdx: can be clobbered
16167 + */
16168 +ENTRY(pax_erase_kstack)
16169 + pushq %rdi
16170 + pushq %rax
16171 + pushq %r11
16172 +
16173 + GET_THREAD_INFO(%r11)
16174 + mov TI_lowest_stack(%r11), %rdi
16175 + mov $-0xBEEF, %rax
16176 + std
16177 +
16178 +1: mov %edi, %ecx
16179 + and $THREAD_SIZE_asm - 1, %ecx
16180 + shr $3, %ecx
16181 + repne scasq
16182 + jecxz 2f
16183 +
16184 + cmp $2*8, %ecx
16185 + jc 2f
16186 +
16187 + mov $2*8, %ecx
16188 + repe scasq
16189 + jecxz 2f
16190 + jne 1b
16191 +
16192 +2: cld
16193 + mov %esp, %ecx
16194 + sub %edi, %ecx
16195 +
16196 + cmp $THREAD_SIZE_asm, %rcx
16197 + jb 3f
16198 + ud2
16199 +3:
16200 +
16201 + shr $3, %ecx
16202 + rep stosq
16203 +
16204 + mov TI_task_thread_sp0(%r11), %rdi
16205 + sub $256, %rdi
16206 + mov %rdi, TI_lowest_stack(%r11)
16207 +
16208 + popq %r11
16209 + popq %rax
16210 + popq %rdi
16211 + pax_force_retaddr
16212 + ret
16213 +ENDPROC(pax_erase_kstack)
16214 +#endif
16215
16216 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
16217 #ifdef CONFIG_TRACE_IRQFLAGS
16218 @@ -233,8 +517,8 @@ ENDPROC(native_usergs_sysret64)
16219 .endm
16220
16221 .macro UNFAKE_STACK_FRAME
16222 - addq $8*6, %rsp
16223 - CFI_ADJUST_CFA_OFFSET -(6*8)
16224 + addq $8*6 + ARG_SKIP, %rsp
16225 + CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
16226 .endm
16227
16228 /*
16229 @@ -317,7 +601,7 @@ ENTRY(save_args)
16230 leaq -ARGOFFSET+16(%rsp),%rdi /* arg1 for handler */
16231 movq_cfi rbp, 8 /* push %rbp */
16232 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
16233 - testl $3, CS(%rdi)
16234 + testb $3, CS(%rdi)
16235 je 1f
16236 SWAPGS
16237 /*
16238 @@ -337,9 +621,10 @@ ENTRY(save_args)
16239 * We entered an interrupt context - irqs are off:
16240 */
16241 2: TRACE_IRQS_OFF
16242 + pax_force_retaddr
16243 ret
16244 CFI_ENDPROC
16245 -END(save_args)
16246 +ENDPROC(save_args)
16247
16248 ENTRY(save_rest)
16249 PARTIAL_FRAME 1 REST_SKIP+8
16250 @@ -352,9 +637,10 @@ ENTRY(save_rest)
16251 movq_cfi r15, R15+16
16252 movq %r11, 8(%rsp) /* return address */
16253 FIXUP_TOP_OF_STACK %r11, 16
16254 + pax_force_retaddr
16255 ret
16256 CFI_ENDPROC
16257 -END(save_rest)
16258 +ENDPROC(save_rest)
16259
16260 /* save complete stack frame */
16261 .pushsection .kprobes.text, "ax"
16262 @@ -383,9 +669,10 @@ ENTRY(save_paranoid)
16263 js 1f /* negative -> in kernel */
16264 SWAPGS
16265 xorl %ebx,%ebx
16266 -1: ret
16267 +1: pax_force_retaddr_bts
16268 + ret
16269 CFI_ENDPROC
16270 -END(save_paranoid)
16271 +ENDPROC(save_paranoid)
16272 .popsection
16273
16274 /*
16275 @@ -409,7 +696,7 @@ ENTRY(ret_from_fork)
16276
16277 RESTORE_REST
16278
16279 - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
16280 + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
16281 je int_ret_from_sys_call
16282
16283 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
16284 @@ -419,7 +706,7 @@ ENTRY(ret_from_fork)
16285 jmp ret_from_sys_call # go to the SYSRET fastpath
16286
16287 CFI_ENDPROC
16288 -END(ret_from_fork)
16289 +ENDPROC(ret_from_fork)
16290
16291 /*
16292 * System call entry. Upto 6 arguments in registers are supported.
16293 @@ -455,7 +742,7 @@ END(ret_from_fork)
16294 ENTRY(system_call)
16295 CFI_STARTPROC simple
16296 CFI_SIGNAL_FRAME
16297 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
16298 + CFI_DEF_CFA rsp,0
16299 CFI_REGISTER rip,rcx
16300 /*CFI_REGISTER rflags,r11*/
16301 SWAPGS_UNSAFE_STACK
16302 @@ -468,12 +755,13 @@ ENTRY(system_call_after_swapgs)
16303
16304 movq %rsp,PER_CPU_VAR(old_rsp)
16305 movq PER_CPU_VAR(kernel_stack),%rsp
16306 + SAVE_ARGS 8*6,1
16307 + pax_enter_kernel_user
16308 /*
16309 * No need to follow this irqs off/on section - it's straight
16310 * and short:
16311 */
16312 ENABLE_INTERRUPTS(CLBR_NONE)
16313 - SAVE_ARGS 8,1
16314 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
16315 movq %rcx,RIP-ARGOFFSET(%rsp)
16316 CFI_REL_OFFSET rip,RIP-ARGOFFSET
16317 @@ -483,7 +771,7 @@ ENTRY(system_call_after_swapgs)
16318 system_call_fastpath:
16319 cmpq $__NR_syscall_max,%rax
16320 ja badsys
16321 - movq %r10,%rcx
16322 + movq R10-ARGOFFSET(%rsp),%rcx
16323 call *sys_call_table(,%rax,8) # XXX: rip relative
16324 movq %rax,RAX-ARGOFFSET(%rsp)
16325 /*
16326 @@ -502,6 +790,8 @@ sysret_check:
16327 andl %edi,%edx
16328 jnz sysret_careful
16329 CFI_REMEMBER_STATE
16330 + pax_exit_kernel_user
16331 + pax_erase_kstack
16332 /*
16333 * sysretq will re-enable interrupts:
16334 */
16335 @@ -555,14 +845,18 @@ badsys:
16336 * jump back to the normal fast path.
16337 */
16338 auditsys:
16339 - movq %r10,%r9 /* 6th arg: 4th syscall arg */
16340 + movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
16341 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
16342 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
16343 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
16344 movq %rax,%rsi /* 2nd arg: syscall number */
16345 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
16346 call audit_syscall_entry
16347 +
16348 + pax_erase_kstack
16349 +
16350 LOAD_ARGS 0 /* reload call-clobbered registers */
16351 + pax_set_fptr_mask
16352 jmp system_call_fastpath
16353
16354 /*
16355 @@ -592,16 +886,20 @@ tracesys:
16356 FIXUP_TOP_OF_STACK %rdi
16357 movq %rsp,%rdi
16358 call syscall_trace_enter
16359 +
16360 + pax_erase_kstack
16361 +
16362 /*
16363 * Reload arg registers from stack in case ptrace changed them.
16364 * We don't reload %rax because syscall_trace_enter() returned
16365 * the value it wants us to use in the table lookup.
16366 */
16367 LOAD_ARGS ARGOFFSET, 1
16368 + pax_set_fptr_mask
16369 RESTORE_REST
16370 cmpq $__NR_syscall_max,%rax
16371 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
16372 - movq %r10,%rcx /* fixup for C */
16373 + movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
16374 call *sys_call_table(,%rax,8)
16375 movq %rax,RAX-ARGOFFSET(%rsp)
16376 /* Use IRET because user could have changed frame */
16377 @@ -613,7 +911,7 @@ tracesys:
16378 GLOBAL(int_ret_from_sys_call)
16379 DISABLE_INTERRUPTS(CLBR_NONE)
16380 TRACE_IRQS_OFF
16381 - testl $3,CS-ARGOFFSET(%rsp)
16382 + testb $3,CS-ARGOFFSET(%rsp)
16383 je retint_restore_args
16384 movl $_TIF_ALLWORK_MASK,%edi
16385 /* edi: mask to check */
16386 @@ -624,6 +922,7 @@ GLOBAL(int_with_check)
16387 andl %edi,%edx
16388 jnz int_careful
16389 andl $~TS_COMPAT,TI_status(%rcx)
16390 + pax_erase_kstack
16391 jmp retint_swapgs
16392
16393 /* Either reschedule or signal or syscall exit tracking needed. */
16394 @@ -674,7 +973,7 @@ int_restore_rest:
16395 TRACE_IRQS_OFF
16396 jmp int_with_check
16397 CFI_ENDPROC
16398 -END(system_call)
16399 +ENDPROC(system_call)
16400
16401 /*
16402 * Certain special system calls that need to save a complete full stack frame.
16403 @@ -690,7 +989,7 @@ ENTRY(\label)
16404 call \func
16405 jmp ptregscall_common
16406 CFI_ENDPROC
16407 -END(\label)
16408 +ENDPROC(\label)
16409 .endm
16410
16411 PTREGSCALL stub_clone, sys_clone, %r8
16412 @@ -708,9 +1007,10 @@ ENTRY(ptregscall_common)
16413 movq_cfi_restore R12+8, r12
16414 movq_cfi_restore RBP+8, rbp
16415 movq_cfi_restore RBX+8, rbx
16416 + pax_force_retaddr
16417 ret $REST_SKIP /* pop extended registers */
16418 CFI_ENDPROC
16419 -END(ptregscall_common)
16420 +ENDPROC(ptregscall_common)
16421
16422 ENTRY(stub_execve)
16423 CFI_STARTPROC
16424 @@ -726,7 +1026,7 @@ ENTRY(stub_execve)
16425 RESTORE_REST
16426 jmp int_ret_from_sys_call
16427 CFI_ENDPROC
16428 -END(stub_execve)
16429 +ENDPROC(stub_execve)
16430
16431 /*
16432 * sigreturn is special because it needs to restore all registers on return.
16433 @@ -744,7 +1044,7 @@ ENTRY(stub_rt_sigreturn)
16434 RESTORE_REST
16435 jmp int_ret_from_sys_call
16436 CFI_ENDPROC
16437 -END(stub_rt_sigreturn)
16438 +ENDPROC(stub_rt_sigreturn)
16439
16440 /*
16441 * Build the entry stubs and pointer table with some assembler magic.
16442 @@ -780,7 +1080,7 @@ vector=vector+1
16443 2: jmp common_interrupt
16444 .endr
16445 CFI_ENDPROC
16446 -END(irq_entries_start)
16447 +ENDPROC(irq_entries_start)
16448
16449 .previous
16450 END(interrupt)
16451 @@ -800,6 +1100,16 @@ END(interrupt)
16452 CFI_ADJUST_CFA_OFFSET 10*8
16453 call save_args
16454 PARTIAL_FRAME 0
16455 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16456 + testb $3, CS(%rdi)
16457 + jnz 1f
16458 + pax_enter_kernel
16459 + jmp 2f
16460 +1: pax_enter_kernel_user
16461 +2:
16462 +#else
16463 + pax_enter_kernel
16464 +#endif
16465 call \func
16466 .endm
16467
16468 @@ -822,7 +1132,7 @@ ret_from_intr:
16469 CFI_ADJUST_CFA_OFFSET -8
16470 exit_intr:
16471 GET_THREAD_INFO(%rcx)
16472 - testl $3,CS-ARGOFFSET(%rsp)
16473 + testb $3,CS-ARGOFFSET(%rsp)
16474 je retint_kernel
16475
16476 /* Interrupt came from user space */
16477 @@ -844,12 +1154,15 @@ retint_swapgs: /* return to user-space */
16478 * The iretq could re-enable interrupts:
16479 */
16480 DISABLE_INTERRUPTS(CLBR_ANY)
16481 + pax_exit_kernel_user
16482 TRACE_IRQS_IRETQ
16483 SWAPGS
16484 jmp restore_args
16485
16486 retint_restore_args: /* return to kernel space */
16487 DISABLE_INTERRUPTS(CLBR_ANY)
16488 + pax_exit_kernel
16489 + pax_force_retaddr RIP-ARGOFFSET
16490 /*
16491 * The iretq could re-enable interrupts:
16492 */
16493 @@ -940,7 +1253,7 @@ ENTRY(retint_kernel)
16494 #endif
16495
16496 CFI_ENDPROC
16497 -END(common_interrupt)
16498 +ENDPROC(common_interrupt)
16499
16500 /*
16501 * APIC interrupts.
16502 @@ -953,7 +1266,7 @@ ENTRY(\sym)
16503 interrupt \do_sym
16504 jmp ret_from_intr
16505 CFI_ENDPROC
16506 -END(\sym)
16507 +ENDPROC(\sym)
16508 .endm
16509
16510 #ifdef CONFIG_SMP
16511 @@ -1032,12 +1345,22 @@ ENTRY(\sym)
16512 CFI_ADJUST_CFA_OFFSET 15*8
16513 call error_entry
16514 DEFAULT_FRAME 0
16515 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16516 + testb $3, CS(%rsp)
16517 + jnz 1f
16518 + pax_enter_kernel
16519 + jmp 2f
16520 +1: pax_enter_kernel_user
16521 +2:
16522 +#else
16523 + pax_enter_kernel
16524 +#endif
16525 movq %rsp,%rdi /* pt_regs pointer */
16526 xorl %esi,%esi /* no error code */
16527 call \do_sym
16528 jmp error_exit /* %ebx: no swapgs flag */
16529 CFI_ENDPROC
16530 -END(\sym)
16531 +ENDPROC(\sym)
16532 .endm
16533
16534 .macro paranoidzeroentry sym do_sym
16535 @@ -1049,12 +1372,22 @@ ENTRY(\sym)
16536 subq $15*8, %rsp
16537 call save_paranoid
16538 TRACE_IRQS_OFF
16539 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16540 + testb $3, CS(%rsp)
16541 + jnz 1f
16542 + pax_enter_kernel
16543 + jmp 2f
16544 +1: pax_enter_kernel_user
16545 +2:
16546 +#else
16547 + pax_enter_kernel
16548 +#endif
16549 movq %rsp,%rdi /* pt_regs pointer */
16550 xorl %esi,%esi /* no error code */
16551 call \do_sym
16552 jmp paranoid_exit /* %ebx: no swapgs flag */
16553 CFI_ENDPROC
16554 -END(\sym)
16555 +ENDPROC(\sym)
16556 .endm
16557
16558 .macro paranoidzeroentry_ist sym do_sym ist
16559 @@ -1066,15 +1399,30 @@ ENTRY(\sym)
16560 subq $15*8, %rsp
16561 call save_paranoid
16562 TRACE_IRQS_OFF
16563 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16564 + testb $3, CS(%rsp)
16565 + jnz 1f
16566 + pax_enter_kernel
16567 + jmp 2f
16568 +1: pax_enter_kernel_user
16569 +2:
16570 +#else
16571 + pax_enter_kernel
16572 +#endif
16573 movq %rsp,%rdi /* pt_regs pointer */
16574 xorl %esi,%esi /* no error code */
16575 - PER_CPU(init_tss, %rbp)
16576 +#ifdef CONFIG_SMP
16577 + imul $TSS_size, PER_CPU_VAR(cpu_number), %ebp
16578 + lea init_tss(%rbp), %rbp
16579 +#else
16580 + lea init_tss(%rip), %rbp
16581 +#endif
16582 subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
16583 call \do_sym
16584 addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
16585 jmp paranoid_exit /* %ebx: no swapgs flag */
16586 CFI_ENDPROC
16587 -END(\sym)
16588 +ENDPROC(\sym)
16589 .endm
16590
16591 .macro errorentry sym do_sym
16592 @@ -1085,13 +1433,23 @@ ENTRY(\sym)
16593 CFI_ADJUST_CFA_OFFSET 15*8
16594 call error_entry
16595 DEFAULT_FRAME 0
16596 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16597 + testb $3, CS(%rsp)
16598 + jnz 1f
16599 + pax_enter_kernel
16600 + jmp 2f
16601 +1: pax_enter_kernel_user
16602 +2:
16603 +#else
16604 + pax_enter_kernel
16605 +#endif
16606 movq %rsp,%rdi /* pt_regs pointer */
16607 movq ORIG_RAX(%rsp),%rsi /* get error code */
16608 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
16609 call \do_sym
16610 jmp error_exit /* %ebx: no swapgs flag */
16611 CFI_ENDPROC
16612 -END(\sym)
16613 +ENDPROC(\sym)
16614 .endm
16615
16616 /* error code is on the stack already */
16617 @@ -1104,13 +1462,23 @@ ENTRY(\sym)
16618 call save_paranoid
16619 DEFAULT_FRAME 0
16620 TRACE_IRQS_OFF
16621 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16622 + testb $3, CS(%rsp)
16623 + jnz 1f
16624 + pax_enter_kernel
16625 + jmp 2f
16626 +1: pax_enter_kernel_user
16627 +2:
16628 +#else
16629 + pax_enter_kernel
16630 +#endif
16631 movq %rsp,%rdi /* pt_regs pointer */
16632 movq ORIG_RAX(%rsp),%rsi /* get error code */
16633 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
16634 call \do_sym
16635 jmp paranoid_exit /* %ebx: no swapgs flag */
16636 CFI_ENDPROC
16637 -END(\sym)
16638 +ENDPROC(\sym)
16639 .endm
16640
16641 zeroentry divide_error do_divide_error
16642 @@ -1141,9 +1509,10 @@ gs_change:
16643 SWAPGS
16644 popf
16645 CFI_ADJUST_CFA_OFFSET -8
16646 + pax_force_retaddr
16647 ret
16648 CFI_ENDPROC
16649 -END(native_load_gs_index)
16650 +ENDPROC(native_load_gs_index)
16651
16652 .section __ex_table,"a"
16653 .align 8
16654 @@ -1193,11 +1562,12 @@ ENTRY(kernel_thread)
16655 * of hacks for example to fork off the per-CPU idle tasks.
16656 * [Hopefully no generic code relies on the reschedule -AK]
16657 */
16658 - RESTORE_ALL
16659 + RESTORE_REST
16660 UNFAKE_STACK_FRAME
16661 + pax_force_retaddr
16662 ret
16663 CFI_ENDPROC
16664 -END(kernel_thread)
16665 +ENDPROC(kernel_thread)
16666
16667 ENTRY(child_rip)
16668 pushq $0 # fake return address
16669 @@ -1208,13 +1578,14 @@ ENTRY(child_rip)
16670 */
16671 movq %rdi, %rax
16672 movq %rsi, %rdi
16673 + pax_force_fptr %rax
16674 call *%rax
16675 # exit
16676 mov %eax, %edi
16677 call do_exit
16678 ud2 # padding for call trace
16679 CFI_ENDPROC
16680 -END(child_rip)
16681 +ENDPROC(child_rip)
16682
16683 /*
16684 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
16685 @@ -1241,11 +1612,11 @@ ENTRY(kernel_execve)
16686 RESTORE_REST
16687 testq %rax,%rax
16688 je int_ret_from_sys_call
16689 - RESTORE_ARGS
16690 UNFAKE_STACK_FRAME
16691 + pax_force_retaddr
16692 ret
16693 CFI_ENDPROC
16694 -END(kernel_execve)
16695 +ENDPROC(kernel_execve)
16696
16697 /* Call softirq on interrupt stack. Interrupts are off. */
16698 ENTRY(call_softirq)
16699 @@ -1263,9 +1634,10 @@ ENTRY(call_softirq)
16700 CFI_DEF_CFA_REGISTER rsp
16701 CFI_ADJUST_CFA_OFFSET -8
16702 decl PER_CPU_VAR(irq_count)
16703 + pax_force_retaddr
16704 ret
16705 CFI_ENDPROC
16706 -END(call_softirq)
16707 +ENDPROC(call_softirq)
16708
16709 #ifdef CONFIG_XEN
16710 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
16711 @@ -1303,7 +1675,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
16712 decl PER_CPU_VAR(irq_count)
16713 jmp error_exit
16714 CFI_ENDPROC
16715 -END(xen_do_hypervisor_callback)
16716 +ENDPROC(xen_do_hypervisor_callback)
16717
16718 /*
16719 * Hypervisor uses this for application faults while it executes.
16720 @@ -1362,7 +1734,7 @@ ENTRY(xen_failsafe_callback)
16721 SAVE_ALL
16722 jmp error_exit
16723 CFI_ENDPROC
16724 -END(xen_failsafe_callback)
16725 +ENDPROC(xen_failsafe_callback)
16726
16727 #endif /* CONFIG_XEN */
16728
16729 @@ -1405,16 +1777,31 @@ ENTRY(paranoid_exit)
16730 TRACE_IRQS_OFF
16731 testl %ebx,%ebx /* swapgs needed? */
16732 jnz paranoid_restore
16733 - testl $3,CS(%rsp)
16734 + testb $3,CS(%rsp)
16735 jnz paranoid_userspace
16736 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16737 + pax_exit_kernel
16738 + TRACE_IRQS_IRETQ 0
16739 + SWAPGS_UNSAFE_STACK
16740 + RESTORE_ALL 8
16741 + pax_force_retaddr_bts
16742 + jmp irq_return
16743 +#endif
16744 paranoid_swapgs:
16745 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16746 + pax_exit_kernel_user
16747 +#else
16748 + pax_exit_kernel
16749 +#endif
16750 TRACE_IRQS_IRETQ 0
16751 SWAPGS_UNSAFE_STACK
16752 RESTORE_ALL 8
16753 jmp irq_return
16754 paranoid_restore:
16755 + pax_exit_kernel
16756 TRACE_IRQS_IRETQ 0
16757 RESTORE_ALL 8
16758 + pax_force_retaddr_bts
16759 jmp irq_return
16760 paranoid_userspace:
16761 GET_THREAD_INFO(%rcx)
16762 @@ -1443,7 +1830,7 @@ paranoid_schedule:
16763 TRACE_IRQS_OFF
16764 jmp paranoid_userspace
16765 CFI_ENDPROC
16766 -END(paranoid_exit)
16767 +ENDPROC(paranoid_exit)
16768
16769 /*
16770 * Exception entry point. This expects an error code/orig_rax on the stack.
16771 @@ -1470,12 +1857,13 @@ ENTRY(error_entry)
16772 movq_cfi r14, R14+8
16773 movq_cfi r15, R15+8
16774 xorl %ebx,%ebx
16775 - testl $3,CS+8(%rsp)
16776 + testb $3,CS+8(%rsp)
16777 je error_kernelspace
16778 error_swapgs:
16779 SWAPGS
16780 error_sti:
16781 TRACE_IRQS_OFF
16782 + pax_force_retaddr_bts
16783 ret
16784 CFI_ENDPROC
16785
16786 @@ -1497,7 +1885,7 @@ error_kernelspace:
16787 cmpq $gs_change,RIP+8(%rsp)
16788 je error_swapgs
16789 jmp error_sti
16790 -END(error_entry)
16791 +ENDPROC(error_entry)
16792
16793
16794 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
16795 @@ -1517,7 +1905,7 @@ ENTRY(error_exit)
16796 jnz retint_careful
16797 jmp retint_swapgs
16798 CFI_ENDPROC
16799 -END(error_exit)
16800 +ENDPROC(error_exit)
16801
16802
16803 /* runs on exception stack */
16804 @@ -1529,6 +1917,16 @@ ENTRY(nmi)
16805 CFI_ADJUST_CFA_OFFSET 15*8
16806 call save_paranoid
16807 DEFAULT_FRAME 0
16808 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16809 + testb $3, CS(%rsp)
16810 + jnz 1f
16811 + pax_enter_kernel
16812 + jmp 2f
16813 +1: pax_enter_kernel_user
16814 +2:
16815 +#else
16816 + pax_enter_kernel
16817 +#endif
16818 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
16819 movq %rsp,%rdi
16820 movq $-1,%rsi
16821 @@ -1539,12 +1937,28 @@ ENTRY(nmi)
16822 DISABLE_INTERRUPTS(CLBR_NONE)
16823 testl %ebx,%ebx /* swapgs needed? */
16824 jnz nmi_restore
16825 - testl $3,CS(%rsp)
16826 + testb $3,CS(%rsp)
16827 jnz nmi_userspace
16828 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16829 + pax_exit_kernel
16830 + SWAPGS_UNSAFE_STACK
16831 + RESTORE_ALL 8
16832 + pax_force_retaddr_bts
16833 + jmp irq_return
16834 +#endif
16835 nmi_swapgs:
16836 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16837 + pax_exit_kernel_user
16838 +#else
16839 + pax_exit_kernel
16840 +#endif
16841 SWAPGS_UNSAFE_STACK
16842 + RESTORE_ALL 8
16843 + jmp irq_return
16844 nmi_restore:
16845 + pax_exit_kernel
16846 RESTORE_ALL 8
16847 + pax_force_retaddr_bts
16848 jmp irq_return
16849 nmi_userspace:
16850 GET_THREAD_INFO(%rcx)
16851 @@ -1573,14 +1987,14 @@ nmi_schedule:
16852 jmp paranoid_exit
16853 CFI_ENDPROC
16854 #endif
16855 -END(nmi)
16856 +ENDPROC(nmi)
16857
16858 ENTRY(ignore_sysret)
16859 CFI_STARTPROC
16860 mov $-ENOSYS,%eax
16861 sysret
16862 CFI_ENDPROC
16863 -END(ignore_sysret)
16864 +ENDPROC(ignore_sysret)
16865
16866 /*
16867 * End of kprobes section
16868 diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
16869 index 9dbb527..7b3615a 100644
16870 --- a/arch/x86/kernel/ftrace.c
16871 +++ b/arch/x86/kernel/ftrace.c
16872 @@ -103,7 +103,7 @@ static void *mod_code_ip; /* holds the IP to write to */
16873 static void *mod_code_newcode; /* holds the text to write to the IP */
16874
16875 static unsigned nmi_wait_count;
16876 -static atomic_t nmi_update_count = ATOMIC_INIT(0);
16877 +static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
16878
16879 int ftrace_arch_read_dyn_info(char *buf, int size)
16880 {
16881 @@ -111,7 +111,7 @@ int ftrace_arch_read_dyn_info(char *buf, int size)
16882
16883 r = snprintf(buf, size, "%u %u",
16884 nmi_wait_count,
16885 - atomic_read(&nmi_update_count));
16886 + atomic_read_unchecked(&nmi_update_count));
16887 return r;
16888 }
16889
16890 @@ -149,8 +149,10 @@ void ftrace_nmi_enter(void)
16891 {
16892 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
16893 smp_rmb();
16894 + pax_open_kernel();
16895 ftrace_mod_code();
16896 - atomic_inc(&nmi_update_count);
16897 + pax_close_kernel();
16898 + atomic_inc_unchecked(&nmi_update_count);
16899 }
16900 /* Must have previous changes seen before executions */
16901 smp_mb();
16902 @@ -215,7 +217,7 @@ do_ftrace_mod_code(unsigned long ip, void *new_code)
16903
16904
16905
16906 -static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
16907 +static unsigned char ftrace_nop[MCOUNT_INSN_SIZE] __read_only;
16908
16909 static unsigned char *ftrace_nop_replace(void)
16910 {
16911 @@ -228,6 +230,8 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
16912 {
16913 unsigned char replaced[MCOUNT_INSN_SIZE];
16914
16915 + ip = ktla_ktva(ip);
16916 +
16917 /*
16918 * Note: Due to modules and __init, code can
16919 * disappear and change, we need to protect against faulting
16920 @@ -284,7 +288,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
16921 unsigned char old[MCOUNT_INSN_SIZE], *new;
16922 int ret;
16923
16924 - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
16925 + memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
16926 new = ftrace_call_replace(ip, (unsigned long)func);
16927 ret = ftrace_modify_code(ip, old, new);
16928
16929 @@ -337,15 +341,15 @@ int __init ftrace_dyn_arch_init(void *data)
16930 switch (faulted) {
16931 case 0:
16932 pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
16933 - memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
16934 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_p6nop), MCOUNT_INSN_SIZE);
16935 break;
16936 case 1:
16937 pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
16938 - memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
16939 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_nop5), MCOUNT_INSN_SIZE);
16940 break;
16941 case 2:
16942 pr_info("ftrace: converting mcount calls to jmp . + 5\n");
16943 - memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
16944 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_jmp), MCOUNT_INSN_SIZE);
16945 break;
16946 }
16947
16948 @@ -366,6 +370,8 @@ static int ftrace_mod_jmp(unsigned long ip,
16949 {
16950 unsigned char code[MCOUNT_INSN_SIZE];
16951
16952 + ip = ktla_ktva(ip);
16953 +
16954 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
16955 return -EFAULT;
16956
16957 diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
16958 index 4f8e250..df24706 100644
16959 --- a/arch/x86/kernel/head32.c
16960 +++ b/arch/x86/kernel/head32.c
16961 @@ -16,6 +16,7 @@
16962 #include <asm/apic.h>
16963 #include <asm/io_apic.h>
16964 #include <asm/bios_ebda.h>
16965 +#include <asm/boot.h>
16966
16967 static void __init i386_default_early_setup(void)
16968 {
16969 @@ -31,7 +32,7 @@ void __init i386_start_kernel(void)
16970 {
16971 reserve_trampoline_memory();
16972
16973 - reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
16974 + reserve_early(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
16975
16976 #ifdef CONFIG_BLK_DEV_INITRD
16977 /* Reserve INITRD */
16978 diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
16979 index 34c3308..6fc4e76 100644
16980 --- a/arch/x86/kernel/head_32.S
16981 +++ b/arch/x86/kernel/head_32.S
16982 @@ -19,10 +19,17 @@
16983 #include <asm/setup.h>
16984 #include <asm/processor-flags.h>
16985 #include <asm/percpu.h>
16986 +#include <asm/msr-index.h>
16987
16988 /* Physical address */
16989 #define pa(X) ((X) - __PAGE_OFFSET)
16990
16991 +#ifdef CONFIG_PAX_KERNEXEC
16992 +#define ta(X) (X)
16993 +#else
16994 +#define ta(X) ((X) - __PAGE_OFFSET)
16995 +#endif
16996 +
16997 /*
16998 * References to members of the new_cpu_data structure.
16999 */
17000 @@ -52,11 +59,7 @@
17001 * and small than max_low_pfn, otherwise will waste some page table entries
17002 */
17003
17004 -#if PTRS_PER_PMD > 1
17005 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
17006 -#else
17007 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
17008 -#endif
17009 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
17010
17011 /* Enough space to fit pagetables for the low memory linear map */
17012 MAPPING_BEYOND_END = \
17013 @@ -73,6 +76,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE_asm
17014 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
17015
17016 /*
17017 + * Real beginning of normal "text" segment
17018 + */
17019 +ENTRY(stext)
17020 +ENTRY(_stext)
17021 +
17022 +/*
17023 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
17024 * %esi points to the real-mode code as a 32-bit pointer.
17025 * CS and DS must be 4 GB flat segments, but we don't depend on
17026 @@ -80,7 +89,16 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
17027 * can.
17028 */
17029 __HEAD
17030 +
17031 +#ifdef CONFIG_PAX_KERNEXEC
17032 + jmp startup_32
17033 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
17034 +.fill PAGE_SIZE-5,1,0xcc
17035 +#endif
17036 +
17037 ENTRY(startup_32)
17038 + movl pa(stack_start),%ecx
17039 +
17040 /* test KEEP_SEGMENTS flag to see if the bootloader is asking
17041 us to not reload segments */
17042 testb $(1<<6), BP_loadflags(%esi)
17043 @@ -95,7 +113,60 @@ ENTRY(startup_32)
17044 movl %eax,%es
17045 movl %eax,%fs
17046 movl %eax,%gs
17047 + movl %eax,%ss
17048 2:
17049 + leal -__PAGE_OFFSET(%ecx),%esp
17050 +
17051 +#ifdef CONFIG_SMP
17052 + movl $pa(cpu_gdt_table),%edi
17053 + movl $__per_cpu_load,%eax
17054 + movw %ax,__KERNEL_PERCPU + 2(%edi)
17055 + rorl $16,%eax
17056 + movb %al,__KERNEL_PERCPU + 4(%edi)
17057 + movb %ah,__KERNEL_PERCPU + 7(%edi)
17058 + movl $__per_cpu_end - 1,%eax
17059 + subl $__per_cpu_start,%eax
17060 + movw %ax,__KERNEL_PERCPU + 0(%edi)
17061 +#endif
17062 +
17063 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17064 + movl $NR_CPUS,%ecx
17065 + movl $pa(cpu_gdt_table),%edi
17066 +1:
17067 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
17068 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
17069 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
17070 + addl $PAGE_SIZE_asm,%edi
17071 + loop 1b
17072 +#endif
17073 +
17074 +#ifdef CONFIG_PAX_KERNEXEC
17075 + movl $pa(boot_gdt),%edi
17076 + movl $__LOAD_PHYSICAL_ADDR,%eax
17077 + movw %ax,__BOOT_CS + 2(%edi)
17078 + rorl $16,%eax
17079 + movb %al,__BOOT_CS + 4(%edi)
17080 + movb %ah,__BOOT_CS + 7(%edi)
17081 + rorl $16,%eax
17082 +
17083 + ljmp $(__BOOT_CS),$1f
17084 +1:
17085 +
17086 + movl $NR_CPUS,%ecx
17087 + movl $pa(cpu_gdt_table),%edi
17088 + addl $__PAGE_OFFSET,%eax
17089 +1:
17090 + movw %ax,__KERNEL_CS + 2(%edi)
17091 + movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
17092 + rorl $16,%eax
17093 + movb %al,__KERNEL_CS + 4(%edi)
17094 + movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
17095 + movb %ah,__KERNEL_CS + 7(%edi)
17096 + movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
17097 + rorl $16,%eax
17098 + addl $PAGE_SIZE_asm,%edi
17099 + loop 1b
17100 +#endif
17101
17102 /*
17103 * Clear BSS first so that there are no surprises...
17104 @@ -140,9 +211,7 @@ ENTRY(startup_32)
17105 cmpl $num_subarch_entries, %eax
17106 jae bad_subarch
17107
17108 - movl pa(subarch_entries)(,%eax,4), %eax
17109 - subl $__PAGE_OFFSET, %eax
17110 - jmp *%eax
17111 + jmp *pa(subarch_entries)(,%eax,4)
17112
17113 bad_subarch:
17114 WEAK(lguest_entry)
17115 @@ -154,10 +223,10 @@ WEAK(xen_entry)
17116 __INITDATA
17117
17118 subarch_entries:
17119 - .long default_entry /* normal x86/PC */
17120 - .long lguest_entry /* lguest hypervisor */
17121 - .long xen_entry /* Xen hypervisor */
17122 - .long default_entry /* Moorestown MID */
17123 + .long ta(default_entry) /* normal x86/PC */
17124 + .long ta(lguest_entry) /* lguest hypervisor */
17125 + .long ta(xen_entry) /* Xen hypervisor */
17126 + .long ta(default_entry) /* Moorestown MID */
17127 num_subarch_entries = (. - subarch_entries) / 4
17128 .previous
17129 #endif /* CONFIG_PARAVIRT */
17130 @@ -218,8 +287,11 @@ default_entry:
17131 movl %eax, pa(max_pfn_mapped)
17132
17133 /* Do early initialization of the fixmap area */
17134 - movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
17135 - movl %eax,pa(swapper_pg_pmd+0x1000*KPMDS-8)
17136 +#ifdef CONFIG_COMPAT_VDSO
17137 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_pmd+0x1000*KPMDS-8)
17138 +#else
17139 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_pmd+0x1000*KPMDS-8)
17140 +#endif
17141 #else /* Not PAE */
17142
17143 page_pde_offset = (__PAGE_OFFSET >> 20);
17144 @@ -249,8 +321,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
17145 movl %eax, pa(max_pfn_mapped)
17146
17147 /* Do early initialization of the fixmap area */
17148 - movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
17149 - movl %eax,pa(swapper_pg_dir+0xffc)
17150 +#ifdef CONFIG_COMPAT_VDSO
17151 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_dir+0xffc)
17152 +#else
17153 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_dir+0xffc)
17154 +#endif
17155 #endif
17156 jmp 3f
17157 /*
17158 @@ -272,6 +347,9 @@ ENTRY(startup_32_smp)
17159 movl %eax,%es
17160 movl %eax,%fs
17161 movl %eax,%gs
17162 + movl pa(stack_start),%ecx
17163 + movl %eax,%ss
17164 + leal -__PAGE_OFFSET(%ecx),%esp
17165 #endif /* CONFIG_SMP */
17166 3:
17167
17168 @@ -297,6 +375,7 @@ ENTRY(startup_32_smp)
17169 orl %edx,%eax
17170 movl %eax,%cr4
17171
17172 +#ifdef CONFIG_X86_PAE
17173 btl $5, %eax # check if PAE is enabled
17174 jnc 6f
17175
17176 @@ -305,6 +384,10 @@ ENTRY(startup_32_smp)
17177 cpuid
17178 cmpl $0x80000000, %eax
17179 jbe 6f
17180 +
17181 + /* Clear bogus XD_DISABLE bits */
17182 + call verify_cpu
17183 +
17184 mov $0x80000001, %eax
17185 cpuid
17186 /* Execute Disable bit supported? */
17187 @@ -312,13 +395,17 @@ ENTRY(startup_32_smp)
17188 jnc 6f
17189
17190 /* Setup EFER (Extended Feature Enable Register) */
17191 - movl $0xc0000080, %ecx
17192 + movl $MSR_EFER, %ecx
17193 rdmsr
17194
17195 btsl $11, %eax
17196 /* Make changes effective */
17197 wrmsr
17198
17199 + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
17200 + movl $1,pa(nx_enabled)
17201 +#endif
17202 +
17203 6:
17204
17205 /*
17206 @@ -331,8 +418,8 @@ ENTRY(startup_32_smp)
17207 movl %eax,%cr0 /* ..and set paging (PG) bit */
17208 ljmp $__BOOT_CS,$1f /* Clear prefetch and normalize %eip */
17209 1:
17210 - /* Set up the stack pointer */
17211 - lss stack_start,%esp
17212 + /* Shift the stack pointer to a virtual address */
17213 + addl $__PAGE_OFFSET, %esp
17214
17215 /*
17216 * Initialize eflags. Some BIOS's leave bits like NT set. This would
17217 @@ -344,9 +431,7 @@ ENTRY(startup_32_smp)
17218
17219 #ifdef CONFIG_SMP
17220 cmpb $0, ready
17221 - jz 1f /* Initial CPU cleans BSS */
17222 - jmp checkCPUtype
17223 -1:
17224 + jnz checkCPUtype
17225 #endif /* CONFIG_SMP */
17226
17227 /*
17228 @@ -424,7 +509,7 @@ is386: movl $2,%ecx # set MP
17229 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
17230 movl %eax,%ss # after changing gdt.
17231
17232 - movl $(__USER_DS),%eax # DS/ES contains default USER segment
17233 +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
17234 movl %eax,%ds
17235 movl %eax,%es
17236
17237 @@ -438,15 +523,22 @@ is386: movl $2,%ecx # set MP
17238 */
17239 cmpb $0,ready
17240 jne 1f
17241 - movl $per_cpu__gdt_page,%eax
17242 + movl $cpu_gdt_table,%eax
17243 movl $per_cpu__stack_canary,%ecx
17244 +#ifdef CONFIG_SMP
17245 + addl $__per_cpu_load,%ecx
17246 +#endif
17247 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
17248 shrl $16, %ecx
17249 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
17250 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
17251 1:
17252 -#endif
17253 movl $(__KERNEL_STACK_CANARY),%eax
17254 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
17255 + movl $(__USER_DS),%eax
17256 +#else
17257 + xorl %eax,%eax
17258 +#endif
17259 movl %eax,%gs
17260
17261 xorl %eax,%eax # Clear LDT
17262 @@ -454,14 +546,7 @@ is386: movl $2,%ecx # set MP
17263
17264 cld # gcc2 wants the direction flag cleared at all times
17265 pushl $0 # fake return address for unwinder
17266 -#ifdef CONFIG_SMP
17267 - movb ready, %cl
17268 movb $1, ready
17269 - cmpb $0,%cl # the first CPU calls start_kernel
17270 - je 1f
17271 - movl (stack_start), %esp
17272 -1:
17273 -#endif /* CONFIG_SMP */
17274 jmp *(initial_code)
17275
17276 /*
17277 @@ -546,22 +631,22 @@ early_page_fault:
17278 jmp early_fault
17279
17280 early_fault:
17281 - cld
17282 #ifdef CONFIG_PRINTK
17283 + cmpl $1,%ss:early_recursion_flag
17284 + je hlt_loop
17285 + incl %ss:early_recursion_flag
17286 + cld
17287 pusha
17288 movl $(__KERNEL_DS),%eax
17289 movl %eax,%ds
17290 movl %eax,%es
17291 - cmpl $2,early_recursion_flag
17292 - je hlt_loop
17293 - incl early_recursion_flag
17294 movl %cr2,%eax
17295 pushl %eax
17296 pushl %edx /* trapno */
17297 pushl $fault_msg
17298 call printk
17299 +; call dump_stack
17300 #endif
17301 - call dump_stack
17302 hlt_loop:
17303 hlt
17304 jmp hlt_loop
17305 @@ -569,8 +654,11 @@ hlt_loop:
17306 /* This is the default interrupt "handler" :-) */
17307 ALIGN
17308 ignore_int:
17309 - cld
17310 #ifdef CONFIG_PRINTK
17311 + cmpl $2,%ss:early_recursion_flag
17312 + je hlt_loop
17313 + incl %ss:early_recursion_flag
17314 + cld
17315 pushl %eax
17316 pushl %ecx
17317 pushl %edx
17318 @@ -579,9 +667,6 @@ ignore_int:
17319 movl $(__KERNEL_DS),%eax
17320 movl %eax,%ds
17321 movl %eax,%es
17322 - cmpl $2,early_recursion_flag
17323 - je hlt_loop
17324 - incl early_recursion_flag
17325 pushl 16(%esp)
17326 pushl 24(%esp)
17327 pushl 32(%esp)
17328 @@ -600,6 +685,8 @@ ignore_int:
17329 #endif
17330 iret
17331
17332 +#include "verify_cpu.S"
17333 +
17334 __REFDATA
17335 .align 4
17336 ENTRY(initial_code)
17337 @@ -610,31 +697,47 @@ ENTRY(initial_page_table)
17338 /*
17339 * BSS section
17340 */
17341 -__PAGE_ALIGNED_BSS
17342 - .align PAGE_SIZE_asm
17343 #ifdef CONFIG_X86_PAE
17344 +.section .swapper_pg_pmd,"a",@progbits
17345 swapper_pg_pmd:
17346 .fill 1024*KPMDS,4,0
17347 #else
17348 +.section .swapper_pg_dir,"a",@progbits
17349 ENTRY(swapper_pg_dir)
17350 .fill 1024,4,0
17351 #endif
17352 +.section .swapper_pg_fixmap,"a",@progbits
17353 swapper_pg_fixmap:
17354 .fill 1024,4,0
17355 #ifdef CONFIG_X86_TRAMPOLINE
17356 +.section .trampoline_pg_dir,"a",@progbits
17357 ENTRY(trampoline_pg_dir)
17358 +#ifdef CONFIG_X86_PAE
17359 + .fill 4,8,0
17360 +#else
17361 .fill 1024,4,0
17362 #endif
17363 +#endif
17364 +
17365 +.section .empty_zero_page,"a",@progbits
17366 ENTRY(empty_zero_page)
17367 .fill 4096,1,0
17368
17369 /*
17370 + * The IDT has to be page-aligned to simplify the Pentium
17371 + * F0 0F bug workaround.. We have a special link segment
17372 + * for this.
17373 + */
17374 +.section .idt,"a",@progbits
17375 +ENTRY(idt_table)
17376 + .fill 256,8,0
17377 +
17378 +/*
17379 * This starts the data section.
17380 */
17381 #ifdef CONFIG_X86_PAE
17382 -__PAGE_ALIGNED_DATA
17383 - /* Page-aligned for the benefit of paravirt? */
17384 - .align PAGE_SIZE_asm
17385 +.section .swapper_pg_dir,"a",@progbits
17386 +
17387 ENTRY(swapper_pg_dir)
17388 .long pa(swapper_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
17389 # if KPMDS == 3
17390 @@ -653,15 +756,24 @@ ENTRY(swapper_pg_dir)
17391 # error "Kernel PMDs should be 1, 2 or 3"
17392 # endif
17393 .align PAGE_SIZE_asm /* needs to be page-sized too */
17394 +
17395 +#ifdef CONFIG_PAX_PER_CPU_PGD
17396 +ENTRY(cpu_pgd)
17397 + .rept NR_CPUS
17398 + .fill 4,8,0
17399 + .endr
17400 +#endif
17401 +
17402 #endif
17403
17404 .data
17405 +.balign 4
17406 ENTRY(stack_start)
17407 - .long init_thread_union+THREAD_SIZE
17408 - .long __BOOT_DS
17409 + .long init_thread_union+THREAD_SIZE-8
17410
17411 ready: .byte 0
17412
17413 +.section .rodata,"a",@progbits
17414 early_recursion_flag:
17415 .long 0
17416
17417 @@ -697,7 +809,7 @@ fault_msg:
17418 .word 0 # 32 bit align gdt_desc.address
17419 boot_gdt_descr:
17420 .word __BOOT_DS+7
17421 - .long boot_gdt - __PAGE_OFFSET
17422 + .long pa(boot_gdt)
17423
17424 .word 0 # 32-bit align idt_desc.address
17425 idt_descr:
17426 @@ -708,7 +820,7 @@ idt_descr:
17427 .word 0 # 32 bit align gdt_desc.address
17428 ENTRY(early_gdt_descr)
17429 .word GDT_ENTRIES*8-1
17430 - .long per_cpu__gdt_page /* Overwritten for secondary CPUs */
17431 + .long cpu_gdt_table /* Overwritten for secondary CPUs */
17432
17433 /*
17434 * The boot_gdt must mirror the equivalent in setup.S and is
17435 @@ -717,5 +829,65 @@ ENTRY(early_gdt_descr)
17436 .align L1_CACHE_BYTES
17437 ENTRY(boot_gdt)
17438 .fill GDT_ENTRY_BOOT_CS,8,0
17439 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
17440 - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
17441 + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
17442 + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
17443 +
17444 + .align PAGE_SIZE_asm
17445 +ENTRY(cpu_gdt_table)
17446 + .rept NR_CPUS
17447 + .quad 0x0000000000000000 /* NULL descriptor */
17448 + .quad 0x0000000000000000 /* 0x0b reserved */
17449 + .quad 0x0000000000000000 /* 0x13 reserved */
17450 + .quad 0x0000000000000000 /* 0x1b reserved */
17451 +
17452 +#ifdef CONFIG_PAX_KERNEXEC
17453 + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
17454 +#else
17455 + .quad 0x0000000000000000 /* 0x20 unused */
17456 +#endif
17457 +
17458 + .quad 0x0000000000000000 /* 0x28 unused */
17459 + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
17460 + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
17461 + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
17462 + .quad 0x0000000000000000 /* 0x4b reserved */
17463 + .quad 0x0000000000000000 /* 0x53 reserved */
17464 + .quad 0x0000000000000000 /* 0x5b reserved */
17465 +
17466 + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
17467 + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
17468 + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
17469 + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
17470 +
17471 + .quad 0x0000000000000000 /* 0x80 TSS descriptor */
17472 + .quad 0x0000000000000000 /* 0x88 LDT descriptor */
17473 +
17474 + /*
17475 + * Segments used for calling PnP BIOS have byte granularity.
17476 + * The code segments and data segments have fixed 64k limits,
17477 + * the transfer segment sizes are set at run time.
17478 + */
17479 + .quad 0x00409b000000ffff /* 0x90 32-bit code */
17480 + .quad 0x00009b000000ffff /* 0x98 16-bit code */
17481 + .quad 0x000093000000ffff /* 0xa0 16-bit data */
17482 + .quad 0x0000930000000000 /* 0xa8 16-bit data */
17483 + .quad 0x0000930000000000 /* 0xb0 16-bit data */
17484 +
17485 + /*
17486 + * The APM segments have byte granularity and their bases
17487 + * are set at run time. All have 64k limits.
17488 + */
17489 + .quad 0x00409b000000ffff /* 0xb8 APM CS code */
17490 + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
17491 + .quad 0x004093000000ffff /* 0xc8 APM DS data */
17492 +
17493 + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
17494 + .quad 0x0040930000000000 /* 0xd8 - PERCPU */
17495 + .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
17496 + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
17497 + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
17498 + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
17499 +
17500 + /* Be sure this is zeroed to avoid false validations in Xen */
17501 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
17502 + .endr
17503 diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
17504 index 780cd92..758b2a6 100644
17505 --- a/arch/x86/kernel/head_64.S
17506 +++ b/arch/x86/kernel/head_64.S
17507 @@ -19,6 +19,8 @@
17508 #include <asm/cache.h>
17509 #include <asm/processor-flags.h>
17510 #include <asm/percpu.h>
17511 +#include <asm/cpufeature.h>
17512 +#include <asm/alternative-asm.h>
17513
17514 #ifdef CONFIG_PARAVIRT
17515 #include <asm/asm-offsets.h>
17516 @@ -38,6 +40,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
17517 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
17518 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
17519 L3_START_KERNEL = pud_index(__START_KERNEL_map)
17520 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
17521 +L3_VMALLOC_START = pud_index(VMALLOC_START)
17522 +L4_VMALLOC_END = pgd_index(VMALLOC_END)
17523 +L3_VMALLOC_END = pud_index(VMALLOC_END)
17524 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
17525 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
17526
17527 .text
17528 __HEAD
17529 @@ -85,35 +93,23 @@ startup_64:
17530 */
17531 addq %rbp, init_level4_pgt + 0(%rip)
17532 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
17533 + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
17534 + addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
17535 + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
17536 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
17537
17538 addq %rbp, level3_ident_pgt + 0(%rip)
17539 +#ifndef CONFIG_XEN
17540 + addq %rbp, level3_ident_pgt + 8(%rip)
17541 +#endif
17542
17543 - addq %rbp, level3_kernel_pgt + (510*8)(%rip)
17544 - addq %rbp, level3_kernel_pgt + (511*8)(%rip)
17545 + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
17546 +
17547 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
17548 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
17549
17550 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
17551 -
17552 - /* Add an Identity mapping if I am above 1G */
17553 - leaq _text(%rip), %rdi
17554 - andq $PMD_PAGE_MASK, %rdi
17555 -
17556 - movq %rdi, %rax
17557 - shrq $PUD_SHIFT, %rax
17558 - andq $(PTRS_PER_PUD - 1), %rax
17559 - jz ident_complete
17560 -
17561 - leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
17562 - leaq level3_ident_pgt(%rip), %rbx
17563 - movq %rdx, 0(%rbx, %rax, 8)
17564 -
17565 - movq %rdi, %rax
17566 - shrq $PMD_SHIFT, %rax
17567 - andq $(PTRS_PER_PMD - 1), %rax
17568 - leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
17569 - leaq level2_spare_pgt(%rip), %rbx
17570 - movq %rdx, 0(%rbx, %rax, 8)
17571 -ident_complete:
17572 + addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
17573
17574 /*
17575 * Fixup the kernel text+data virtual addresses. Note that
17576 @@ -161,8 +157,8 @@ ENTRY(secondary_startup_64)
17577 * after the boot processor executes this code.
17578 */
17579
17580 - /* Enable PAE mode and PGE */
17581 - movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
17582 + /* Enable PAE mode and PSE/PGE */
17583 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
17584 movq %rax, %cr4
17585
17586 /* Setup early boot stage 4 level pagetables. */
17587 @@ -184,9 +180,16 @@ ENTRY(secondary_startup_64)
17588 movl $MSR_EFER, %ecx
17589 rdmsr
17590 btsl $_EFER_SCE, %eax /* Enable System Call */
17591 - btl $20,%edi /* No Execute supported? */
17592 + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
17593 jnc 1f
17594 btsl $_EFER_NX, %eax
17595 + leaq init_level4_pgt(%rip), %rdi
17596 +#ifndef CONFIG_EFI
17597 + btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
17598 +#endif
17599 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
17600 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
17601 + btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
17602 1: wrmsr /* Make changes effective */
17603
17604 /* Setup cr0 */
17605 @@ -249,6 +252,7 @@ ENTRY(secondary_startup_64)
17606 * jump. In addition we need to ensure %cs is set so we make this
17607 * a far return.
17608 */
17609 + pax_set_fptr_mask
17610 movq initial_code(%rip),%rax
17611 pushq $0 # fake return address to stop unwinder
17612 pushq $__KERNEL_CS # set correct cs
17613 @@ -262,16 +266,16 @@ ENTRY(secondary_startup_64)
17614 .quad x86_64_start_kernel
17615 ENTRY(initial_gs)
17616 .quad INIT_PER_CPU_VAR(irq_stack_union)
17617 - __FINITDATA
17618
17619 ENTRY(stack_start)
17620 .quad init_thread_union+THREAD_SIZE-8
17621 .word 0
17622 + __FINITDATA
17623
17624 bad_address:
17625 jmp bad_address
17626
17627 - .section ".init.text","ax"
17628 + __INIT
17629 #ifdef CONFIG_EARLY_PRINTK
17630 .globl early_idt_handlers
17631 early_idt_handlers:
17632 @@ -316,18 +320,23 @@ ENTRY(early_idt_handler)
17633 #endif /* EARLY_PRINTK */
17634 1: hlt
17635 jmp 1b
17636 + .previous
17637
17638 #ifdef CONFIG_EARLY_PRINTK
17639 + __INITDATA
17640 early_recursion_flag:
17641 .long 0
17642 + .previous
17643
17644 + .section .rodata,"a",@progbits
17645 early_idt_msg:
17646 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
17647 early_idt_ripmsg:
17648 .asciz "RIP %s\n"
17649 + .previous
17650 #endif /* CONFIG_EARLY_PRINTK */
17651 - .previous
17652
17653 + .section .rodata,"a",@progbits
17654 #define NEXT_PAGE(name) \
17655 .balign PAGE_SIZE; \
17656 ENTRY(name)
17657 @@ -350,13 +359,41 @@ NEXT_PAGE(init_level4_pgt)
17658 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17659 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
17660 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17661 + .org init_level4_pgt + L4_VMALLOC_START*8, 0
17662 + .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
17663 + .org init_level4_pgt + L4_VMALLOC_END*8, 0
17664 + .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
17665 + .org init_level4_pgt + L4_VMEMMAP_START*8, 0
17666 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
17667 .org init_level4_pgt + L4_START_KERNEL*8, 0
17668 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
17669 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
17670
17671 +#ifdef CONFIG_PAX_PER_CPU_PGD
17672 +NEXT_PAGE(cpu_pgd)
17673 + .rept NR_CPUS
17674 + .fill 512,8,0
17675 + .endr
17676 +#endif
17677 +
17678 NEXT_PAGE(level3_ident_pgt)
17679 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17680 +#ifdef CONFIG_XEN
17681 .fill 511,8,0
17682 +#else
17683 + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
17684 + .fill 510,8,0
17685 +#endif
17686 +
17687 +NEXT_PAGE(level3_vmalloc_start_pgt)
17688 + .fill 512,8,0
17689 +
17690 +NEXT_PAGE(level3_vmalloc_end_pgt)
17691 + .fill 512,8,0
17692 +
17693 +NEXT_PAGE(level3_vmemmap_pgt)
17694 + .fill L3_VMEMMAP_START,8,0
17695 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
17696
17697 NEXT_PAGE(level3_kernel_pgt)
17698 .fill L3_START_KERNEL,8,0
17699 @@ -364,20 +401,23 @@ NEXT_PAGE(level3_kernel_pgt)
17700 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
17701 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
17702
17703 +NEXT_PAGE(level2_vmemmap_pgt)
17704 + .fill 512,8,0
17705 +
17706 NEXT_PAGE(level2_fixmap_pgt)
17707 - .fill 506,8,0
17708 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
17709 - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
17710 - .fill 5,8,0
17711 + .fill 507,8,0
17712 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
17713 + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
17714 + .fill 4,8,0
17715
17716 -NEXT_PAGE(level1_fixmap_pgt)
17717 +NEXT_PAGE(level1_vsyscall_pgt)
17718 .fill 512,8,0
17719
17720 -NEXT_PAGE(level2_ident_pgt)
17721 - /* Since I easily can, map the first 1G.
17722 + /* Since I easily can, map the first 2G.
17723 * Don't set NX because code runs from these pages.
17724 */
17725 - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
17726 +NEXT_PAGE(level2_ident_pgt)
17727 + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
17728
17729 NEXT_PAGE(level2_kernel_pgt)
17730 /*
17731 @@ -390,33 +430,55 @@ NEXT_PAGE(level2_kernel_pgt)
17732 * If you want to increase this then increase MODULES_VADDR
17733 * too.)
17734 */
17735 - PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
17736 - KERNEL_IMAGE_SIZE/PMD_SIZE)
17737 -
17738 -NEXT_PAGE(level2_spare_pgt)
17739 - .fill 512, 8, 0
17740 + PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
17741
17742 #undef PMDS
17743 #undef NEXT_PAGE
17744
17745 - .data
17746 + .align PAGE_SIZE
17747 +ENTRY(cpu_gdt_table)
17748 + .rept NR_CPUS
17749 + .quad 0x0000000000000000 /* NULL descriptor */
17750 + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
17751 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
17752 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
17753 + .quad 0x00cffb000000ffff /* __USER32_CS */
17754 + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
17755 + .quad 0x00affb000000ffff /* __USER_CS */
17756 +
17757 +#ifdef CONFIG_PAX_KERNEXEC
17758 + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
17759 +#else
17760 + .quad 0x0 /* unused */
17761 +#endif
17762 +
17763 + .quad 0,0 /* TSS */
17764 + .quad 0,0 /* LDT */
17765 + .quad 0,0,0 /* three TLS descriptors */
17766 + .quad 0x0000f40000000000 /* node/CPU stored in limit */
17767 + /* asm/segment.h:GDT_ENTRIES must match this */
17768 +
17769 + /* zero the remaining page */
17770 + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
17771 + .endr
17772 +
17773 .align 16
17774 .globl early_gdt_descr
17775 early_gdt_descr:
17776 .word GDT_ENTRIES*8-1
17777 early_gdt_descr_base:
17778 - .quad INIT_PER_CPU_VAR(gdt_page)
17779 + .quad cpu_gdt_table
17780
17781 ENTRY(phys_base)
17782 /* This must match the first entry in level2_kernel_pgt */
17783 .quad 0x0000000000000000
17784
17785 #include "../../x86/xen/xen-head.S"
17786 -
17787 - .section .bss, "aw", @nobits
17788 +
17789 + .section .rodata,"a",@progbits
17790 .align L1_CACHE_BYTES
17791 ENTRY(idt_table)
17792 - .skip IDT_ENTRIES * 16
17793 + .fill 512,8,0
17794
17795 __PAGE_ALIGNED_BSS
17796 .align PAGE_SIZE
17797 diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
17798 index 9c3bd4a..e1d9b35 100644
17799 --- a/arch/x86/kernel/i386_ksyms_32.c
17800 +++ b/arch/x86/kernel/i386_ksyms_32.c
17801 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
17802 EXPORT_SYMBOL(cmpxchg8b_emu);
17803 #endif
17804
17805 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
17806 +
17807 /* Networking helper routines. */
17808 EXPORT_SYMBOL(csum_partial_copy_generic);
17809 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
17810 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
17811
17812 EXPORT_SYMBOL(__get_user_1);
17813 EXPORT_SYMBOL(__get_user_2);
17814 @@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
17815
17816 EXPORT_SYMBOL(csum_partial);
17817 EXPORT_SYMBOL(empty_zero_page);
17818 +
17819 +#ifdef CONFIG_PAX_KERNEXEC
17820 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
17821 +#endif
17822 diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
17823 index df89102..a244320 100644
17824 --- a/arch/x86/kernel/i8259.c
17825 +++ b/arch/x86/kernel/i8259.c
17826 @@ -208,7 +208,7 @@ spurious_8259A_irq:
17827 "spurious 8259A interrupt: IRQ%d.\n", irq);
17828 spurious_irq_mask |= irqmask;
17829 }
17830 - atomic_inc(&irq_err_count);
17831 + atomic_inc_unchecked(&irq_err_count);
17832 /*
17833 * Theoretically we do not have to handle this IRQ,
17834 * but in Linux this does not cause problems and is
17835 diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
17836 index 3a54dcb..1c22348 100644
17837 --- a/arch/x86/kernel/init_task.c
17838 +++ b/arch/x86/kernel/init_task.c
17839 @@ -20,8 +20,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
17840 * way process stacks are handled. This is done by having a special
17841 * "init_task" linker map entry..
17842 */
17843 -union thread_union init_thread_union __init_task_data =
17844 - { INIT_THREAD_INFO(init_task) };
17845 +union thread_union init_thread_union __init_task_data;
17846
17847 /*
17848 * Initial task structure.
17849 @@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
17850 * section. Since TSS's are completely CPU-local, we want them
17851 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
17852 */
17853 -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
17854 -
17855 +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
17856 +EXPORT_SYMBOL(init_tss);
17857 diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
17858 index 99c4d30..74c84e9 100644
17859 --- a/arch/x86/kernel/ioport.c
17860 +++ b/arch/x86/kernel/ioport.c
17861 @@ -6,6 +6,7 @@
17862 #include <linux/sched.h>
17863 #include <linux/kernel.h>
17864 #include <linux/capability.h>
17865 +#include <linux/security.h>
17866 #include <linux/errno.h>
17867 #include <linux/types.h>
17868 #include <linux/ioport.h>
17869 @@ -41,6 +42,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
17870
17871 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
17872 return -EINVAL;
17873 +#ifdef CONFIG_GRKERNSEC_IO
17874 + if (turn_on && grsec_disable_privio) {
17875 + gr_handle_ioperm();
17876 + return -EPERM;
17877 + }
17878 +#endif
17879 if (turn_on && !capable(CAP_SYS_RAWIO))
17880 return -EPERM;
17881
17882 @@ -67,7 +74,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
17883 * because the ->io_bitmap_max value must match the bitmap
17884 * contents:
17885 */
17886 - tss = &per_cpu(init_tss, get_cpu());
17887 + tss = init_tss + get_cpu();
17888
17889 set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
17890
17891 @@ -111,6 +118,12 @@ static int do_iopl(unsigned int level, struct pt_regs *regs)
17892 return -EINVAL;
17893 /* Trying to gain more privileges? */
17894 if (level > old) {
17895 +#ifdef CONFIG_GRKERNSEC_IO
17896 + if (grsec_disable_privio) {
17897 + gr_handle_iopl();
17898 + return -EPERM;
17899 + }
17900 +#endif
17901 if (!capable(CAP_SYS_RAWIO))
17902 return -EPERM;
17903 }
17904 diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
17905 index 04bbd52..83a07d9 100644
17906 --- a/arch/x86/kernel/irq.c
17907 +++ b/arch/x86/kernel/irq.c
17908 @@ -15,7 +15,7 @@
17909 #include <asm/mce.h>
17910 #include <asm/hw_irq.h>
17911
17912 -atomic_t irq_err_count;
17913 +atomic_unchecked_t irq_err_count;
17914
17915 /* Function pointer for generic interrupt vector handling */
17916 void (*generic_interrupt_extension)(void) = NULL;
17917 @@ -114,9 +114,9 @@ static int show_other_interrupts(struct seq_file *p, int prec)
17918 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
17919 seq_printf(p, " Machine check polls\n");
17920 #endif
17921 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
17922 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
17923 #if defined(CONFIG_X86_IO_APIC)
17924 - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
17925 + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
17926 #endif
17927 return 0;
17928 }
17929 @@ -209,10 +209,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
17930
17931 u64 arch_irq_stat(void)
17932 {
17933 - u64 sum = atomic_read(&irq_err_count);
17934 + u64 sum = atomic_read_unchecked(&irq_err_count);
17935
17936 #ifdef CONFIG_X86_IO_APIC
17937 - sum += atomic_read(&irq_mis_count);
17938 + sum += atomic_read_unchecked(&irq_mis_count);
17939 #endif
17940 return sum;
17941 }
17942 diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
17943 index 7d35d0f..03f1d52 100644
17944 --- a/arch/x86/kernel/irq_32.c
17945 +++ b/arch/x86/kernel/irq_32.c
17946 @@ -35,7 +35,7 @@ static int check_stack_overflow(void)
17947 __asm__ __volatile__("andl %%esp,%0" :
17948 "=r" (sp) : "0" (THREAD_SIZE - 1));
17949
17950 - return sp < (sizeof(struct thread_info) + STACK_WARN);
17951 + return sp < STACK_WARN;
17952 }
17953
17954 static void print_stack_overflow(void)
17955 @@ -54,9 +54,9 @@ static inline void print_stack_overflow(void) { }
17956 * per-CPU IRQ handling contexts (thread information and stack)
17957 */
17958 union irq_ctx {
17959 - struct thread_info tinfo;
17960 - u32 stack[THREAD_SIZE/sizeof(u32)];
17961 -} __attribute__((aligned(PAGE_SIZE)));
17962 + unsigned long previous_esp;
17963 + u32 stack[THREAD_SIZE/sizeof(u32)];
17964 +} __attribute__((aligned(THREAD_SIZE)));
17965
17966 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
17967 static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);
17968 @@ -78,10 +78,9 @@ static void call_on_stack(void *func, void *stack)
17969 static inline int
17970 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17971 {
17972 - union irq_ctx *curctx, *irqctx;
17973 + union irq_ctx *irqctx;
17974 u32 *isp, arg1, arg2;
17975
17976 - curctx = (union irq_ctx *) current_thread_info();
17977 irqctx = __get_cpu_var(hardirq_ctx);
17978
17979 /*
17980 @@ -90,21 +89,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17981 * handler) we can't do that and just have to keep using the
17982 * current stack (which is the irq stack already after all)
17983 */
17984 - if (unlikely(curctx == irqctx))
17985 + if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
17986 return 0;
17987
17988 /* build the stack frame on the IRQ stack */
17989 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
17990 - irqctx->tinfo.task = curctx->tinfo.task;
17991 - irqctx->tinfo.previous_esp = current_stack_pointer;
17992 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
17993 + irqctx->previous_esp = current_stack_pointer;
17994
17995 - /*
17996 - * Copy the softirq bits in preempt_count so that the
17997 - * softirq checks work in the hardirq context.
17998 - */
17999 - irqctx->tinfo.preempt_count =
18000 - (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
18001 - (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
18002 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18003 + __set_fs(MAKE_MM_SEG(0));
18004 +#endif
18005
18006 if (unlikely(overflow))
18007 call_on_stack(print_stack_overflow, isp);
18008 @@ -116,6 +110,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
18009 : "0" (irq), "1" (desc), "2" (isp),
18010 "D" (desc->handle_irq)
18011 : "memory", "cc", "ecx");
18012 +
18013 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18014 + __set_fs(current_thread_info()->addr_limit);
18015 +#endif
18016 +
18017 return 1;
18018 }
18019
18020 @@ -124,28 +123,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
18021 */
18022 void __cpuinit irq_ctx_init(int cpu)
18023 {
18024 - union irq_ctx *irqctx;
18025 -
18026 if (per_cpu(hardirq_ctx, cpu))
18027 return;
18028
18029 - irqctx = &per_cpu(hardirq_stack, cpu);
18030 - irqctx->tinfo.task = NULL;
18031 - irqctx->tinfo.exec_domain = NULL;
18032 - irqctx->tinfo.cpu = cpu;
18033 - irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
18034 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
18035 -
18036 - per_cpu(hardirq_ctx, cpu) = irqctx;
18037 -
18038 - irqctx = &per_cpu(softirq_stack, cpu);
18039 - irqctx->tinfo.task = NULL;
18040 - irqctx->tinfo.exec_domain = NULL;
18041 - irqctx->tinfo.cpu = cpu;
18042 - irqctx->tinfo.preempt_count = 0;
18043 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
18044 -
18045 - per_cpu(softirq_ctx, cpu) = irqctx;
18046 + per_cpu(hardirq_ctx, cpu) = &per_cpu(hardirq_stack, cpu);
18047 + per_cpu(softirq_ctx, cpu) = &per_cpu(softirq_stack, cpu);
18048
18049 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
18050 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
18051 @@ -159,7 +141,6 @@ void irq_ctx_exit(int cpu)
18052 asmlinkage void do_softirq(void)
18053 {
18054 unsigned long flags;
18055 - struct thread_info *curctx;
18056 union irq_ctx *irqctx;
18057 u32 *isp;
18058
18059 @@ -169,15 +150,22 @@ asmlinkage void do_softirq(void)
18060 local_irq_save(flags);
18061
18062 if (local_softirq_pending()) {
18063 - curctx = current_thread_info();
18064 irqctx = __get_cpu_var(softirq_ctx);
18065 - irqctx->tinfo.task = curctx->task;
18066 - irqctx->tinfo.previous_esp = current_stack_pointer;
18067 + irqctx->previous_esp = current_stack_pointer;
18068
18069 /* build the stack frame on the softirq stack */
18070 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
18071 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
18072 +
18073 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18074 + __set_fs(MAKE_MM_SEG(0));
18075 +#endif
18076
18077 call_on_stack(__do_softirq, isp);
18078 +
18079 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18080 + __set_fs(current_thread_info()->addr_limit);
18081 +#endif
18082 +
18083 /*
18084 * Shouldnt happen, we returned above if in_interrupt():
18085 */
18086 diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
18087 index 8d82a77..0baf312 100644
18088 --- a/arch/x86/kernel/kgdb.c
18089 +++ b/arch/x86/kernel/kgdb.c
18090 @@ -390,13 +390,13 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
18091
18092 /* clear the trace bit */
18093 linux_regs->flags &= ~X86_EFLAGS_TF;
18094 - atomic_set(&kgdb_cpu_doing_single_step, -1);
18095 + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
18096
18097 /* set the trace bit if we're stepping */
18098 if (remcomInBuffer[0] == 's') {
18099 linux_regs->flags |= X86_EFLAGS_TF;
18100 kgdb_single_step = 1;
18101 - atomic_set(&kgdb_cpu_doing_single_step,
18102 + atomic_set_unchecked(&kgdb_cpu_doing_single_step,
18103 raw_smp_processor_id());
18104 }
18105
18106 @@ -476,7 +476,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
18107 break;
18108
18109 case DIE_DEBUG:
18110 - if (atomic_read(&kgdb_cpu_doing_single_step) ==
18111 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) ==
18112 raw_smp_processor_id()) {
18113 if (user_mode(regs))
18114 return single_step_cont(regs, args);
18115 @@ -573,7 +573,7 @@ unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
18116 return instruction_pointer(regs);
18117 }
18118
18119 -struct kgdb_arch arch_kgdb_ops = {
18120 +const struct kgdb_arch arch_kgdb_ops = {
18121 /* Breakpoint instruction: */
18122 .gdb_bpt_instr = { 0xcc },
18123 .flags = KGDB_HW_BREAKPOINT,
18124 diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
18125 index 7a67820..70ea187 100644
18126 --- a/arch/x86/kernel/kprobes.c
18127 +++ b/arch/x86/kernel/kprobes.c
18128 @@ -168,9 +168,13 @@ static void __kprobes set_jmp_op(void *from, void *to)
18129 char op;
18130 s32 raddr;
18131 } __attribute__((packed)) * jop;
18132 - jop = (struct __arch_jmp_op *)from;
18133 +
18134 + jop = (struct __arch_jmp_op *)(ktla_ktva(from));
18135 +
18136 + pax_open_kernel();
18137 jop->raddr = (s32)((long)(to) - ((long)(from) + 5));
18138 jop->op = RELATIVEJUMP_INSTRUCTION;
18139 + pax_close_kernel();
18140 }
18141
18142 /*
18143 @@ -195,7 +199,7 @@ static int __kprobes can_boost(kprobe_opcode_t *opcodes)
18144 kprobe_opcode_t opcode;
18145 kprobe_opcode_t *orig_opcodes = opcodes;
18146
18147 - if (search_exception_tables((unsigned long)opcodes))
18148 + if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
18149 return 0; /* Page fault may occur on this address. */
18150
18151 retry:
18152 @@ -339,7 +343,9 @@ static void __kprobes fix_riprel(struct kprobe *p)
18153 disp = (u8 *) p->addr + *((s32 *) insn) -
18154 (u8 *) p->ainsn.insn;
18155 BUG_ON((s64) (s32) disp != disp); /* Sanity check. */
18156 + pax_open_kernel();
18157 *(s32 *)insn = (s32) disp;
18158 + pax_close_kernel();
18159 }
18160 }
18161 #endif
18162 @@ -347,16 +353,18 @@ static void __kprobes fix_riprel(struct kprobe *p)
18163
18164 static void __kprobes arch_copy_kprobe(struct kprobe *p)
18165 {
18166 - memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
18167 + pax_open_kernel();
18168 + memcpy(p->ainsn.insn, ktla_ktva(p->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
18169 + pax_close_kernel();
18170
18171 fix_riprel(p);
18172
18173 - if (can_boost(p->addr))
18174 + if (can_boost(ktla_ktva(p->addr)))
18175 p->ainsn.boostable = 0;
18176 else
18177 p->ainsn.boostable = -1;
18178
18179 - p->opcode = *p->addr;
18180 + p->opcode = *(ktla_ktva(p->addr));
18181 }
18182
18183 int __kprobes arch_prepare_kprobe(struct kprobe *p)
18184 @@ -434,7 +442,7 @@ static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
18185 if (p->opcode == BREAKPOINT_INSTRUCTION)
18186 regs->ip = (unsigned long)p->addr;
18187 else
18188 - regs->ip = (unsigned long)p->ainsn.insn;
18189 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
18190 }
18191
18192 void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
18193 @@ -455,7 +463,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
18194 if (p->ainsn.boostable == 1 && !p->post_handler) {
18195 /* Boost up -- we can execute copied instructions directly */
18196 reset_current_kprobe();
18197 - regs->ip = (unsigned long)p->ainsn.insn;
18198 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
18199 preempt_enable_no_resched();
18200 return;
18201 }
18202 @@ -525,7 +533,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
18203 struct kprobe_ctlblk *kcb;
18204
18205 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
18206 - if (*addr != BREAKPOINT_INSTRUCTION) {
18207 + if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
18208 /*
18209 * The breakpoint instruction was removed right
18210 * after we hit it. Another cpu has removed
18211 @@ -637,6 +645,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
18212 /* Skip orig_ax, ip, cs */
18213 " addq $24, %rsp\n"
18214 " popfq\n"
18215 +#ifdef KERNEXEC_PLUGIN
18216 + " btsq $63,(%rsp)\n"
18217 +#endif
18218 #else
18219 " pushf\n"
18220 /*
18221 @@ -777,7 +788,7 @@ static void __kprobes resume_execution(struct kprobe *p,
18222 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
18223 {
18224 unsigned long *tos = stack_addr(regs);
18225 - unsigned long copy_ip = (unsigned long)p->ainsn.insn;
18226 + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
18227 unsigned long orig_ip = (unsigned long)p->addr;
18228 kprobe_opcode_t *insn = p->ainsn.insn;
18229
18230 @@ -960,7 +971,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
18231 struct die_args *args = data;
18232 int ret = NOTIFY_DONE;
18233
18234 - if (args->regs && user_mode_vm(args->regs))
18235 + if (args->regs && user_mode(args->regs))
18236 return ret;
18237
18238 switch (val) {
18239 diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
18240 index 63b0ec8..6d92227 100644
18241 --- a/arch/x86/kernel/kvm.c
18242 +++ b/arch/x86/kernel/kvm.c
18243 @@ -216,6 +216,7 @@ static void __init paravirt_ops_setup(void)
18244 pv_mmu_ops.set_pud = kvm_set_pud;
18245 #if PAGETABLE_LEVELS == 4
18246 pv_mmu_ops.set_pgd = kvm_set_pgd;
18247 + pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
18248 #endif
18249 #endif
18250 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
18251 diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
18252 index ec6ef60..ab2c824 100644
18253 --- a/arch/x86/kernel/ldt.c
18254 +++ b/arch/x86/kernel/ldt.c
18255 @@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
18256 if (reload) {
18257 #ifdef CONFIG_SMP
18258 preempt_disable();
18259 - load_LDT(pc);
18260 + load_LDT_nolock(pc);
18261 if (!cpumask_equal(mm_cpumask(current->mm),
18262 cpumask_of(smp_processor_id())))
18263 smp_call_function(flush_ldt, current->mm, 1);
18264 preempt_enable();
18265 #else
18266 - load_LDT(pc);
18267 + load_LDT_nolock(pc);
18268 #endif
18269 }
18270 if (oldsize) {
18271 @@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
18272 return err;
18273
18274 for (i = 0; i < old->size; i++)
18275 - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
18276 + write_ldt_entry(new->ldt, i, old->ldt + i);
18277 return 0;
18278 }
18279
18280 @@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
18281 retval = copy_ldt(&mm->context, &old_mm->context);
18282 mutex_unlock(&old_mm->context.lock);
18283 }
18284 +
18285 + if (tsk == current) {
18286 + mm->context.vdso = 0;
18287 +
18288 +#ifdef CONFIG_X86_32
18289 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
18290 + mm->context.user_cs_base = 0UL;
18291 + mm->context.user_cs_limit = ~0UL;
18292 +
18293 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
18294 + cpus_clear(mm->context.cpu_user_cs_mask);
18295 +#endif
18296 +
18297 +#endif
18298 +#endif
18299 +
18300 + }
18301 +
18302 return retval;
18303 }
18304
18305 @@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
18306 }
18307 }
18308
18309 +#ifdef CONFIG_PAX_SEGMEXEC
18310 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
18311 + error = -EINVAL;
18312 + goto out_unlock;
18313 + }
18314 +#endif
18315 +
18316 fill_ldt(&ldt, &ldt_info);
18317 if (oldmode)
18318 ldt.avl = 0;
18319 diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
18320 index c1c429d..f02eaf9 100644
18321 --- a/arch/x86/kernel/machine_kexec_32.c
18322 +++ b/arch/x86/kernel/machine_kexec_32.c
18323 @@ -26,7 +26,7 @@
18324 #include <asm/system.h>
18325 #include <asm/cacheflush.h>
18326
18327 -static void set_idt(void *newidt, __u16 limit)
18328 +static void set_idt(struct desc_struct *newidt, __u16 limit)
18329 {
18330 struct desc_ptr curidt;
18331
18332 @@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
18333 }
18334
18335
18336 -static void set_gdt(void *newgdt, __u16 limit)
18337 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
18338 {
18339 struct desc_ptr curgdt;
18340
18341 @@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
18342 }
18343
18344 control_page = page_address(image->control_code_page);
18345 - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
18346 + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
18347
18348 relocate_kernel_ptr = control_page;
18349 page_list[PA_CONTROL_PAGE] = __pa(control_page);
18350 diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
18351 index 1e47679..e73449d 100644
18352 --- a/arch/x86/kernel/microcode_amd.c
18353 +++ b/arch/x86/kernel/microcode_amd.c
18354 @@ -364,7 +364,7 @@ static void microcode_fini_cpu_amd(int cpu)
18355 uci->mc = NULL;
18356 }
18357
18358 -static struct microcode_ops microcode_amd_ops = {
18359 +static const struct microcode_ops microcode_amd_ops = {
18360 .request_microcode_user = request_microcode_user,
18361 .request_microcode_fw = request_microcode_fw,
18362 .collect_cpu_info = collect_cpu_info_amd,
18363 @@ -372,7 +372,7 @@ static struct microcode_ops microcode_amd_ops = {
18364 .microcode_fini_cpu = microcode_fini_cpu_amd,
18365 };
18366
18367 -struct microcode_ops * __init init_amd_microcode(void)
18368 +const struct microcode_ops * __init init_amd_microcode(void)
18369 {
18370 return &microcode_amd_ops;
18371 }
18372 diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
18373 index 378e9a8..b5a6ea9 100644
18374 --- a/arch/x86/kernel/microcode_core.c
18375 +++ b/arch/x86/kernel/microcode_core.c
18376 @@ -90,7 +90,7 @@ MODULE_LICENSE("GPL");
18377
18378 #define MICROCODE_VERSION "2.00"
18379
18380 -static struct microcode_ops *microcode_ops;
18381 +static const struct microcode_ops *microcode_ops;
18382
18383 /*
18384 * Synchronization.
18385 diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
18386 index 0d334dd..14cedaf 100644
18387 --- a/arch/x86/kernel/microcode_intel.c
18388 +++ b/arch/x86/kernel/microcode_intel.c
18389 @@ -443,13 +443,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
18390
18391 static int get_ucode_user(void *to, const void *from, size_t n)
18392 {
18393 - return copy_from_user(to, from, n);
18394 + return copy_from_user(to, (const void __force_user *)from, n);
18395 }
18396
18397 static enum ucode_state
18398 request_microcode_user(int cpu, const void __user *buf, size_t size)
18399 {
18400 - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
18401 + return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
18402 }
18403
18404 static void microcode_fini_cpu(int cpu)
18405 @@ -460,7 +460,7 @@ static void microcode_fini_cpu(int cpu)
18406 uci->mc = NULL;
18407 }
18408
18409 -static struct microcode_ops microcode_intel_ops = {
18410 +static const struct microcode_ops microcode_intel_ops = {
18411 .request_microcode_user = request_microcode_user,
18412 .request_microcode_fw = request_microcode_fw,
18413 .collect_cpu_info = collect_cpu_info,
18414 @@ -468,7 +468,7 @@ static struct microcode_ops microcode_intel_ops = {
18415 .microcode_fini_cpu = microcode_fini_cpu,
18416 };
18417
18418 -struct microcode_ops * __init init_intel_microcode(void)
18419 +const struct microcode_ops * __init init_intel_microcode(void)
18420 {
18421 return &microcode_intel_ops;
18422 }
18423 diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
18424 index 89f386f..9028f51 100644
18425 --- a/arch/x86/kernel/module.c
18426 +++ b/arch/x86/kernel/module.c
18427 @@ -34,7 +34,7 @@
18428 #define DEBUGP(fmt...)
18429 #endif
18430
18431 -void *module_alloc(unsigned long size)
18432 +static void *__module_alloc(unsigned long size, pgprot_t prot)
18433 {
18434 struct vm_struct *area;
18435
18436 @@ -48,8 +48,18 @@ void *module_alloc(unsigned long size)
18437 if (!area)
18438 return NULL;
18439
18440 - return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM,
18441 - PAGE_KERNEL_EXEC);
18442 + return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot);
18443 +}
18444 +
18445 +void *module_alloc(unsigned long size)
18446 +{
18447 +
18448 +#ifdef CONFIG_PAX_KERNEXEC
18449 + return __module_alloc(size, PAGE_KERNEL);
18450 +#else
18451 + return __module_alloc(size, PAGE_KERNEL_EXEC);
18452 +#endif
18453 +
18454 }
18455
18456 /* Free memory returned from module_alloc */
18457 @@ -58,6 +68,40 @@ void module_free(struct module *mod, void *module_region)
18458 vfree(module_region);
18459 }
18460
18461 +#ifdef CONFIG_PAX_KERNEXEC
18462 +#ifdef CONFIG_X86_32
18463 +void *module_alloc_exec(unsigned long size)
18464 +{
18465 + struct vm_struct *area;
18466 +
18467 + if (size == 0)
18468 + return NULL;
18469 +
18470 + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
18471 + return area ? area->addr : NULL;
18472 +}
18473 +EXPORT_SYMBOL(module_alloc_exec);
18474 +
18475 +void module_free_exec(struct module *mod, void *module_region)
18476 +{
18477 + vunmap(module_region);
18478 +}
18479 +EXPORT_SYMBOL(module_free_exec);
18480 +#else
18481 +void module_free_exec(struct module *mod, void *module_region)
18482 +{
18483 + module_free(mod, module_region);
18484 +}
18485 +EXPORT_SYMBOL(module_free_exec);
18486 +
18487 +void *module_alloc_exec(unsigned long size)
18488 +{
18489 + return __module_alloc(size, PAGE_KERNEL_RX);
18490 +}
18491 +EXPORT_SYMBOL(module_alloc_exec);
18492 +#endif
18493 +#endif
18494 +
18495 /* We don't need anything special. */
18496 int module_frob_arch_sections(Elf_Ehdr *hdr,
18497 Elf_Shdr *sechdrs,
18498 @@ -77,14 +121,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
18499 unsigned int i;
18500 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
18501 Elf32_Sym *sym;
18502 - uint32_t *location;
18503 + uint32_t *plocation, location;
18504
18505 DEBUGP("Applying relocate section %u to %u\n", relsec,
18506 sechdrs[relsec].sh_info);
18507 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
18508 /* This is where to make the change */
18509 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
18510 - + rel[i].r_offset;
18511 + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
18512 + location = (uint32_t)plocation;
18513 + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
18514 + plocation = ktla_ktva((void *)plocation);
18515 /* This is the symbol it is referring to. Note that all
18516 undefined symbols have been resolved. */
18517 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
18518 @@ -93,11 +139,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
18519 switch (ELF32_R_TYPE(rel[i].r_info)) {
18520 case R_386_32:
18521 /* We add the value into the location given */
18522 - *location += sym->st_value;
18523 + pax_open_kernel();
18524 + *plocation += sym->st_value;
18525 + pax_close_kernel();
18526 break;
18527 case R_386_PC32:
18528 /* Add the value, subtract its postition */
18529 - *location += sym->st_value - (uint32_t)location;
18530 + pax_open_kernel();
18531 + *plocation += sym->st_value - location;
18532 + pax_close_kernel();
18533 break;
18534 default:
18535 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
18536 @@ -153,21 +203,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
18537 case R_X86_64_NONE:
18538 break;
18539 case R_X86_64_64:
18540 + pax_open_kernel();
18541 *(u64 *)loc = val;
18542 + pax_close_kernel();
18543 break;
18544 case R_X86_64_32:
18545 + pax_open_kernel();
18546 *(u32 *)loc = val;
18547 + pax_close_kernel();
18548 if (val != *(u32 *)loc)
18549 goto overflow;
18550 break;
18551 case R_X86_64_32S:
18552 + pax_open_kernel();
18553 *(s32 *)loc = val;
18554 + pax_close_kernel();
18555 if ((s64)val != *(s32 *)loc)
18556 goto overflow;
18557 break;
18558 case R_X86_64_PC32:
18559 val -= (u64)loc;
18560 + pax_open_kernel();
18561 *(u32 *)loc = val;
18562 + pax_close_kernel();
18563 +
18564 #if 0
18565 if ((s64)val != *(s32 *)loc)
18566 goto overflow;
18567 diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
18568 index 3a7c5a4..9191528 100644
18569 --- a/arch/x86/kernel/paravirt-spinlocks.c
18570 +++ b/arch/x86/kernel/paravirt-spinlocks.c
18571 @@ -13,7 +13,7 @@ default_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
18572 __raw_spin_lock(lock);
18573 }
18574
18575 -struct pv_lock_ops pv_lock_ops = {
18576 +struct pv_lock_ops pv_lock_ops __read_only = {
18577 #ifdef CONFIG_SMP
18578 .spin_is_locked = __ticket_spin_is_locked,
18579 .spin_is_contended = __ticket_spin_is_contended,
18580 diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
18581 index 1b1739d..dea6077 100644
18582 --- a/arch/x86/kernel/paravirt.c
18583 +++ b/arch/x86/kernel/paravirt.c
18584 @@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
18585 {
18586 return x;
18587 }
18588 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
18589 +PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
18590 +#endif
18591
18592 void __init default_banner(void)
18593 {
18594 @@ -122,7 +125,7 @@ unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
18595 * corresponding structure. */
18596 static void *get_call_destination(u8 type)
18597 {
18598 - struct paravirt_patch_template tmpl = {
18599 + const struct paravirt_patch_template tmpl = {
18600 .pv_init_ops = pv_init_ops,
18601 .pv_time_ops = pv_time_ops,
18602 .pv_cpu_ops = pv_cpu_ops,
18603 @@ -133,6 +136,8 @@ static void *get_call_destination(u8 type)
18604 .pv_lock_ops = pv_lock_ops,
18605 #endif
18606 };
18607 +
18608 + pax_track_stack();
18609 return *((void **)&tmpl + type);
18610 }
18611
18612 @@ -145,15 +150,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
18613 if (opfunc == NULL)
18614 /* If there's no function, patch it with a ud2a (BUG) */
18615 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
18616 - else if (opfunc == _paravirt_nop)
18617 + else if (opfunc == (void *)_paravirt_nop)
18618 /* If the operation is a nop, then nop the callsite */
18619 ret = paravirt_patch_nop();
18620
18621 /* identity functions just return their single argument */
18622 - else if (opfunc == _paravirt_ident_32)
18623 + else if (opfunc == (void *)_paravirt_ident_32)
18624 ret = paravirt_patch_ident_32(insnbuf, len);
18625 - else if (opfunc == _paravirt_ident_64)
18626 + else if (opfunc == (void *)_paravirt_ident_64)
18627 ret = paravirt_patch_ident_64(insnbuf, len);
18628 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
18629 + else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
18630 + ret = paravirt_patch_ident_64(insnbuf, len);
18631 +#endif
18632
18633 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
18634 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
18635 @@ -178,7 +187,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
18636 if (insn_len > len || start == NULL)
18637 insn_len = len;
18638 else
18639 - memcpy(insnbuf, start, insn_len);
18640 + memcpy(insnbuf, ktla_ktva(start), insn_len);
18641
18642 return insn_len;
18643 }
18644 @@ -294,22 +303,22 @@ void arch_flush_lazy_mmu_mode(void)
18645 preempt_enable();
18646 }
18647
18648 -struct pv_info pv_info = {
18649 +struct pv_info pv_info __read_only = {
18650 .name = "bare hardware",
18651 .paravirt_enabled = 0,
18652 .kernel_rpl = 0,
18653 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
18654 };
18655
18656 -struct pv_init_ops pv_init_ops = {
18657 +struct pv_init_ops pv_init_ops __read_only = {
18658 .patch = native_patch,
18659 };
18660
18661 -struct pv_time_ops pv_time_ops = {
18662 +struct pv_time_ops pv_time_ops __read_only = {
18663 .sched_clock = native_sched_clock,
18664 };
18665
18666 -struct pv_irq_ops pv_irq_ops = {
18667 +struct pv_irq_ops pv_irq_ops __read_only = {
18668 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
18669 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
18670 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
18671 @@ -321,7 +330,7 @@ struct pv_irq_ops pv_irq_ops = {
18672 #endif
18673 };
18674
18675 -struct pv_cpu_ops pv_cpu_ops = {
18676 +struct pv_cpu_ops pv_cpu_ops __read_only = {
18677 .cpuid = native_cpuid,
18678 .get_debugreg = native_get_debugreg,
18679 .set_debugreg = native_set_debugreg,
18680 @@ -382,21 +391,26 @@ struct pv_cpu_ops pv_cpu_ops = {
18681 .end_context_switch = paravirt_nop,
18682 };
18683
18684 -struct pv_apic_ops pv_apic_ops = {
18685 +struct pv_apic_ops pv_apic_ops __read_only = {
18686 #ifdef CONFIG_X86_LOCAL_APIC
18687 .startup_ipi_hook = paravirt_nop,
18688 #endif
18689 };
18690
18691 -#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
18692 +#ifdef CONFIG_X86_32
18693 +#ifdef CONFIG_X86_PAE
18694 +/* 64-bit pagetable entries */
18695 +#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
18696 +#else
18697 /* 32-bit pagetable entries */
18698 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
18699 +#endif
18700 #else
18701 /* 64-bit pagetable entries */
18702 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
18703 #endif
18704
18705 -struct pv_mmu_ops pv_mmu_ops = {
18706 +struct pv_mmu_ops pv_mmu_ops __read_only = {
18707
18708 .read_cr2 = native_read_cr2,
18709 .write_cr2 = native_write_cr2,
18710 @@ -448,6 +462,7 @@ struct pv_mmu_ops pv_mmu_ops = {
18711 .make_pud = PTE_IDENT,
18712
18713 .set_pgd = native_set_pgd,
18714 + .set_pgd_batched = native_set_pgd_batched,
18715 #endif
18716 #endif /* PAGETABLE_LEVELS >= 3 */
18717
18718 @@ -467,6 +482,12 @@ struct pv_mmu_ops pv_mmu_ops = {
18719 },
18720
18721 .set_fixmap = native_set_fixmap,
18722 +
18723 +#ifdef CONFIG_PAX_KERNEXEC
18724 + .pax_open_kernel = native_pax_open_kernel,
18725 + .pax_close_kernel = native_pax_close_kernel,
18726 +#endif
18727 +
18728 };
18729
18730 EXPORT_SYMBOL_GPL(pv_time_ops);
18731 diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
18732 index 1a2d4b1..6a0dd55 100644
18733 --- a/arch/x86/kernel/pci-calgary_64.c
18734 +++ b/arch/x86/kernel/pci-calgary_64.c
18735 @@ -477,7 +477,7 @@ static void calgary_free_coherent(struct device *dev, size_t size,
18736 free_pages((unsigned long)vaddr, get_order(size));
18737 }
18738
18739 -static struct dma_map_ops calgary_dma_ops = {
18740 +static const struct dma_map_ops calgary_dma_ops = {
18741 .alloc_coherent = calgary_alloc_coherent,
18742 .free_coherent = calgary_free_coherent,
18743 .map_sg = calgary_map_sg,
18744 diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
18745 index 6ac3931..42b4414 100644
18746 --- a/arch/x86/kernel/pci-dma.c
18747 +++ b/arch/x86/kernel/pci-dma.c
18748 @@ -14,7 +14,7 @@
18749
18750 static int forbid_dac __read_mostly;
18751
18752 -struct dma_map_ops *dma_ops;
18753 +const struct dma_map_ops *dma_ops;
18754 EXPORT_SYMBOL(dma_ops);
18755
18756 static int iommu_sac_force __read_mostly;
18757 @@ -243,7 +243,7 @@ early_param("iommu", iommu_setup);
18758
18759 int dma_supported(struct device *dev, u64 mask)
18760 {
18761 - struct dma_map_ops *ops = get_dma_ops(dev);
18762 + const struct dma_map_ops *ops = get_dma_ops(dev);
18763
18764 #ifdef CONFIG_PCI
18765 if (mask > 0xffffffff && forbid_dac > 0) {
18766 diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
18767 index 1c76691..e3632db 100644
18768 --- a/arch/x86/kernel/pci-gart_64.c
18769 +++ b/arch/x86/kernel/pci-gart_64.c
18770 @@ -682,7 +682,7 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
18771 return -1;
18772 }
18773
18774 -static struct dma_map_ops gart_dma_ops = {
18775 +static const struct dma_map_ops gart_dma_ops = {
18776 .map_sg = gart_map_sg,
18777 .unmap_sg = gart_unmap_sg,
18778 .map_page = gart_map_page,
18779 diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
18780 index a3933d4..c898869 100644
18781 --- a/arch/x86/kernel/pci-nommu.c
18782 +++ b/arch/x86/kernel/pci-nommu.c
18783 @@ -94,7 +94,7 @@ static void nommu_sync_sg_for_device(struct device *dev,
18784 flush_write_buffers();
18785 }
18786
18787 -struct dma_map_ops nommu_dma_ops = {
18788 +const struct dma_map_ops nommu_dma_ops = {
18789 .alloc_coherent = dma_generic_alloc_coherent,
18790 .free_coherent = nommu_free_coherent,
18791 .map_sg = nommu_map_sg,
18792 diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
18793 index aaa6b78..4de1881 100644
18794 --- a/arch/x86/kernel/pci-swiotlb.c
18795 +++ b/arch/x86/kernel/pci-swiotlb.c
18796 @@ -25,7 +25,7 @@ static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
18797 return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
18798 }
18799
18800 -static struct dma_map_ops swiotlb_dma_ops = {
18801 +static const struct dma_map_ops swiotlb_dma_ops = {
18802 .mapping_error = swiotlb_dma_mapping_error,
18803 .alloc_coherent = x86_swiotlb_alloc_coherent,
18804 .free_coherent = swiotlb_free_coherent,
18805 diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
18806 index fc6c84d..0312ca2 100644
18807 --- a/arch/x86/kernel/process.c
18808 +++ b/arch/x86/kernel/process.c
18809 @@ -51,16 +51,33 @@ void free_thread_xstate(struct task_struct *tsk)
18810
18811 void free_thread_info(struct thread_info *ti)
18812 {
18813 - free_thread_xstate(ti->task);
18814 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
18815 }
18816
18817 +static struct kmem_cache *task_struct_cachep;
18818 +
18819 void arch_task_cache_init(void)
18820 {
18821 - task_xstate_cachep =
18822 - kmem_cache_create("task_xstate", xstate_size,
18823 + /* create a slab on which task_structs can be allocated */
18824 + task_struct_cachep =
18825 + kmem_cache_create("task_struct", sizeof(struct task_struct),
18826 + ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
18827 +
18828 + task_xstate_cachep =
18829 + kmem_cache_create("task_xstate", xstate_size,
18830 __alignof__(union thread_xstate),
18831 - SLAB_PANIC | SLAB_NOTRACK, NULL);
18832 + SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
18833 +}
18834 +
18835 +struct task_struct *alloc_task_struct(void)
18836 +{
18837 + return kmem_cache_alloc(task_struct_cachep, GFP_KERNEL);
18838 +}
18839 +
18840 +void free_task_struct(struct task_struct *task)
18841 +{
18842 + free_thread_xstate(task);
18843 + kmem_cache_free(task_struct_cachep, task);
18844 }
18845
18846 /*
18847 @@ -73,7 +90,7 @@ void exit_thread(void)
18848 unsigned long *bp = t->io_bitmap_ptr;
18849
18850 if (bp) {
18851 - struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
18852 + struct tss_struct *tss = init_tss + get_cpu();
18853
18854 t->io_bitmap_ptr = NULL;
18855 clear_thread_flag(TIF_IO_BITMAP);
18856 @@ -93,6 +110,9 @@ void flush_thread(void)
18857
18858 clear_tsk_thread_flag(tsk, TIF_DEBUG);
18859
18860 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
18861 + loadsegment(gs, 0);
18862 +#endif
18863 tsk->thread.debugreg0 = 0;
18864 tsk->thread.debugreg1 = 0;
18865 tsk->thread.debugreg2 = 0;
18866 @@ -307,7 +327,7 @@ void default_idle(void)
18867 EXPORT_SYMBOL(default_idle);
18868 #endif
18869
18870 -void stop_this_cpu(void *dummy)
18871 +__noreturn void stop_this_cpu(void *dummy)
18872 {
18873 local_irq_disable();
18874 /*
18875 @@ -568,16 +588,38 @@ static int __init idle_setup(char *str)
18876 }
18877 early_param("idle", idle_setup);
18878
18879 -unsigned long arch_align_stack(unsigned long sp)
18880 +#ifdef CONFIG_PAX_RANDKSTACK
18881 +void pax_randomize_kstack(struct pt_regs *regs)
18882 {
18883 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
18884 - sp -= get_random_int() % 8192;
18885 - return sp & ~0xf;
18886 -}
18887 + struct thread_struct *thread = &current->thread;
18888 + unsigned long time;
18889
18890 -unsigned long arch_randomize_brk(struct mm_struct *mm)
18891 -{
18892 - unsigned long range_end = mm->brk + 0x02000000;
18893 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
18894 + if (!randomize_va_space)
18895 + return;
18896 +
18897 + if (v8086_mode(regs))
18898 + return;
18899 +
18900 + rdtscl(time);
18901 +
18902 + /* P4 seems to return a 0 LSB, ignore it */
18903 +#ifdef CONFIG_MPENTIUM4
18904 + time &= 0x3EUL;
18905 + time <<= 2;
18906 +#elif defined(CONFIG_X86_64)
18907 + time &= 0xFUL;
18908 + time <<= 4;
18909 +#else
18910 + time &= 0x1FUL;
18911 + time <<= 3;
18912 +#endif
18913 +
18914 + thread->sp0 ^= time;
18915 + load_sp0(init_tss + smp_processor_id(), thread);
18916 +
18917 +#ifdef CONFIG_X86_64
18918 + percpu_write(kernel_stack, thread->sp0);
18919 +#endif
18920 }
18921 +#endif
18922
18923 diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
18924 index c40c432..6e1df72 100644
18925 --- a/arch/x86/kernel/process_32.c
18926 +++ b/arch/x86/kernel/process_32.c
18927 @@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
18928 unsigned long thread_saved_pc(struct task_struct *tsk)
18929 {
18930 return ((unsigned long *)tsk->thread.sp)[3];
18931 +//XXX return tsk->thread.eip;
18932 }
18933
18934 #ifndef CONFIG_SMP
18935 @@ -129,15 +130,14 @@ void __show_regs(struct pt_regs *regs, int all)
18936 unsigned short ss, gs;
18937 const char *board;
18938
18939 - if (user_mode_vm(regs)) {
18940 + if (user_mode(regs)) {
18941 sp = regs->sp;
18942 ss = regs->ss & 0xffff;
18943 - gs = get_user_gs(regs);
18944 } else {
18945 sp = (unsigned long) (&regs->sp);
18946 savesegment(ss, ss);
18947 - savesegment(gs, gs);
18948 }
18949 + gs = get_user_gs(regs);
18950
18951 printk("\n");
18952
18953 @@ -210,10 +210,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
18954 regs.bx = (unsigned long) fn;
18955 regs.dx = (unsigned long) arg;
18956
18957 - regs.ds = __USER_DS;
18958 - regs.es = __USER_DS;
18959 + regs.ds = __KERNEL_DS;
18960 + regs.es = __KERNEL_DS;
18961 regs.fs = __KERNEL_PERCPU;
18962 - regs.gs = __KERNEL_STACK_CANARY;
18963 + savesegment(gs, regs.gs);
18964 regs.orig_ax = -1;
18965 regs.ip = (unsigned long) kernel_thread_helper;
18966 regs.cs = __KERNEL_CS | get_kernel_rpl();
18967 @@ -247,13 +247,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18968 struct task_struct *tsk;
18969 int err;
18970
18971 - childregs = task_pt_regs(p);
18972 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
18973 *childregs = *regs;
18974 childregs->ax = 0;
18975 childregs->sp = sp;
18976
18977 p->thread.sp = (unsigned long) childregs;
18978 p->thread.sp0 = (unsigned long) (childregs+1);
18979 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
18980
18981 p->thread.ip = (unsigned long) ret_from_fork;
18982
18983 @@ -345,7 +346,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18984 struct thread_struct *prev = &prev_p->thread,
18985 *next = &next_p->thread;
18986 int cpu = smp_processor_id();
18987 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
18988 + struct tss_struct *tss = init_tss + cpu;
18989 bool preload_fpu;
18990
18991 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
18992 @@ -380,6 +381,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18993 */
18994 lazy_save_gs(prev->gs);
18995
18996 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18997 + __set_fs(task_thread_info(next_p)->addr_limit);
18998 +#endif
18999 +
19000 /*
19001 * Load the per-thread Thread-Local Storage descriptor.
19002 */
19003 @@ -415,6 +420,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
19004 */
19005 arch_end_context_switch(next_p);
19006
19007 + percpu_write(current_task, next_p);
19008 + percpu_write(current_tinfo, &next_p->tinfo);
19009 +
19010 if (preload_fpu)
19011 __math_state_restore();
19012
19013 @@ -424,8 +432,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
19014 if (prev->gs | next->gs)
19015 lazy_load_gs(next->gs);
19016
19017 - percpu_write(current_task, next_p);
19018 -
19019 return prev_p;
19020 }
19021
19022 @@ -495,4 +501,3 @@ unsigned long get_wchan(struct task_struct *p)
19023 } while (count++ < 16);
19024 return 0;
19025 }
19026 -
19027 diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
19028 index 39493bc..196816d 100644
19029 --- a/arch/x86/kernel/process_64.c
19030 +++ b/arch/x86/kernel/process_64.c
19031 @@ -91,7 +91,7 @@ static void __exit_idle(void)
19032 void exit_idle(void)
19033 {
19034 /* idle loop has pid 0 */
19035 - if (current->pid)
19036 + if (task_pid_nr(current))
19037 return;
19038 __exit_idle();
19039 }
19040 @@ -170,7 +170,7 @@ void __show_regs(struct pt_regs *regs, int all)
19041 if (!board)
19042 board = "";
19043 printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s %s\n",
19044 - current->pid, current->comm, print_tainted(),
19045 + task_pid_nr(current), current->comm, print_tainted(),
19046 init_utsname()->release,
19047 (int)strcspn(init_utsname()->version, " "),
19048 init_utsname()->version, board);
19049 @@ -280,8 +280,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
19050 struct pt_regs *childregs;
19051 struct task_struct *me = current;
19052
19053 - childregs = ((struct pt_regs *)
19054 - (THREAD_SIZE + task_stack_page(p))) - 1;
19055 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
19056 *childregs = *regs;
19057
19058 childregs->ax = 0;
19059 @@ -292,6 +291,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
19060 p->thread.sp = (unsigned long) childregs;
19061 p->thread.sp0 = (unsigned long) (childregs+1);
19062 p->thread.usersp = me->thread.usersp;
19063 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
19064
19065 set_tsk_thread_flag(p, TIF_FORK);
19066
19067 @@ -379,7 +379,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
19068 struct thread_struct *prev = &prev_p->thread;
19069 struct thread_struct *next = &next_p->thread;
19070 int cpu = smp_processor_id();
19071 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
19072 + struct tss_struct *tss = init_tss + cpu;
19073 unsigned fsindex, gsindex;
19074 bool preload_fpu;
19075
19076 @@ -475,10 +475,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
19077 prev->usersp = percpu_read(old_rsp);
19078 percpu_write(old_rsp, next->usersp);
19079 percpu_write(current_task, next_p);
19080 + percpu_write(current_tinfo, &next_p->tinfo);
19081
19082 - percpu_write(kernel_stack,
19083 - (unsigned long)task_stack_page(next_p) +
19084 - THREAD_SIZE - KERNEL_STACK_OFFSET);
19085 + percpu_write(kernel_stack, next->sp0);
19086
19087 /*
19088 * Now maybe reload the debug registers and handle I/O bitmaps
19089 @@ -559,12 +558,11 @@ unsigned long get_wchan(struct task_struct *p)
19090 if (!p || p == current || p->state == TASK_RUNNING)
19091 return 0;
19092 stack = (unsigned long)task_stack_page(p);
19093 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
19094 + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
19095 return 0;
19096 fp = *(u64 *)(p->thread.sp);
19097 do {
19098 - if (fp < (unsigned long)stack ||
19099 - fp >= (unsigned long)stack+THREAD_SIZE)
19100 + if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
19101 return 0;
19102 ip = *(u64 *)(fp+8);
19103 if (!in_sched_functions(ip))
19104 diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
19105 index c06acdd..3f5fff5 100644
19106 --- a/arch/x86/kernel/ptrace.c
19107 +++ b/arch/x86/kernel/ptrace.c
19108 @@ -925,7 +925,7 @@ static const struct user_regset_view user_x86_32_view; /* Initialized below. */
19109 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
19110 {
19111 int ret;
19112 - unsigned long __user *datap = (unsigned long __user *)data;
19113 + unsigned long __user *datap = (__force unsigned long __user *)data;
19114
19115 switch (request) {
19116 /* read the word at location addr in the USER area. */
19117 @@ -1012,14 +1012,14 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
19118 if (addr < 0)
19119 return -EIO;
19120 ret = do_get_thread_area(child, addr,
19121 - (struct user_desc __user *) data);
19122 + (__force struct user_desc __user *) data);
19123 break;
19124
19125 case PTRACE_SET_THREAD_AREA:
19126 if (addr < 0)
19127 return -EIO;
19128 ret = do_set_thread_area(child, addr,
19129 - (struct user_desc __user *) data, 0);
19130 + (__force struct user_desc __user *) data, 0);
19131 break;
19132 #endif
19133
19134 @@ -1038,12 +1038,12 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
19135 #ifdef CONFIG_X86_PTRACE_BTS
19136 case PTRACE_BTS_CONFIG:
19137 ret = ptrace_bts_config
19138 - (child, data, (struct ptrace_bts_config __user *)addr);
19139 + (child, data, (__force struct ptrace_bts_config __user *)addr);
19140 break;
19141
19142 case PTRACE_BTS_STATUS:
19143 ret = ptrace_bts_status
19144 - (child, data, (struct ptrace_bts_config __user *)addr);
19145 + (child, data, (__force struct ptrace_bts_config __user *)addr);
19146 break;
19147
19148 case PTRACE_BTS_SIZE:
19149 @@ -1052,7 +1052,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
19150
19151 case PTRACE_BTS_GET:
19152 ret = ptrace_bts_read_record
19153 - (child, data, (struct bts_struct __user *) addr);
19154 + (child, data, (__force struct bts_struct __user *) addr);
19155 break;
19156
19157 case PTRACE_BTS_CLEAR:
19158 @@ -1061,7 +1061,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
19159
19160 case PTRACE_BTS_DRAIN:
19161 ret = ptrace_bts_drain
19162 - (child, data, (struct bts_struct __user *) addr);
19163 + (child, data, (__force struct bts_struct __user *) addr);
19164 break;
19165 #endif /* CONFIG_X86_PTRACE_BTS */
19166
19167 @@ -1450,7 +1450,7 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
19168 info.si_code = si_code;
19169
19170 /* User-mode ip? */
19171 - info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;
19172 + info.si_addr = user_mode(regs) ? (__force void __user *) regs->ip : NULL;
19173
19174 /* Send us the fake SIGTRAP */
19175 force_sig_info(SIGTRAP, &info, tsk);
19176 @@ -1469,7 +1469,7 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
19177 * We must return the syscall number to actually look up in the table.
19178 * This can be -1L to skip running any syscall at all.
19179 */
19180 -asmregparm long syscall_trace_enter(struct pt_regs *regs)
19181 +long syscall_trace_enter(struct pt_regs *regs)
19182 {
19183 long ret = 0;
19184
19185 @@ -1514,7 +1514,7 @@ asmregparm long syscall_trace_enter(struct pt_regs *regs)
19186 return ret ?: regs->orig_ax;
19187 }
19188
19189 -asmregparm void syscall_trace_leave(struct pt_regs *regs)
19190 +void syscall_trace_leave(struct pt_regs *regs)
19191 {
19192 if (unlikely(current->audit_context))
19193 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
19194 diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
19195 index cf98100..e76e03d 100644
19196 --- a/arch/x86/kernel/reboot.c
19197 +++ b/arch/x86/kernel/reboot.c
19198 @@ -33,7 +33,7 @@ void (*pm_power_off)(void);
19199 EXPORT_SYMBOL(pm_power_off);
19200
19201 static const struct desc_ptr no_idt = {};
19202 -static int reboot_mode;
19203 +static unsigned short reboot_mode;
19204 enum reboot_type reboot_type = BOOT_KBD;
19205 int reboot_force;
19206
19207 @@ -292,12 +292,12 @@ core_initcall(reboot_init);
19208 controller to pulse the CPU reset line, which is more thorough, but
19209 doesn't work with at least one type of 486 motherboard. It is easy
19210 to stop this code working; hence the copious comments. */
19211 -static const unsigned long long
19212 -real_mode_gdt_entries [3] =
19213 +static struct desc_struct
19214 +real_mode_gdt_entries [3] __read_only =
19215 {
19216 - 0x0000000000000000ULL, /* Null descriptor */
19217 - 0x00009b000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */
19218 - 0x000093000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */
19219 + GDT_ENTRY_INIT(0, 0, 0), /* Null descriptor */
19220 + GDT_ENTRY_INIT(0x9b, 0, 0xffff), /* 16-bit real-mode 64k code at 0x00000000 */
19221 + GDT_ENTRY_INIT(0x93, 0x100, 0xffff) /* 16-bit real-mode 64k data at 0x00000100 */
19222 };
19223
19224 static const struct desc_ptr
19225 @@ -346,7 +346,7 @@ static const unsigned char jump_to_bios [] =
19226 * specified by the code and length parameters.
19227 * We assume that length will aways be less that 100!
19228 */
19229 -void machine_real_restart(const unsigned char *code, int length)
19230 +__noreturn void machine_real_restart(const unsigned char *code, unsigned int length)
19231 {
19232 local_irq_disable();
19233
19234 @@ -366,8 +366,8 @@ void machine_real_restart(const unsigned char *code, int length)
19235 /* Remap the kernel at virtual address zero, as well as offset zero
19236 from the kernel segment. This assumes the kernel segment starts at
19237 virtual address PAGE_OFFSET. */
19238 - memcpy(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19239 - sizeof(swapper_pg_dir [0]) * KERNEL_PGD_PTRS);
19240 + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19241 + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
19242
19243 /*
19244 * Use `swapper_pg_dir' as our page directory.
19245 @@ -379,16 +379,15 @@ void machine_real_restart(const unsigned char *code, int length)
19246 boot)". This seems like a fairly standard thing that gets set by
19247 REBOOT.COM programs, and the previous reset routine did this
19248 too. */
19249 - *((unsigned short *)0x472) = reboot_mode;
19250 + *(unsigned short *)(__va(0x472)) = reboot_mode;
19251
19252 /* For the switch to real mode, copy some code to low memory. It has
19253 to be in the first 64k because it is running in 16-bit mode, and it
19254 has to have the same physical and virtual address, because it turns
19255 off paging. Copy it near the end of the first page, out of the way
19256 of BIOS variables. */
19257 - memcpy((void *)(0x1000 - sizeof(real_mode_switch) - 100),
19258 - real_mode_switch, sizeof (real_mode_switch));
19259 - memcpy((void *)(0x1000 - 100), code, length);
19260 + memcpy(__va(0x1000 - sizeof (real_mode_switch) - 100), real_mode_switch, sizeof (real_mode_switch));
19261 + memcpy(__va(0x1000 - 100), code, length);
19262
19263 /* Set up the IDT for real mode. */
19264 load_idt(&real_mode_idt);
19265 @@ -416,6 +415,7 @@ void machine_real_restart(const unsigned char *code, int length)
19266 __asm__ __volatile__ ("ljmp $0x0008,%0"
19267 :
19268 : "i" ((void *)(0x1000 - sizeof (real_mode_switch) - 100)));
19269 + do { } while (1);
19270 }
19271 #ifdef CONFIG_APM_MODULE
19272 EXPORT_SYMBOL(machine_real_restart);
19273 @@ -544,7 +544,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
19274 {
19275 }
19276
19277 -static void native_machine_emergency_restart(void)
19278 +__noreturn static void native_machine_emergency_restart(void)
19279 {
19280 int i;
19281
19282 @@ -659,13 +659,13 @@ void native_machine_shutdown(void)
19283 #endif
19284 }
19285
19286 -static void __machine_emergency_restart(int emergency)
19287 +static __noreturn void __machine_emergency_restart(int emergency)
19288 {
19289 reboot_emergency = emergency;
19290 machine_ops.emergency_restart();
19291 }
19292
19293 -static void native_machine_restart(char *__unused)
19294 +static __noreturn void native_machine_restart(char *__unused)
19295 {
19296 printk("machine restart\n");
19297
19298 @@ -674,7 +674,7 @@ static void native_machine_restart(char *__unused)
19299 __machine_emergency_restart(0);
19300 }
19301
19302 -static void native_machine_halt(void)
19303 +static __noreturn void native_machine_halt(void)
19304 {
19305 /* stop other cpus and apics */
19306 machine_shutdown();
19307 @@ -685,7 +685,7 @@ static void native_machine_halt(void)
19308 stop_this_cpu(NULL);
19309 }
19310
19311 -static void native_machine_power_off(void)
19312 +__noreturn static void native_machine_power_off(void)
19313 {
19314 if (pm_power_off) {
19315 if (!reboot_force)
19316 @@ -694,6 +694,7 @@ static void native_machine_power_off(void)
19317 }
19318 /* a fallback in case there is no PM info available */
19319 tboot_shutdown(TB_SHUTDOWN_HALT);
19320 + do { } while (1);
19321 }
19322
19323 struct machine_ops machine_ops = {
19324 diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
19325 index 7a6f3b3..976a959 100644
19326 --- a/arch/x86/kernel/relocate_kernel_64.S
19327 +++ b/arch/x86/kernel/relocate_kernel_64.S
19328 @@ -11,6 +11,7 @@
19329 #include <asm/kexec.h>
19330 #include <asm/processor-flags.h>
19331 #include <asm/pgtable_types.h>
19332 +#include <asm/alternative-asm.h>
19333
19334 /*
19335 * Must be relocatable PIC code callable as a C function
19336 @@ -167,6 +168,7 @@ identity_mapped:
19337 xorq %r14, %r14
19338 xorq %r15, %r15
19339
19340 + pax_force_retaddr 0, 1
19341 ret
19342
19343 1:
19344 diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
19345 index 5449a26..0b6c759 100644
19346 --- a/arch/x86/kernel/setup.c
19347 +++ b/arch/x86/kernel/setup.c
19348 @@ -783,14 +783,14 @@ void __init setup_arch(char **cmdline_p)
19349
19350 if (!boot_params.hdr.root_flags)
19351 root_mountflags &= ~MS_RDONLY;
19352 - init_mm.start_code = (unsigned long) _text;
19353 - init_mm.end_code = (unsigned long) _etext;
19354 + init_mm.start_code = ktla_ktva((unsigned long) _text);
19355 + init_mm.end_code = ktla_ktva((unsigned long) _etext);
19356 init_mm.end_data = (unsigned long) _edata;
19357 init_mm.brk = _brk_end;
19358
19359 - code_resource.start = virt_to_phys(_text);
19360 - code_resource.end = virt_to_phys(_etext)-1;
19361 - data_resource.start = virt_to_phys(_etext);
19362 + code_resource.start = virt_to_phys(ktla_ktva(_text));
19363 + code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
19364 + data_resource.start = virt_to_phys(_sdata);
19365 data_resource.end = virt_to_phys(_edata)-1;
19366 bss_resource.start = virt_to_phys(&__bss_start);
19367 bss_resource.end = virt_to_phys(&__bss_stop)-1;
19368 diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
19369 index d559af9..524c6ad 100644
19370 --- a/arch/x86/kernel/setup_percpu.c
19371 +++ b/arch/x86/kernel/setup_percpu.c
19372 @@ -25,19 +25,17 @@
19373 # define DBG(x...)
19374 #endif
19375
19376 -DEFINE_PER_CPU(int, cpu_number);
19377 +#ifdef CONFIG_SMP
19378 +DEFINE_PER_CPU(unsigned int, cpu_number);
19379 EXPORT_PER_CPU_SYMBOL(cpu_number);
19380 +#endif
19381
19382 -#ifdef CONFIG_X86_64
19383 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
19384 -#else
19385 -#define BOOT_PERCPU_OFFSET 0
19386 -#endif
19387
19388 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
19389 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
19390
19391 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
19392 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
19393 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
19394 };
19395 EXPORT_SYMBOL(__per_cpu_offset);
19396 @@ -159,10 +157,10 @@ static inline void setup_percpu_segment(int cpu)
19397 {
19398 #ifdef CONFIG_X86_32
19399 struct desc_struct gdt;
19400 + unsigned long base = per_cpu_offset(cpu);
19401
19402 - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
19403 - 0x2 | DESCTYPE_S, 0x8);
19404 - gdt.s = 1;
19405 + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
19406 + 0x83 | DESCTYPE_S, 0xC);
19407 write_gdt_entry(get_cpu_gdt_table(cpu),
19408 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
19409 #endif
19410 @@ -212,6 +210,11 @@ void __init setup_per_cpu_areas(void)
19411 /* alrighty, percpu areas up and running */
19412 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
19413 for_each_possible_cpu(cpu) {
19414 +#ifdef CONFIG_CC_STACKPROTECTOR
19415 +#ifdef CONFIG_X86_32
19416 + unsigned long canary = per_cpu(stack_canary.canary, cpu);
19417 +#endif
19418 +#endif
19419 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
19420 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
19421 per_cpu(cpu_number, cpu) = cpu;
19422 @@ -239,6 +242,12 @@ void __init setup_per_cpu_areas(void)
19423 early_per_cpu_map(x86_cpu_to_node_map, cpu);
19424 #endif
19425 #endif
19426 +#ifdef CONFIG_CC_STACKPROTECTOR
19427 +#ifdef CONFIG_X86_32
19428 + if (!cpu)
19429 + per_cpu(stack_canary.canary, cpu) = canary;
19430 +#endif
19431 +#endif
19432 /*
19433 * Up to this point, the boot CPU has been using .data.init
19434 * area. Reload any changed state for the boot CPU.
19435 diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
19436 index 6a44a76..a9287a1 100644
19437 --- a/arch/x86/kernel/signal.c
19438 +++ b/arch/x86/kernel/signal.c
19439 @@ -197,7 +197,7 @@ static unsigned long align_sigframe(unsigned long sp)
19440 * Align the stack pointer according to the i386 ABI,
19441 * i.e. so that on function entry ((sp + 4) & 15) == 0.
19442 */
19443 - sp = ((sp + 4) & -16ul) - 4;
19444 + sp = ((sp - 12) & -16ul) - 4;
19445 #else /* !CONFIG_X86_32 */
19446 sp = round_down(sp, 16) - 8;
19447 #endif
19448 @@ -248,11 +248,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
19449 * Return an always-bogus address instead so we will die with SIGSEGV.
19450 */
19451 if (onsigstack && !likely(on_sig_stack(sp)))
19452 - return (void __user *)-1L;
19453 + return (__force void __user *)-1L;
19454
19455 /* save i387 state */
19456 if (used_math() && save_i387_xstate(*fpstate) < 0)
19457 - return (void __user *)-1L;
19458 + return (__force void __user *)-1L;
19459
19460 return (void __user *)sp;
19461 }
19462 @@ -307,9 +307,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
19463 }
19464
19465 if (current->mm->context.vdso)
19466 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
19467 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
19468 else
19469 - restorer = &frame->retcode;
19470 + restorer = (void __user *)&frame->retcode;
19471 if (ka->sa.sa_flags & SA_RESTORER)
19472 restorer = ka->sa.sa_restorer;
19473
19474 @@ -323,7 +323,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
19475 * reasons and because gdb uses it as a signature to notice
19476 * signal handler stack frames.
19477 */
19478 - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
19479 + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
19480
19481 if (err)
19482 return -EFAULT;
19483 @@ -377,7 +377,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
19484 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
19485
19486 /* Set up to return from userspace. */
19487 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
19488 + if (current->mm->context.vdso)
19489 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
19490 + else
19491 + restorer = (void __user *)&frame->retcode;
19492 if (ka->sa.sa_flags & SA_RESTORER)
19493 restorer = ka->sa.sa_restorer;
19494 put_user_ex(restorer, &frame->pretcode);
19495 @@ -389,7 +392,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
19496 * reasons and because gdb uses it as a signature to notice
19497 * signal handler stack frames.
19498 */
19499 - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
19500 + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
19501 } put_user_catch(err);
19502
19503 if (err)
19504 @@ -782,6 +785,8 @@ static void do_signal(struct pt_regs *regs)
19505 int signr;
19506 sigset_t *oldset;
19507
19508 + pax_track_stack();
19509 +
19510 /*
19511 * We want the common case to go fast, which is why we may in certain
19512 * cases get here from kernel mode. Just return without doing anything
19513 @@ -789,7 +794,7 @@ static void do_signal(struct pt_regs *regs)
19514 * X86_32: vm86 regs switched out by assembly code before reaching
19515 * here, so testing against kernel CS suffices.
19516 */
19517 - if (!user_mode(regs))
19518 + if (!user_mode_novm(regs))
19519 return;
19520
19521 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
19522 diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
19523 index 7e8e905..64d5c32 100644
19524 --- a/arch/x86/kernel/smpboot.c
19525 +++ b/arch/x86/kernel/smpboot.c
19526 @@ -94,14 +94,14 @@ static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
19527 */
19528 static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
19529
19530 -void cpu_hotplug_driver_lock()
19531 +void cpu_hotplug_driver_lock(void)
19532 {
19533 - mutex_lock(&x86_cpu_hotplug_driver_mutex);
19534 + mutex_lock(&x86_cpu_hotplug_driver_mutex);
19535 }
19536
19537 -void cpu_hotplug_driver_unlock()
19538 +void cpu_hotplug_driver_unlock(void)
19539 {
19540 - mutex_unlock(&x86_cpu_hotplug_driver_mutex);
19541 + mutex_unlock(&x86_cpu_hotplug_driver_mutex);
19542 }
19543
19544 ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
19545 @@ -625,7 +625,7 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
19546 * target processor state.
19547 */
19548 startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
19549 - (unsigned long)stack_start.sp);
19550 + stack_start);
19551
19552 /*
19553 * Run STARTUP IPI loop.
19554 @@ -743,6 +743,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
19555 set_idle_for_cpu(cpu, c_idle.idle);
19556 do_rest:
19557 per_cpu(current_task, cpu) = c_idle.idle;
19558 + per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
19559 #ifdef CONFIG_X86_32
19560 /* Stack for startup_32 can be just as for start_secondary onwards */
19561 irq_ctx_init(cpu);
19562 @@ -750,13 +751,15 @@ do_rest:
19563 #else
19564 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
19565 initial_gs = per_cpu_offset(cpu);
19566 - per_cpu(kernel_stack, cpu) =
19567 - (unsigned long)task_stack_page(c_idle.idle) -
19568 - KERNEL_STACK_OFFSET + THREAD_SIZE;
19569 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
19570 #endif
19571 +
19572 + pax_open_kernel();
19573 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
19574 + pax_close_kernel();
19575 +
19576 initial_code = (unsigned long)start_secondary;
19577 - stack_start.sp = (void *) c_idle.idle->thread.sp;
19578 + stack_start = c_idle.idle->thread.sp;
19579
19580 /* start_ip had better be page-aligned! */
19581 start_ip = setup_trampoline();
19582 @@ -891,6 +894,12 @@ int __cpuinit native_cpu_up(unsigned int cpu)
19583
19584 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
19585
19586 +#ifdef CONFIG_PAX_PER_CPU_PGD
19587 + clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
19588 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19589 + KERNEL_PGD_PTRS);
19590 +#endif
19591 +
19592 err = do_boot_cpu(apicid, cpu);
19593
19594 if (err) {
19595 diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
19596 index 3149032..14f1053 100644
19597 --- a/arch/x86/kernel/step.c
19598 +++ b/arch/x86/kernel/step.c
19599 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
19600 struct desc_struct *desc;
19601 unsigned long base;
19602
19603 - seg &= ~7UL;
19604 + seg >>= 3;
19605
19606 mutex_lock(&child->mm->context.lock);
19607 - if (unlikely((seg >> 3) >= child->mm->context.size))
19608 + if (unlikely(seg >= child->mm->context.size))
19609 addr = -1L; /* bogus selector, access would fault */
19610 else {
19611 desc = child->mm->context.ldt + seg;
19612 @@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
19613 addr += base;
19614 }
19615 mutex_unlock(&child->mm->context.lock);
19616 - }
19617 + } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
19618 + addr = ktla_ktva(addr);
19619
19620 return addr;
19621 }
19622 @@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
19623 unsigned char opcode[15];
19624 unsigned long addr = convert_ip_to_linear(child, regs);
19625
19626 + if (addr == -EINVAL)
19627 + return 0;
19628 +
19629 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
19630 for (i = 0; i < copied; i++) {
19631 switch (opcode[i]) {
19632 @@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
19633
19634 #ifdef CONFIG_X86_64
19635 case 0x40 ... 0x4f:
19636 - if (regs->cs != __USER_CS)
19637 + if ((regs->cs & 0xffff) != __USER_CS)
19638 /* 32-bit mode: register increment */
19639 return 0;
19640 /* 64-bit mode: REX prefix */
19641 diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
19642 index dee1ff7..a397f7f 100644
19643 --- a/arch/x86/kernel/sys_i386_32.c
19644 +++ b/arch/x86/kernel/sys_i386_32.c
19645 @@ -24,6 +24,21 @@
19646
19647 #include <asm/syscalls.h>
19648
19649 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
19650 +{
19651 + unsigned long pax_task_size = TASK_SIZE;
19652 +
19653 +#ifdef CONFIG_PAX_SEGMEXEC
19654 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
19655 + pax_task_size = SEGMEXEC_TASK_SIZE;
19656 +#endif
19657 +
19658 + if (len > pax_task_size || addr > pax_task_size - len)
19659 + return -EINVAL;
19660 +
19661 + return 0;
19662 +}
19663 +
19664 /*
19665 * Perform the select(nd, in, out, ex, tv) and mmap() system
19666 * calls. Linux/i386 didn't use to be able to handle more than
19667 @@ -58,6 +73,212 @@ out:
19668 return err;
19669 }
19670
19671 +unsigned long
19672 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
19673 + unsigned long len, unsigned long pgoff, unsigned long flags)
19674 +{
19675 + struct mm_struct *mm = current->mm;
19676 + struct vm_area_struct *vma;
19677 + unsigned long start_addr, pax_task_size = TASK_SIZE;
19678 +
19679 +#ifdef CONFIG_PAX_SEGMEXEC
19680 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19681 + pax_task_size = SEGMEXEC_TASK_SIZE;
19682 +#endif
19683 +
19684 + pax_task_size -= PAGE_SIZE;
19685 +
19686 + if (len > pax_task_size)
19687 + return -ENOMEM;
19688 +
19689 + if (flags & MAP_FIXED)
19690 + return addr;
19691 +
19692 +#ifdef CONFIG_PAX_RANDMMAP
19693 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19694 +#endif
19695 +
19696 + if (addr) {
19697 + addr = PAGE_ALIGN(addr);
19698 + if (pax_task_size - len >= addr) {
19699 + vma = find_vma(mm, addr);
19700 + if (check_heap_stack_gap(vma, addr, len))
19701 + return addr;
19702 + }
19703 + }
19704 + if (len > mm->cached_hole_size) {
19705 + start_addr = addr = mm->free_area_cache;
19706 + } else {
19707 + start_addr = addr = mm->mmap_base;
19708 + mm->cached_hole_size = 0;
19709 + }
19710 +
19711 +#ifdef CONFIG_PAX_PAGEEXEC
19712 + if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
19713 + start_addr = 0x00110000UL;
19714 +
19715 +#ifdef CONFIG_PAX_RANDMMAP
19716 + if (mm->pax_flags & MF_PAX_RANDMMAP)
19717 + start_addr += mm->delta_mmap & 0x03FFF000UL;
19718 +#endif
19719 +
19720 + if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
19721 + start_addr = addr = mm->mmap_base;
19722 + else
19723 + addr = start_addr;
19724 + }
19725 +#endif
19726 +
19727 +full_search:
19728 + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
19729 + /* At this point: (!vma || addr < vma->vm_end). */
19730 + if (pax_task_size - len < addr) {
19731 + /*
19732 + * Start a new search - just in case we missed
19733 + * some holes.
19734 + */
19735 + if (start_addr != mm->mmap_base) {
19736 + start_addr = addr = mm->mmap_base;
19737 + mm->cached_hole_size = 0;
19738 + goto full_search;
19739 + }
19740 + return -ENOMEM;
19741 + }
19742 + if (check_heap_stack_gap(vma, addr, len))
19743 + break;
19744 + if (addr + mm->cached_hole_size < vma->vm_start)
19745 + mm->cached_hole_size = vma->vm_start - addr;
19746 + addr = vma->vm_end;
19747 + if (mm->start_brk <= addr && addr < mm->mmap_base) {
19748 + start_addr = addr = mm->mmap_base;
19749 + mm->cached_hole_size = 0;
19750 + goto full_search;
19751 + }
19752 + }
19753 +
19754 + /*
19755 + * Remember the place where we stopped the search:
19756 + */
19757 + mm->free_area_cache = addr + len;
19758 + return addr;
19759 +}
19760 +
19761 +unsigned long
19762 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19763 + const unsigned long len, const unsigned long pgoff,
19764 + const unsigned long flags)
19765 +{
19766 + struct vm_area_struct *vma;
19767 + struct mm_struct *mm = current->mm;
19768 + unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
19769 +
19770 +#ifdef CONFIG_PAX_SEGMEXEC
19771 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19772 + pax_task_size = SEGMEXEC_TASK_SIZE;
19773 +#endif
19774 +
19775 + pax_task_size -= PAGE_SIZE;
19776 +
19777 + /* requested length too big for entire address space */
19778 + if (len > pax_task_size)
19779 + return -ENOMEM;
19780 +
19781 + if (flags & MAP_FIXED)
19782 + return addr;
19783 +
19784 +#ifdef CONFIG_PAX_PAGEEXEC
19785 + if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
19786 + goto bottomup;
19787 +#endif
19788 +
19789 +#ifdef CONFIG_PAX_RANDMMAP
19790 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19791 +#endif
19792 +
19793 + /* requesting a specific address */
19794 + if (addr) {
19795 + addr = PAGE_ALIGN(addr);
19796 + if (pax_task_size - len >= addr) {
19797 + vma = find_vma(mm, addr);
19798 + if (check_heap_stack_gap(vma, addr, len))
19799 + return addr;
19800 + }
19801 + }
19802 +
19803 + /* check if free_area_cache is useful for us */
19804 + if (len <= mm->cached_hole_size) {
19805 + mm->cached_hole_size = 0;
19806 + mm->free_area_cache = mm->mmap_base;
19807 + }
19808 +
19809 + /* either no address requested or can't fit in requested address hole */
19810 + addr = mm->free_area_cache;
19811 +
19812 + /* make sure it can fit in the remaining address space */
19813 + if (addr > len) {
19814 + vma = find_vma(mm, addr-len);
19815 + if (check_heap_stack_gap(vma, addr - len, len))
19816 + /* remember the address as a hint for next time */
19817 + return (mm->free_area_cache = addr-len);
19818 + }
19819 +
19820 + if (mm->mmap_base < len)
19821 + goto bottomup;
19822 +
19823 + addr = mm->mmap_base-len;
19824 +
19825 + do {
19826 + /*
19827 + * Lookup failure means no vma is above this address,
19828 + * else if new region fits below vma->vm_start,
19829 + * return with success:
19830 + */
19831 + vma = find_vma(mm, addr);
19832 + if (check_heap_stack_gap(vma, addr, len))
19833 + /* remember the address as a hint for next time */
19834 + return (mm->free_area_cache = addr);
19835 +
19836 + /* remember the largest hole we saw so far */
19837 + if (addr + mm->cached_hole_size < vma->vm_start)
19838 + mm->cached_hole_size = vma->vm_start - addr;
19839 +
19840 + /* try just below the current vma->vm_start */
19841 + addr = skip_heap_stack_gap(vma, len);
19842 + } while (!IS_ERR_VALUE(addr));
19843 +
19844 +bottomup:
19845 + /*
19846 + * A failed mmap() very likely causes application failure,
19847 + * so fall back to the bottom-up function here. This scenario
19848 + * can happen with large stack limits and large mmap()
19849 + * allocations.
19850 + */
19851 +
19852 +#ifdef CONFIG_PAX_SEGMEXEC
19853 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19854 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
19855 + else
19856 +#endif
19857 +
19858 + mm->mmap_base = TASK_UNMAPPED_BASE;
19859 +
19860 +#ifdef CONFIG_PAX_RANDMMAP
19861 + if (mm->pax_flags & MF_PAX_RANDMMAP)
19862 + mm->mmap_base += mm->delta_mmap;
19863 +#endif
19864 +
19865 + mm->free_area_cache = mm->mmap_base;
19866 + mm->cached_hole_size = ~0UL;
19867 + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
19868 + /*
19869 + * Restore the topdown base:
19870 + */
19871 + mm->mmap_base = base;
19872 + mm->free_area_cache = base;
19873 + mm->cached_hole_size = ~0UL;
19874 +
19875 + return addr;
19876 +}
19877
19878 struct sel_arg_struct {
19879 unsigned long n;
19880 @@ -93,7 +314,7 @@ asmlinkage int sys_ipc(uint call, int first, int second,
19881 return sys_semtimedop(first, (struct sembuf __user *)ptr, second, NULL);
19882 case SEMTIMEDOP:
19883 return sys_semtimedop(first, (struct sembuf __user *)ptr, second,
19884 - (const struct timespec __user *)fifth);
19885 + (__force const struct timespec __user *)fifth);
19886
19887 case SEMGET:
19888 return sys_semget(first, second, third);
19889 @@ -140,7 +361,7 @@ asmlinkage int sys_ipc(uint call, int first, int second,
19890 ret = do_shmat(first, (char __user *) ptr, second, &raddr);
19891 if (ret)
19892 return ret;
19893 - return put_user(raddr, (ulong __user *) third);
19894 + return put_user(raddr, (__force ulong __user *) third);
19895 }
19896 case 1: /* iBCS2 emulator entry point */
19897 if (!segment_eq(get_fs(), get_ds()))
19898 @@ -207,17 +428,3 @@ asmlinkage int sys_olduname(struct oldold_utsname __user *name)
19899
19900 return error;
19901 }
19902 -
19903 -
19904 -/*
19905 - * Do a system call from kernel instead of calling sys_execve so we
19906 - * end up with proper pt_regs.
19907 - */
19908 -int kernel_execve(const char *filename, char *const argv[], char *const envp[])
19909 -{
19910 - long __res;
19911 - asm volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx"
19912 - : "=a" (__res)
19913 - : "0" (__NR_execve), "ri" (filename), "c" (argv), "d" (envp) : "memory");
19914 - return __res;
19915 -}
19916 diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
19917 index 8aa2057..b604bc1 100644
19918 --- a/arch/x86/kernel/sys_x86_64.c
19919 +++ b/arch/x86/kernel/sys_x86_64.c
19920 @@ -32,8 +32,8 @@ out:
19921 return error;
19922 }
19923
19924 -static void find_start_end(unsigned long flags, unsigned long *begin,
19925 - unsigned long *end)
19926 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
19927 + unsigned long *begin, unsigned long *end)
19928 {
19929 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
19930 unsigned long new_begin;
19931 @@ -52,7 +52,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
19932 *begin = new_begin;
19933 }
19934 } else {
19935 - *begin = TASK_UNMAPPED_BASE;
19936 + *begin = mm->mmap_base;
19937 *end = TASK_SIZE;
19938 }
19939 }
19940 @@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
19941 if (flags & MAP_FIXED)
19942 return addr;
19943
19944 - find_start_end(flags, &begin, &end);
19945 + find_start_end(mm, flags, &begin, &end);
19946
19947 if (len > end)
19948 return -ENOMEM;
19949
19950 +#ifdef CONFIG_PAX_RANDMMAP
19951 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19952 +#endif
19953 +
19954 if (addr) {
19955 addr = PAGE_ALIGN(addr);
19956 vma = find_vma(mm, addr);
19957 - if (end - len >= addr &&
19958 - (!vma || addr + len <= vma->vm_start))
19959 + if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
19960 return addr;
19961 }
19962 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
19963 @@ -106,7 +109,7 @@ full_search:
19964 }
19965 return -ENOMEM;
19966 }
19967 - if (!vma || addr + len <= vma->vm_start) {
19968 + if (check_heap_stack_gap(vma, addr, len)) {
19969 /*
19970 * Remember the place where we stopped the search:
19971 */
19972 @@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19973 {
19974 struct vm_area_struct *vma;
19975 struct mm_struct *mm = current->mm;
19976 - unsigned long addr = addr0;
19977 + unsigned long base = mm->mmap_base, addr = addr0;
19978
19979 /* requested length too big for entire address space */
19980 if (len > TASK_SIZE)
19981 @@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19982 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
19983 goto bottomup;
19984
19985 +#ifdef CONFIG_PAX_RANDMMAP
19986 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19987 +#endif
19988 +
19989 /* requesting a specific address */
19990 if (addr) {
19991 addr = PAGE_ALIGN(addr);
19992 - vma = find_vma(mm, addr);
19993 - if (TASK_SIZE - len >= addr &&
19994 - (!vma || addr + len <= vma->vm_start))
19995 - return addr;
19996 + if (TASK_SIZE - len >= addr) {
19997 + vma = find_vma(mm, addr);
19998 + if (check_heap_stack_gap(vma, addr, len))
19999 + return addr;
20000 + }
20001 }
20002
20003 /* check if free_area_cache is useful for us */
20004 @@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
20005 /* make sure it can fit in the remaining address space */
20006 if (addr > len) {
20007 vma = find_vma(mm, addr-len);
20008 - if (!vma || addr <= vma->vm_start)
20009 + if (check_heap_stack_gap(vma, addr - len, len))
20010 /* remember the address as a hint for next time */
20011 return mm->free_area_cache = addr-len;
20012 }
20013 @@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
20014 * return with success:
20015 */
20016 vma = find_vma(mm, addr);
20017 - if (!vma || addr+len <= vma->vm_start)
20018 + if (check_heap_stack_gap(vma, addr, len))
20019 /* remember the address as a hint for next time */
20020 return mm->free_area_cache = addr;
20021
20022 @@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
20023 mm->cached_hole_size = vma->vm_start - addr;
20024
20025 /* try just below the current vma->vm_start */
20026 - addr = vma->vm_start-len;
20027 - } while (len < vma->vm_start);
20028 + addr = skip_heap_stack_gap(vma, len);
20029 + } while (!IS_ERR_VALUE(addr));
20030
20031 bottomup:
20032 /*
20033 @@ -198,13 +206,21 @@ bottomup:
20034 * can happen with large stack limits and large mmap()
20035 * allocations.
20036 */
20037 + mm->mmap_base = TASK_UNMAPPED_BASE;
20038 +
20039 +#ifdef CONFIG_PAX_RANDMMAP
20040 + if (mm->pax_flags & MF_PAX_RANDMMAP)
20041 + mm->mmap_base += mm->delta_mmap;
20042 +#endif
20043 +
20044 + mm->free_area_cache = mm->mmap_base;
20045 mm->cached_hole_size = ~0UL;
20046 - mm->free_area_cache = TASK_UNMAPPED_BASE;
20047 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
20048 /*
20049 * Restore the topdown base:
20050 */
20051 - mm->free_area_cache = mm->mmap_base;
20052 + mm->mmap_base = base;
20053 + mm->free_area_cache = base;
20054 mm->cached_hole_size = ~0UL;
20055
20056 return addr;
20057 diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
20058 index 76d70a4..4c94a44 100644
20059 --- a/arch/x86/kernel/syscall_table_32.S
20060 +++ b/arch/x86/kernel/syscall_table_32.S
20061 @@ -1,3 +1,4 @@
20062 +.section .rodata,"a",@progbits
20063 ENTRY(sys_call_table)
20064 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
20065 .long sys_exit
20066 diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
20067 index 46b8277..3349d55 100644
20068 --- a/arch/x86/kernel/tboot.c
20069 +++ b/arch/x86/kernel/tboot.c
20070 @@ -216,7 +216,7 @@ static int tboot_setup_sleep(void)
20071
20072 void tboot_shutdown(u32 shutdown_type)
20073 {
20074 - void (*shutdown)(void);
20075 + void (* __noreturn shutdown)(void);
20076
20077 if (!tboot_enabled())
20078 return;
20079 @@ -238,7 +238,7 @@ void tboot_shutdown(u32 shutdown_type)
20080
20081 switch_to_tboot_pt();
20082
20083 - shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
20084 + shutdown = (void *)tboot->shutdown_entry;
20085 shutdown();
20086
20087 /* should not reach here */
20088 @@ -295,7 +295,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
20089 tboot_shutdown(acpi_shutdown_map[sleep_state]);
20090 }
20091
20092 -static atomic_t ap_wfs_count;
20093 +static atomic_unchecked_t ap_wfs_count;
20094
20095 static int tboot_wait_for_aps(int num_aps)
20096 {
20097 @@ -319,9 +319,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
20098 {
20099 switch (action) {
20100 case CPU_DYING:
20101 - atomic_inc(&ap_wfs_count);
20102 + atomic_inc_unchecked(&ap_wfs_count);
20103 if (num_online_cpus() == 1)
20104 - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
20105 + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
20106 return NOTIFY_BAD;
20107 break;
20108 }
20109 @@ -340,7 +340,7 @@ static __init int tboot_late_init(void)
20110
20111 tboot_create_trampoline();
20112
20113 - atomic_set(&ap_wfs_count, 0);
20114 + atomic_set_unchecked(&ap_wfs_count, 0);
20115 register_hotcpu_notifier(&tboot_cpu_notifier);
20116 return 0;
20117 }
20118 diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
20119 index be25734..87fe232 100644
20120 --- a/arch/x86/kernel/time.c
20121 +++ b/arch/x86/kernel/time.c
20122 @@ -26,17 +26,13 @@
20123 int timer_ack;
20124 #endif
20125
20126 -#ifdef CONFIG_X86_64
20127 -volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
20128 -#endif
20129 -
20130 unsigned long profile_pc(struct pt_regs *regs)
20131 {
20132 unsigned long pc = instruction_pointer(regs);
20133
20134 - if (!user_mode_vm(regs) && in_lock_functions(pc)) {
20135 + if (!user_mode(regs) && in_lock_functions(pc)) {
20136 #ifdef CONFIG_FRAME_POINTER
20137 - return *(unsigned long *)(regs->bp + sizeof(long));
20138 + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
20139 #else
20140 unsigned long *sp =
20141 (unsigned long *)kernel_stack_pointer(regs);
20142 @@ -45,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
20143 * or above a saved flags. Eflags has bits 22-31 zero,
20144 * kernel addresses don't.
20145 */
20146 +
20147 +#ifdef CONFIG_PAX_KERNEXEC
20148 + return ktla_ktva(sp[0]);
20149 +#else
20150 if (sp[0] >> 22)
20151 return sp[0];
20152 if (sp[1] >> 22)
20153 return sp[1];
20154 #endif
20155 +
20156 +#endif
20157 }
20158 return pc;
20159 }
20160 diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
20161 index 6bb7b85..dd853e1 100644
20162 --- a/arch/x86/kernel/tls.c
20163 +++ b/arch/x86/kernel/tls.c
20164 @@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
20165 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
20166 return -EINVAL;
20167
20168 +#ifdef CONFIG_PAX_SEGMEXEC
20169 + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
20170 + return -EINVAL;
20171 +#endif
20172 +
20173 set_tls_desc(p, idx, &info, 1);
20174
20175 return 0;
20176 diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
20177 index 8508237..229b664 100644
20178 --- a/arch/x86/kernel/trampoline_32.S
20179 +++ b/arch/x86/kernel/trampoline_32.S
20180 @@ -32,6 +32,12 @@
20181 #include <asm/segment.h>
20182 #include <asm/page_types.h>
20183
20184 +#ifdef CONFIG_PAX_KERNEXEC
20185 +#define ta(X) (X)
20186 +#else
20187 +#define ta(X) ((X) - __PAGE_OFFSET)
20188 +#endif
20189 +
20190 /* We can free up trampoline after bootup if cpu hotplug is not supported. */
20191 __CPUINITRODATA
20192 .code16
20193 @@ -60,7 +66,7 @@ r_base = .
20194 inc %ax # protected mode (PE) bit
20195 lmsw %ax # into protected mode
20196 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
20197 - ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
20198 + ljmpl $__BOOT_CS, $ta(startup_32_smp)
20199
20200 # These need to be in the same 64K segment as the above;
20201 # hence we don't use the boot_gdt_descr defined in head.S
20202 diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
20203 index 3af2dff..ba8aa49 100644
20204 --- a/arch/x86/kernel/trampoline_64.S
20205 +++ b/arch/x86/kernel/trampoline_64.S
20206 @@ -91,7 +91,7 @@ startup_32:
20207 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
20208 movl %eax, %ds
20209
20210 - movl $X86_CR4_PAE, %eax
20211 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
20212 movl %eax, %cr4 # Enable PAE mode
20213
20214 # Setup trampoline 4 level pagetables
20215 @@ -127,7 +127,7 @@ startup_64:
20216 no_longmode:
20217 hlt
20218 jmp no_longmode
20219 -#include "verify_cpu_64.S"
20220 +#include "verify_cpu.S"
20221
20222 # Careful these need to be in the same 64K segment as the above;
20223 tidt:
20224 @@ -138,7 +138,7 @@ tidt:
20225 # so the kernel can live anywhere
20226 .balign 4
20227 tgdt:
20228 - .short tgdt_end - tgdt # gdt limit
20229 + .short tgdt_end - tgdt - 1 # gdt limit
20230 .long tgdt - r_base
20231 .short 0
20232 .quad 0x00cf9b000000ffff # __KERNEL32_CS
20233 diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
20234 index 7e37dce..ec3f8e5 100644
20235 --- a/arch/x86/kernel/traps.c
20236 +++ b/arch/x86/kernel/traps.c
20237 @@ -69,12 +69,6 @@ asmlinkage int system_call(void);
20238
20239 /* Do we ignore FPU interrupts ? */
20240 char ignore_fpu_irq;
20241 -
20242 -/*
20243 - * The IDT has to be page-aligned to simplify the Pentium
20244 - * F0 0F bug workaround.
20245 - */
20246 -gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
20247 #endif
20248
20249 DECLARE_BITMAP(used_vectors, NR_VECTORS);
20250 @@ -112,19 +106,19 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
20251 static inline void
20252 die_if_kernel(const char *str, struct pt_regs *regs, long err)
20253 {
20254 - if (!user_mode_vm(regs))
20255 + if (!user_mode(regs))
20256 die(str, regs, err);
20257 }
20258 #endif
20259
20260 static void __kprobes
20261 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
20262 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
20263 long error_code, siginfo_t *info)
20264 {
20265 struct task_struct *tsk = current;
20266
20267 #ifdef CONFIG_X86_32
20268 - if (regs->flags & X86_VM_MASK) {
20269 + if (v8086_mode(regs)) {
20270 /*
20271 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
20272 * On nmi (interrupt 2), do_trap should not be called.
20273 @@ -135,7 +129,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
20274 }
20275 #endif
20276
20277 - if (!user_mode(regs))
20278 + if (!user_mode_novm(regs))
20279 goto kernel_trap;
20280
20281 #ifdef CONFIG_X86_32
20282 @@ -158,7 +152,7 @@ trap_signal:
20283 printk_ratelimit()) {
20284 printk(KERN_INFO
20285 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
20286 - tsk->comm, tsk->pid, str,
20287 + tsk->comm, task_pid_nr(tsk), str,
20288 regs->ip, regs->sp, error_code);
20289 print_vma_addr(" in ", regs->ip);
20290 printk("\n");
20291 @@ -175,8 +169,20 @@ kernel_trap:
20292 if (!fixup_exception(regs)) {
20293 tsk->thread.error_code = error_code;
20294 tsk->thread.trap_no = trapnr;
20295 +
20296 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20297 + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
20298 + str = "PAX: suspicious stack segment fault";
20299 +#endif
20300 +
20301 die(str, regs, error_code);
20302 }
20303 +
20304 +#ifdef CONFIG_PAX_REFCOUNT
20305 + if (trapnr == 4)
20306 + pax_report_refcount_overflow(regs);
20307 +#endif
20308 +
20309 return;
20310
20311 #ifdef CONFIG_X86_32
20312 @@ -265,14 +271,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
20313 conditional_sti(regs);
20314
20315 #ifdef CONFIG_X86_32
20316 - if (regs->flags & X86_VM_MASK)
20317 + if (v8086_mode(regs))
20318 goto gp_in_vm86;
20319 #endif
20320
20321 tsk = current;
20322 - if (!user_mode(regs))
20323 + if (!user_mode_novm(regs))
20324 goto gp_in_kernel;
20325
20326 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
20327 + if (!nx_enabled && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
20328 + struct mm_struct *mm = tsk->mm;
20329 + unsigned long limit;
20330 +
20331 + down_write(&mm->mmap_sem);
20332 + limit = mm->context.user_cs_limit;
20333 + if (limit < TASK_SIZE) {
20334 + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
20335 + up_write(&mm->mmap_sem);
20336 + return;
20337 + }
20338 + up_write(&mm->mmap_sem);
20339 + }
20340 +#endif
20341 +
20342 tsk->thread.error_code = error_code;
20343 tsk->thread.trap_no = 13;
20344
20345 @@ -305,6 +327,13 @@ gp_in_kernel:
20346 if (notify_die(DIE_GPF, "general protection fault", regs,
20347 error_code, 13, SIGSEGV) == NOTIFY_STOP)
20348 return;
20349 +
20350 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20351 + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
20352 + die("PAX: suspicious general protection fault", regs, error_code);
20353 + else
20354 +#endif
20355 +
20356 die("general protection fault", regs, error_code);
20357 }
20358
20359 @@ -435,6 +464,17 @@ static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
20360 dotraplinkage notrace __kprobes void
20361 do_nmi(struct pt_regs *regs, long error_code)
20362 {
20363 +
20364 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20365 + if (!user_mode(regs)) {
20366 + unsigned long cs = regs->cs & 0xFFFF;
20367 + unsigned long ip = ktva_ktla(regs->ip);
20368 +
20369 + if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
20370 + regs->ip = ip;
20371 + }
20372 +#endif
20373 +
20374 nmi_enter();
20375
20376 inc_irq_stat(__nmi_count);
20377 @@ -558,7 +598,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
20378 }
20379
20380 #ifdef CONFIG_X86_32
20381 - if (regs->flags & X86_VM_MASK)
20382 + if (v8086_mode(regs))
20383 goto debug_vm86;
20384 #endif
20385
20386 @@ -570,7 +610,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
20387 * kernel space (but re-enable TF when returning to user mode).
20388 */
20389 if (condition & DR_STEP) {
20390 - if (!user_mode(regs))
20391 + if (!user_mode_novm(regs))
20392 goto clear_TF_reenable;
20393 }
20394
20395 @@ -757,7 +797,7 @@ do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
20396 * Handle strange cache flush from user space exception
20397 * in all other cases. This is undocumented behaviour.
20398 */
20399 - if (regs->flags & X86_VM_MASK) {
20400 + if (v8086_mode(regs)) {
20401 handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
20402 return;
20403 }
20404 @@ -798,7 +838,7 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
20405 void __math_state_restore(void)
20406 {
20407 struct thread_info *thread = current_thread_info();
20408 - struct task_struct *tsk = thread->task;
20409 + struct task_struct *tsk = current;
20410
20411 /*
20412 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
20413 @@ -825,8 +865,7 @@ void __math_state_restore(void)
20414 */
20415 asmlinkage void math_state_restore(void)
20416 {
20417 - struct thread_info *thread = current_thread_info();
20418 - struct task_struct *tsk = thread->task;
20419 + struct task_struct *tsk = current;
20420
20421 if (!tsk_used_math(tsk)) {
20422 local_irq_enable();
20423 diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
20424 new file mode 100644
20425 index 0000000..50c5edd
20426 --- /dev/null
20427 +++ b/arch/x86/kernel/verify_cpu.S
20428 @@ -0,0 +1,140 @@
20429 +/*
20430 + *
20431 + * verify_cpu.S - Code for cpu long mode and SSE verification. This
20432 + * code has been borrowed from boot/setup.S and was introduced by
20433 + * Andi Kleen.
20434 + *
20435 + * Copyright (c) 2007 Andi Kleen (ak@suse.de)
20436 + * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
20437 + * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
20438 + * Copyright (c) 2010 Kees Cook (kees.cook@canonical.com)
20439 + *
20440 + * This source code is licensed under the GNU General Public License,
20441 + * Version 2. See the file COPYING for more details.
20442 + *
20443 + * This is a common code for verification whether CPU supports
20444 + * long mode and SSE or not. It is not called directly instead this
20445 + * file is included at various places and compiled in that context.
20446 + * This file is expected to run in 32bit code. Currently:
20447 + *
20448 + * arch/x86/boot/compressed/head_64.S: Boot cpu verification
20449 + * arch/x86/kernel/trampoline_64.S: secondary processor verification
20450 + * arch/x86/kernel/head_32.S: processor startup
20451 + * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
20452 + *
20453 + * verify_cpu, returns the status of longmode and SSE in register %eax.
20454 + * 0: Success 1: Failure
20455 + *
20456 + * On Intel, the XD_DISABLE flag will be cleared as a side-effect.
20457 + *
20458 + * The caller needs to check for the error code and take the action
20459 + * appropriately. Either display a message or halt.
20460 + */
20461 +
20462 +#include <asm/cpufeature.h>
20463 +#include <asm/msr-index.h>
20464 +
20465 +verify_cpu:
20466 + pushfl # Save caller passed flags
20467 + pushl $0 # Kill any dangerous flags
20468 + popfl
20469 +
20470 + pushfl # standard way to check for cpuid
20471 + popl %eax
20472 + movl %eax,%ebx
20473 + xorl $0x200000,%eax
20474 + pushl %eax
20475 + popfl
20476 + pushfl
20477 + popl %eax
20478 + cmpl %eax,%ebx
20479 + jz verify_cpu_no_longmode # cpu has no cpuid
20480 +
20481 + movl $0x0,%eax # See if cpuid 1 is implemented
20482 + cpuid
20483 + cmpl $0x1,%eax
20484 + jb verify_cpu_no_longmode # no cpuid 1
20485 +
20486 + xor %di,%di
20487 + cmpl $0x68747541,%ebx # AuthenticAMD
20488 + jnz verify_cpu_noamd
20489 + cmpl $0x69746e65,%edx
20490 + jnz verify_cpu_noamd
20491 + cmpl $0x444d4163,%ecx
20492 + jnz verify_cpu_noamd
20493 + mov $1,%di # cpu is from AMD
20494 + jmp verify_cpu_check
20495 +
20496 +verify_cpu_noamd:
20497 + cmpl $0x756e6547,%ebx # GenuineIntel?
20498 + jnz verify_cpu_check
20499 + cmpl $0x49656e69,%edx
20500 + jnz verify_cpu_check
20501 + cmpl $0x6c65746e,%ecx
20502 + jnz verify_cpu_check
20503 +
20504 + # only call IA32_MISC_ENABLE when:
20505 + # family > 6 || (family == 6 && model >= 0xd)
20506 + movl $0x1, %eax # check CPU family and model
20507 + cpuid
20508 + movl %eax, %ecx
20509 +
20510 + andl $0x0ff00f00, %eax # mask family and extended family
20511 + shrl $8, %eax
20512 + cmpl $6, %eax
20513 + ja verify_cpu_clear_xd # family > 6, ok
20514 + jb verify_cpu_check # family < 6, skip
20515 +
20516 + andl $0x000f00f0, %ecx # mask model and extended model
20517 + shrl $4, %ecx
20518 + cmpl $0xd, %ecx
20519 + jb verify_cpu_check # family == 6, model < 0xd, skip
20520 +
20521 +verify_cpu_clear_xd:
20522 + movl $MSR_IA32_MISC_ENABLE, %ecx
20523 + rdmsr
20524 + btrl $2, %edx # clear MSR_IA32_MISC_ENABLE_XD_DISABLE
20525 + jnc verify_cpu_check # only write MSR if bit was changed
20526 + wrmsr
20527 +
20528 +verify_cpu_check:
20529 + movl $0x1,%eax # Does the cpu have what it takes
20530 + cpuid
20531 + andl $REQUIRED_MASK0,%edx
20532 + xorl $REQUIRED_MASK0,%edx
20533 + jnz verify_cpu_no_longmode
20534 +
20535 + movl $0x80000000,%eax # See if extended cpuid is implemented
20536 + cpuid
20537 + cmpl $0x80000001,%eax
20538 + jb verify_cpu_no_longmode # no extended cpuid
20539 +
20540 + movl $0x80000001,%eax # Does the cpu have what it takes
20541 + cpuid
20542 + andl $REQUIRED_MASK1,%edx
20543 + xorl $REQUIRED_MASK1,%edx
20544 + jnz verify_cpu_no_longmode
20545 +
20546 +verify_cpu_sse_test:
20547 + movl $1,%eax
20548 + cpuid
20549 + andl $SSE_MASK,%edx
20550 + cmpl $SSE_MASK,%edx
20551 + je verify_cpu_sse_ok
20552 + test %di,%di
20553 + jz verify_cpu_no_longmode # only try to force SSE on AMD
20554 + movl $MSR_K7_HWCR,%ecx
20555 + rdmsr
20556 + btr $15,%eax # enable SSE
20557 + wrmsr
20558 + xor %di,%di # don't loop
20559 + jmp verify_cpu_sse_test # try again
20560 +
20561 +verify_cpu_no_longmode:
20562 + popfl # Restore caller passed flags
20563 + movl $1,%eax
20564 + ret
20565 +verify_cpu_sse_ok:
20566 + popfl # Restore caller passed flags
20567 + xorl %eax, %eax
20568 + ret
20569 diff --git a/arch/x86/kernel/verify_cpu_64.S b/arch/x86/kernel/verify_cpu_64.S
20570 deleted file mode 100644
20571 index 45b6f8a..0000000
20572 --- a/arch/x86/kernel/verify_cpu_64.S
20573 +++ /dev/null
20574 @@ -1,105 +0,0 @@
20575 -/*
20576 - *
20577 - * verify_cpu.S - Code for cpu long mode and SSE verification. This
20578 - * code has been borrowed from boot/setup.S and was introduced by
20579 - * Andi Kleen.
20580 - *
20581 - * Copyright (c) 2007 Andi Kleen (ak@suse.de)
20582 - * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
20583 - * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
20584 - *
20585 - * This source code is licensed under the GNU General Public License,
20586 - * Version 2. See the file COPYING for more details.
20587 - *
20588 - * This is a common code for verification whether CPU supports
20589 - * long mode and SSE or not. It is not called directly instead this
20590 - * file is included at various places and compiled in that context.
20591 - * Following are the current usage.
20592 - *
20593 - * This file is included by both 16bit and 32bit code.
20594 - *
20595 - * arch/x86_64/boot/setup.S : Boot cpu verification (16bit)
20596 - * arch/x86_64/boot/compressed/head.S: Boot cpu verification (32bit)
20597 - * arch/x86_64/kernel/trampoline.S: secondary processor verfication (16bit)
20598 - * arch/x86_64/kernel/acpi/wakeup.S:Verfication at resume (16bit)
20599 - *
20600 - * verify_cpu, returns the status of cpu check in register %eax.
20601 - * 0: Success 1: Failure
20602 - *
20603 - * The caller needs to check for the error code and take the action
20604 - * appropriately. Either display a message or halt.
20605 - */
20606 -
20607 -#include <asm/cpufeature.h>
20608 -
20609 -verify_cpu:
20610 - pushfl # Save caller passed flags
20611 - pushl $0 # Kill any dangerous flags
20612 - popfl
20613 -
20614 - pushfl # standard way to check for cpuid
20615 - popl %eax
20616 - movl %eax,%ebx
20617 - xorl $0x200000,%eax
20618 - pushl %eax
20619 - popfl
20620 - pushfl
20621 - popl %eax
20622 - cmpl %eax,%ebx
20623 - jz verify_cpu_no_longmode # cpu has no cpuid
20624 -
20625 - movl $0x0,%eax # See if cpuid 1 is implemented
20626 - cpuid
20627 - cmpl $0x1,%eax
20628 - jb verify_cpu_no_longmode # no cpuid 1
20629 -
20630 - xor %di,%di
20631 - cmpl $0x68747541,%ebx # AuthenticAMD
20632 - jnz verify_cpu_noamd
20633 - cmpl $0x69746e65,%edx
20634 - jnz verify_cpu_noamd
20635 - cmpl $0x444d4163,%ecx
20636 - jnz verify_cpu_noamd
20637 - mov $1,%di # cpu is from AMD
20638 -
20639 -verify_cpu_noamd:
20640 - movl $0x1,%eax # Does the cpu have what it takes
20641 - cpuid
20642 - andl $REQUIRED_MASK0,%edx
20643 - xorl $REQUIRED_MASK0,%edx
20644 - jnz verify_cpu_no_longmode
20645 -
20646 - movl $0x80000000,%eax # See if extended cpuid is implemented
20647 - cpuid
20648 - cmpl $0x80000001,%eax
20649 - jb verify_cpu_no_longmode # no extended cpuid
20650 -
20651 - movl $0x80000001,%eax # Does the cpu have what it takes
20652 - cpuid
20653 - andl $REQUIRED_MASK1,%edx
20654 - xorl $REQUIRED_MASK1,%edx
20655 - jnz verify_cpu_no_longmode
20656 -
20657 -verify_cpu_sse_test:
20658 - movl $1,%eax
20659 - cpuid
20660 - andl $SSE_MASK,%edx
20661 - cmpl $SSE_MASK,%edx
20662 - je verify_cpu_sse_ok
20663 - test %di,%di
20664 - jz verify_cpu_no_longmode # only try to force SSE on AMD
20665 - movl $0xc0010015,%ecx # HWCR
20666 - rdmsr
20667 - btr $15,%eax # enable SSE
20668 - wrmsr
20669 - xor %di,%di # don't loop
20670 - jmp verify_cpu_sse_test # try again
20671 -
20672 -verify_cpu_no_longmode:
20673 - popfl # Restore caller passed flags
20674 - movl $1,%eax
20675 - ret
20676 -verify_cpu_sse_ok:
20677 - popfl # Restore caller passed flags
20678 - xorl %eax, %eax
20679 - ret
20680 diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
20681 index 9c4e625..c992817 100644
20682 --- a/arch/x86/kernel/vm86_32.c
20683 +++ b/arch/x86/kernel/vm86_32.c
20684 @@ -41,6 +41,7 @@
20685 #include <linux/ptrace.h>
20686 #include <linux/audit.h>
20687 #include <linux/stddef.h>
20688 +#include <linux/grsecurity.h>
20689
20690 #include <asm/uaccess.h>
20691 #include <asm/io.h>
20692 @@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
20693 do_exit(SIGSEGV);
20694 }
20695
20696 - tss = &per_cpu(init_tss, get_cpu());
20697 + tss = init_tss + get_cpu();
20698 current->thread.sp0 = current->thread.saved_sp0;
20699 current->thread.sysenter_cs = __KERNEL_CS;
20700 load_sp0(tss, &current->thread);
20701 @@ -208,6 +209,13 @@ int sys_vm86old(struct pt_regs *regs)
20702 struct task_struct *tsk;
20703 int tmp, ret = -EPERM;
20704
20705 +#ifdef CONFIG_GRKERNSEC_VM86
20706 + if (!capable(CAP_SYS_RAWIO)) {
20707 + gr_handle_vm86();
20708 + goto out;
20709 + }
20710 +#endif
20711 +
20712 tsk = current;
20713 if (tsk->thread.saved_sp0)
20714 goto out;
20715 @@ -238,6 +246,14 @@ int sys_vm86(struct pt_regs *regs)
20716 int tmp, ret;
20717 struct vm86plus_struct __user *v86;
20718
20719 +#ifdef CONFIG_GRKERNSEC_VM86
20720 + if (!capable(CAP_SYS_RAWIO)) {
20721 + gr_handle_vm86();
20722 + ret = -EPERM;
20723 + goto out;
20724 + }
20725 +#endif
20726 +
20727 tsk = current;
20728 switch (regs->bx) {
20729 case VM86_REQUEST_IRQ:
20730 @@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
20731 tsk->thread.saved_fs = info->regs32->fs;
20732 tsk->thread.saved_gs = get_user_gs(info->regs32);
20733
20734 - tss = &per_cpu(init_tss, get_cpu());
20735 + tss = init_tss + get_cpu();
20736 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
20737 if (cpu_has_sep)
20738 tsk->thread.sysenter_cs = 0;
20739 @@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
20740 goto cannot_handle;
20741 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
20742 goto cannot_handle;
20743 - intr_ptr = (unsigned long __user *) (i << 2);
20744 + intr_ptr = (__force unsigned long __user *) (i << 2);
20745 if (get_user(segoffs, intr_ptr))
20746 goto cannot_handle;
20747 if ((segoffs >> 16) == BIOSSEG)
20748 diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
20749 index d430e4c..831f817 100644
20750 --- a/arch/x86/kernel/vmi_32.c
20751 +++ b/arch/x86/kernel/vmi_32.c
20752 @@ -44,12 +44,17 @@ typedef u32 __attribute__((regparm(1))) (VROMFUNC)(void);
20753 typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int);
20754
20755 #define call_vrom_func(rom,func) \
20756 - (((VROMFUNC *)(rom->func))())
20757 + (((VROMFUNC *)(ktva_ktla(rom.func)))())
20758
20759 #define call_vrom_long_func(rom,func,arg) \
20760 - (((VROMLONGFUNC *)(rom->func)) (arg))
20761 +({\
20762 + u64 __reloc = ((VROMLONGFUNC *)(ktva_ktla(rom.func))) (arg);\
20763 + struct vmi_relocation_info *const __rel = (struct vmi_relocation_info *)&__reloc;\
20764 + __rel->eip = (unsigned char *)ktva_ktla((unsigned long)__rel->eip);\
20765 + __reloc;\
20766 +})
20767
20768 -static struct vrom_header *vmi_rom;
20769 +static struct vrom_header vmi_rom __attribute((__section__(".vmi.rom"), __aligned__(PAGE_SIZE)));
20770 static int disable_pge;
20771 static int disable_pse;
20772 static int disable_sep;
20773 @@ -76,10 +81,10 @@ static struct {
20774 void (*set_initial_ap_state)(int, int);
20775 void (*halt)(void);
20776 void (*set_lazy_mode)(int mode);
20777 -} vmi_ops;
20778 +} __no_const vmi_ops __read_only;
20779
20780 /* Cached VMI operations */
20781 -struct vmi_timer_ops vmi_timer_ops;
20782 +struct vmi_timer_ops vmi_timer_ops __read_only;
20783
20784 /*
20785 * VMI patching routines.
20786 @@ -94,7 +99,7 @@ struct vmi_timer_ops vmi_timer_ops;
20787 static inline void patch_offset(void *insnbuf,
20788 unsigned long ip, unsigned long dest)
20789 {
20790 - *(unsigned long *)(insnbuf+1) = dest-ip-5;
20791 + *(unsigned long *)(insnbuf+1) = dest-ip-5;
20792 }
20793
20794 static unsigned patch_internal(int call, unsigned len, void *insnbuf,
20795 @@ -102,6 +107,7 @@ static unsigned patch_internal(int call, unsigned len, void *insnbuf,
20796 {
20797 u64 reloc;
20798 struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;
20799 +
20800 reloc = call_vrom_long_func(vmi_rom, get_reloc, call);
20801 switch(rel->type) {
20802 case VMI_RELOCATION_CALL_REL:
20803 @@ -404,13 +410,13 @@ static void vmi_set_pud(pud_t *pudp, pud_t pudval)
20804
20805 static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
20806 {
20807 - const pte_t pte = { .pte = 0 };
20808 + const pte_t pte = __pte(0ULL);
20809 vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
20810 }
20811
20812 static void vmi_pmd_clear(pmd_t *pmd)
20813 {
20814 - const pte_t pte = { .pte = 0 };
20815 + const pte_t pte = __pte(0ULL);
20816 vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
20817 }
20818 #endif
20819 @@ -438,10 +444,10 @@ vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip,
20820 ap.ss = __KERNEL_DS;
20821 ap.esp = (unsigned long) start_esp;
20822
20823 - ap.ds = __USER_DS;
20824 - ap.es = __USER_DS;
20825 + ap.ds = __KERNEL_DS;
20826 + ap.es = __KERNEL_DS;
20827 ap.fs = __KERNEL_PERCPU;
20828 - ap.gs = __KERNEL_STACK_CANARY;
20829 + savesegment(gs, ap.gs);
20830
20831 ap.eflags = 0;
20832
20833 @@ -486,6 +492,18 @@ static void vmi_leave_lazy_mmu(void)
20834 paravirt_leave_lazy_mmu();
20835 }
20836
20837 +#ifdef CONFIG_PAX_KERNEXEC
20838 +static unsigned long vmi_pax_open_kernel(void)
20839 +{
20840 + return 0;
20841 +}
20842 +
20843 +static unsigned long vmi_pax_close_kernel(void)
20844 +{
20845 + return 0;
20846 +}
20847 +#endif
20848 +
20849 static inline int __init check_vmi_rom(struct vrom_header *rom)
20850 {
20851 struct pci_header *pci;
20852 @@ -498,6 +516,10 @@ static inline int __init check_vmi_rom(struct vrom_header *rom)
20853 return 0;
20854 if (rom->vrom_signature != VMI_SIGNATURE)
20855 return 0;
20856 + if (rom->rom_length * 512 > sizeof(*rom)) {
20857 + printk(KERN_WARNING "PAX: VMI: ROM size too big: %x\n", rom->rom_length * 512);
20858 + return 0;
20859 + }
20860 if (rom->api_version_maj != VMI_API_REV_MAJOR ||
20861 rom->api_version_min+1 < VMI_API_REV_MINOR+1) {
20862 printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n",
20863 @@ -562,7 +584,7 @@ static inline int __init probe_vmi_rom(void)
20864 struct vrom_header *romstart;
20865 romstart = (struct vrom_header *)isa_bus_to_virt(base);
20866 if (check_vmi_rom(romstart)) {
20867 - vmi_rom = romstart;
20868 + vmi_rom = *romstart;
20869 return 1;
20870 }
20871 }
20872 @@ -836,6 +858,11 @@ static inline int __init activate_vmi(void)
20873
20874 para_fill(pv_irq_ops.safe_halt, Halt);
20875
20876 +#ifdef CONFIG_PAX_KERNEXEC
20877 + pv_mmu_ops.pax_open_kernel = vmi_pax_open_kernel;
20878 + pv_mmu_ops.pax_close_kernel = vmi_pax_close_kernel;
20879 +#endif
20880 +
20881 /*
20882 * Alternative instruction rewriting doesn't happen soon enough
20883 * to convert VMI_IRET to a call instead of a jump; so we have
20884 @@ -853,16 +880,16 @@ static inline int __init activate_vmi(void)
20885
20886 void __init vmi_init(void)
20887 {
20888 - if (!vmi_rom)
20889 + if (!vmi_rom.rom_signature)
20890 probe_vmi_rom();
20891 else
20892 - check_vmi_rom(vmi_rom);
20893 + check_vmi_rom(&vmi_rom);
20894
20895 /* In case probing for or validating the ROM failed, basil */
20896 - if (!vmi_rom)
20897 + if (!vmi_rom.rom_signature)
20898 return;
20899
20900 - reserve_top_address(-vmi_rom->virtual_top);
20901 + reserve_top_address(-vmi_rom.virtual_top);
20902
20903 #ifdef CONFIG_X86_IO_APIC
20904 /* This is virtual hardware; timer routing is wired correctly */
20905 @@ -874,7 +901,7 @@ void __init vmi_activate(void)
20906 {
20907 unsigned long flags;
20908
20909 - if (!vmi_rom)
20910 + if (!vmi_rom.rom_signature)
20911 return;
20912
20913 local_irq_save(flags);
20914 diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
20915 index 3c68fe2..12c8280 100644
20916 --- a/arch/x86/kernel/vmlinux.lds.S
20917 +++ b/arch/x86/kernel/vmlinux.lds.S
20918 @@ -26,6 +26,13 @@
20919 #include <asm/page_types.h>
20920 #include <asm/cache.h>
20921 #include <asm/boot.h>
20922 +#include <asm/segment.h>
20923 +
20924 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20925 +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
20926 +#else
20927 +#define __KERNEL_TEXT_OFFSET 0
20928 +#endif
20929
20930 #undef i386 /* in case the preprocessor is a 32bit one */
20931
20932 @@ -34,40 +41,53 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT)
20933 #ifdef CONFIG_X86_32
20934 OUTPUT_ARCH(i386)
20935 ENTRY(phys_startup_32)
20936 -jiffies = jiffies_64;
20937 #else
20938 OUTPUT_ARCH(i386:x86-64)
20939 ENTRY(phys_startup_64)
20940 -jiffies_64 = jiffies;
20941 #endif
20942
20943 PHDRS {
20944 text PT_LOAD FLAGS(5); /* R_E */
20945 - data PT_LOAD FLAGS(7); /* RWE */
20946 +#ifdef CONFIG_X86_32
20947 + module PT_LOAD FLAGS(5); /* R_E */
20948 +#endif
20949 +#ifdef CONFIG_XEN
20950 + rodata PT_LOAD FLAGS(5); /* R_E */
20951 +#else
20952 + rodata PT_LOAD FLAGS(4); /* R__ */
20953 +#endif
20954 + data PT_LOAD FLAGS(6); /* RW_ */
20955 #ifdef CONFIG_X86_64
20956 user PT_LOAD FLAGS(5); /* R_E */
20957 +#endif
20958 + init.begin PT_LOAD FLAGS(6); /* RW_ */
20959 #ifdef CONFIG_SMP
20960 percpu PT_LOAD FLAGS(6); /* RW_ */
20961 #endif
20962 + text.init PT_LOAD FLAGS(5); /* R_E */
20963 + text.exit PT_LOAD FLAGS(5); /* R_E */
20964 init PT_LOAD FLAGS(7); /* RWE */
20965 -#endif
20966 note PT_NOTE FLAGS(0); /* ___ */
20967 }
20968
20969 SECTIONS
20970 {
20971 #ifdef CONFIG_X86_32
20972 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
20973 - phys_startup_32 = startup_32 - LOAD_OFFSET;
20974 + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
20975 #else
20976 - . = __START_KERNEL;
20977 - phys_startup_64 = startup_64 - LOAD_OFFSET;
20978 + . = __START_KERNEL;
20979 #endif
20980
20981 /* Text and read-only data */
20982 - .text : AT(ADDR(.text) - LOAD_OFFSET) {
20983 - _text = .;
20984 + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
20985 /* bootstrapping code */
20986 +#ifdef CONFIG_X86_32
20987 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20988 +#else
20989 + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20990 +#endif
20991 + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20992 + _text = .;
20993 HEAD_TEXT
20994 #ifdef CONFIG_X86_32
20995 . = ALIGN(PAGE_SIZE);
20996 @@ -82,28 +102,71 @@ SECTIONS
20997 IRQENTRY_TEXT
20998 *(.fixup)
20999 *(.gnu.warning)
21000 - /* End of text section */
21001 - _etext = .;
21002 } :text = 0x9090
21003
21004 - NOTES :text :note
21005 + . += __KERNEL_TEXT_OFFSET;
21006
21007 - EXCEPTION_TABLE(16) :text = 0x9090
21008 +#ifdef CONFIG_X86_32
21009 + . = ALIGN(PAGE_SIZE);
21010 + .vmi.rom : AT(ADDR(.vmi.rom) - LOAD_OFFSET) {
21011 + *(.vmi.rom)
21012 + } :module
21013 +
21014 + . = ALIGN(PAGE_SIZE);
21015 + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
21016 +
21017 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
21018 + MODULES_EXEC_VADDR = .;
21019 + BYTE(0)
21020 + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
21021 + . = ALIGN(HPAGE_SIZE);
21022 + MODULES_EXEC_END = . - 1;
21023 +#endif
21024 +
21025 + } :module
21026 +#endif
21027 +
21028 + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
21029 + /* End of text section */
21030 + _etext = . - __KERNEL_TEXT_OFFSET;
21031 + }
21032 +
21033 +#ifdef CONFIG_X86_32
21034 + . = ALIGN(PAGE_SIZE);
21035 + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
21036 + *(.idt)
21037 + . = ALIGN(PAGE_SIZE);
21038 + *(.empty_zero_page)
21039 + *(.swapper_pg_fixmap)
21040 + *(.swapper_pg_pmd)
21041 + *(.swapper_pg_dir)
21042 + *(.trampoline_pg_dir)
21043 + } :rodata
21044 +#endif
21045 +
21046 + . = ALIGN(PAGE_SIZE);
21047 + NOTES :rodata :note
21048 +
21049 + EXCEPTION_TABLE(16) :rodata
21050
21051 RO_DATA(PAGE_SIZE)
21052
21053 /* Data */
21054 .data : AT(ADDR(.data) - LOAD_OFFSET) {
21055 +
21056 +#ifdef CONFIG_PAX_KERNEXEC
21057 + . = ALIGN(HPAGE_SIZE);
21058 +#else
21059 + . = ALIGN(PAGE_SIZE);
21060 +#endif
21061 +
21062 /* Start of data section */
21063 _sdata = .;
21064
21065 /* init_task */
21066 INIT_TASK_DATA(THREAD_SIZE)
21067
21068 -#ifdef CONFIG_X86_32
21069 - /* 32 bit has nosave before _edata */
21070 NOSAVE_DATA
21071 -#endif
21072
21073 PAGE_ALIGNED_DATA(PAGE_SIZE)
21074
21075 @@ -112,6 +175,8 @@ SECTIONS
21076 DATA_DATA
21077 CONSTRUCTORS
21078
21079 + jiffies = jiffies_64;
21080 +
21081 /* rarely changed data like cpu maps */
21082 READ_MOSTLY_DATA(CONFIG_X86_INTERNODE_CACHE_BYTES)
21083
21084 @@ -166,12 +231,6 @@ SECTIONS
21085 }
21086 vgetcpu_mode = VVIRT(.vgetcpu_mode);
21087
21088 - . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
21089 - .jiffies : AT(VLOAD(.jiffies)) {
21090 - *(.jiffies)
21091 - }
21092 - jiffies = VVIRT(.jiffies);
21093 -
21094 .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
21095 *(.vsyscall_3)
21096 }
21097 @@ -187,12 +246,19 @@ SECTIONS
21098 #endif /* CONFIG_X86_64 */
21099
21100 /* Init code and data - will be freed after init */
21101 - . = ALIGN(PAGE_SIZE);
21102 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
21103 + BYTE(0)
21104 +
21105 +#ifdef CONFIG_PAX_KERNEXEC
21106 + . = ALIGN(HPAGE_SIZE);
21107 +#else
21108 + . = ALIGN(PAGE_SIZE);
21109 +#endif
21110 +
21111 __init_begin = .; /* paired with __init_end */
21112 - }
21113 + } :init.begin
21114
21115 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
21116 +#ifdef CONFIG_SMP
21117 /*
21118 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
21119 * output PHDR, so the next output section - .init.text - should
21120 @@ -201,12 +267,27 @@ SECTIONS
21121 PERCPU_VADDR(0, :percpu)
21122 #endif
21123
21124 - INIT_TEXT_SECTION(PAGE_SIZE)
21125 -#ifdef CONFIG_X86_64
21126 - :init
21127 -#endif
21128 + . = ALIGN(PAGE_SIZE);
21129 + init_begin = .;
21130 + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
21131 + VMLINUX_SYMBOL(_sinittext) = .;
21132 + INIT_TEXT
21133 + VMLINUX_SYMBOL(_einittext) = .;
21134 + . = ALIGN(PAGE_SIZE);
21135 + } :text.init
21136
21137 - INIT_DATA_SECTION(16)
21138 + /*
21139 + * .exit.text is discard at runtime, not link time, to deal with
21140 + * references from .altinstructions and .eh_frame
21141 + */
21142 + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
21143 + EXIT_TEXT
21144 + . = ALIGN(16);
21145 + } :text.exit
21146 + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
21147 +
21148 + . = ALIGN(PAGE_SIZE);
21149 + INIT_DATA_SECTION(16) :init
21150
21151 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
21152 __x86_cpu_dev_start = .;
21153 @@ -232,19 +313,11 @@ SECTIONS
21154 *(.altinstr_replacement)
21155 }
21156
21157 - /*
21158 - * .exit.text is discard at runtime, not link time, to deal with
21159 - * references from .altinstructions and .eh_frame
21160 - */
21161 - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
21162 - EXIT_TEXT
21163 - }
21164 -
21165 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
21166 EXIT_DATA
21167 }
21168
21169 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
21170 +#ifndef CONFIG_SMP
21171 PERCPU(PAGE_SIZE)
21172 #endif
21173
21174 @@ -267,12 +340,6 @@ SECTIONS
21175 . = ALIGN(PAGE_SIZE);
21176 }
21177
21178 -#ifdef CONFIG_X86_64
21179 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
21180 - NOSAVE_DATA
21181 - }
21182 -#endif
21183 -
21184 /* BSS */
21185 . = ALIGN(PAGE_SIZE);
21186 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
21187 @@ -288,6 +355,7 @@ SECTIONS
21188 __brk_base = .;
21189 . += 64 * 1024; /* 64k alignment slop space */
21190 *(.brk_reservation) /* areas brk users have reserved */
21191 + . = ALIGN(HPAGE_SIZE);
21192 __brk_limit = .;
21193 }
21194
21195 @@ -316,13 +384,12 @@ SECTIONS
21196 * for the boot processor.
21197 */
21198 #define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
21199 -INIT_PER_CPU(gdt_page);
21200 INIT_PER_CPU(irq_stack_union);
21201
21202 /*
21203 * Build-time check on the image size:
21204 */
21205 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
21206 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
21207 "kernel image bigger than KERNEL_IMAGE_SIZE");
21208
21209 #ifdef CONFIG_SMP
21210 diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
21211 index 62f39d7..3bc46a1 100644
21212 --- a/arch/x86/kernel/vsyscall_64.c
21213 +++ b/arch/x86/kernel/vsyscall_64.c
21214 @@ -80,6 +80,7 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
21215
21216 write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
21217 /* copy vsyscall data */
21218 + strlcpy(vsyscall_gtod_data.clock.name, clock->name, sizeof vsyscall_gtod_data.clock.name);
21219 vsyscall_gtod_data.clock.vread = clock->vread;
21220 vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
21221 vsyscall_gtod_data.clock.mask = clock->mask;
21222 @@ -203,7 +204,7 @@ vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
21223 We do this here because otherwise user space would do it on
21224 its own in a likely inferior way (no access to jiffies).
21225 If you don't like it pass NULL. */
21226 - if (tcache && tcache->blob[0] == (j = __jiffies)) {
21227 + if (tcache && tcache->blob[0] == (j = jiffies)) {
21228 p = tcache->blob[1];
21229 } else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
21230 /* Load per CPU data from RDTSCP */
21231 diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
21232 index 3909e3b..5433a97 100644
21233 --- a/arch/x86/kernel/x8664_ksyms_64.c
21234 +++ b/arch/x86/kernel/x8664_ksyms_64.c
21235 @@ -30,8 +30,6 @@ EXPORT_SYMBOL(__put_user_8);
21236
21237 EXPORT_SYMBOL(copy_user_generic);
21238 EXPORT_SYMBOL(__copy_user_nocache);
21239 -EXPORT_SYMBOL(copy_from_user);
21240 -EXPORT_SYMBOL(copy_to_user);
21241 EXPORT_SYMBOL(__copy_from_user_inatomic);
21242
21243 EXPORT_SYMBOL(copy_page);
21244 diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
21245 index c5ee17e..d63218f 100644
21246 --- a/arch/x86/kernel/xsave.c
21247 +++ b/arch/x86/kernel/xsave.c
21248 @@ -54,7 +54,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
21249 fx_sw_user->xstate_size > fx_sw_user->extended_size)
21250 return -1;
21251
21252 - err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
21253 + err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
21254 fx_sw_user->extended_size -
21255 FP_XSTATE_MAGIC2_SIZE));
21256 /*
21257 @@ -196,7 +196,7 @@ fx_only:
21258 * the other extended state.
21259 */
21260 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
21261 - return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
21262 + return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
21263 }
21264
21265 /*
21266 @@ -228,7 +228,7 @@ int restore_i387_xstate(void __user *buf)
21267 if (task_thread_info(tsk)->status & TS_XSAVE)
21268 err = restore_user_xstate(buf);
21269 else
21270 - err = fxrstor_checking((__force struct i387_fxsave_struct *)
21271 + err = fxrstor_checking((struct i387_fxsave_struct __user *)
21272 buf);
21273 if (unlikely(err)) {
21274 /*
21275 diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
21276 index 1350e43..a94b011 100644
21277 --- a/arch/x86/kvm/emulate.c
21278 +++ b/arch/x86/kvm/emulate.c
21279 @@ -81,8 +81,8 @@
21280 #define Src2CL (1<<29)
21281 #define Src2ImmByte (2<<29)
21282 #define Src2One (3<<29)
21283 -#define Src2Imm16 (4<<29)
21284 -#define Src2Mask (7<<29)
21285 +#define Src2Imm16 (4U<<29)
21286 +#define Src2Mask (7U<<29)
21287
21288 enum {
21289 Group1_80, Group1_81, Group1_82, Group1_83,
21290 @@ -411,6 +411,7 @@ static u32 group2_table[] = {
21291
21292 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix) \
21293 do { \
21294 + unsigned long _tmp; \
21295 __asm__ __volatile__ ( \
21296 _PRE_EFLAGS("0", "4", "2") \
21297 _op _suffix " %"_x"3,%1; " \
21298 @@ -424,8 +425,6 @@ static u32 group2_table[] = {
21299 /* Raw emulation: instruction has two explicit operands. */
21300 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
21301 do { \
21302 - unsigned long _tmp; \
21303 - \
21304 switch ((_dst).bytes) { \
21305 case 2: \
21306 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \
21307 @@ -441,7 +440,6 @@ static u32 group2_table[] = {
21308
21309 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
21310 do { \
21311 - unsigned long _tmp; \
21312 switch ((_dst).bytes) { \
21313 case 1: \
21314 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \
21315 diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
21316 index 8dfeaaa..4daa395 100644
21317 --- a/arch/x86/kvm/lapic.c
21318 +++ b/arch/x86/kvm/lapic.c
21319 @@ -52,7 +52,7 @@
21320 #define APIC_BUS_CYCLE_NS 1
21321
21322 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
21323 -#define apic_debug(fmt, arg...)
21324 +#define apic_debug(fmt, arg...) do {} while (0)
21325
21326 #define APIC_LVT_NUM 6
21327 /* 14 is the version for Xeon and Pentium 8.4.8*/
21328 diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
21329 index 3bc2707..dd157e2 100644
21330 --- a/arch/x86/kvm/paging_tmpl.h
21331 +++ b/arch/x86/kvm/paging_tmpl.h
21332 @@ -416,6 +416,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
21333 int level = PT_PAGE_TABLE_LEVEL;
21334 unsigned long mmu_seq;
21335
21336 + pax_track_stack();
21337 +
21338 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
21339 kvm_mmu_audit(vcpu, "pre page fault");
21340
21341 @@ -461,6 +463,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
21342 kvm_mmu_free_some_pages(vcpu);
21343 sptep = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
21344 level, &write_pt, pfn);
21345 + (void)sptep;
21346 pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __func__,
21347 sptep, *sptep, write_pt);
21348
21349 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
21350 index 7c6e63e..c5d92c1 100644
21351 --- a/arch/x86/kvm/svm.c
21352 +++ b/arch/x86/kvm/svm.c
21353 @@ -2486,7 +2486,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
21354 int cpu = raw_smp_processor_id();
21355
21356 struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
21357 +
21358 + pax_open_kernel();
21359 svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
21360 + pax_close_kernel();
21361 +
21362 load_TR_desc();
21363 }
21364
21365 @@ -2947,7 +2951,7 @@ static bool svm_gb_page_enable(void)
21366 return true;
21367 }
21368
21369 -static struct kvm_x86_ops svm_x86_ops = {
21370 +static const struct kvm_x86_ops svm_x86_ops = {
21371 .cpu_has_kvm_support = has_svm,
21372 .disabled_by_bios = is_disabled,
21373 .hardware_setup = svm_hardware_setup,
21374 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
21375 index e6d925f..e7a4af8 100644
21376 --- a/arch/x86/kvm/vmx.c
21377 +++ b/arch/x86/kvm/vmx.c
21378 @@ -570,7 +570,11 @@ static void reload_tss(void)
21379
21380 kvm_get_gdt(&gdt);
21381 descs = (void *)gdt.base;
21382 +
21383 + pax_open_kernel();
21384 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
21385 + pax_close_kernel();
21386 +
21387 load_TR_desc();
21388 }
21389
21390 @@ -1410,8 +1414,11 @@ static __init int hardware_setup(void)
21391 if (!cpu_has_vmx_flexpriority())
21392 flexpriority_enabled = 0;
21393
21394 - if (!cpu_has_vmx_tpr_shadow())
21395 - kvm_x86_ops->update_cr8_intercept = NULL;
21396 + if (!cpu_has_vmx_tpr_shadow()) {
21397 + pax_open_kernel();
21398 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
21399 + pax_close_kernel();
21400 + }
21401
21402 if (enable_ept && !cpu_has_vmx_ept_2m_page())
21403 kvm_disable_largepages();
21404 @@ -2362,7 +2369,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
21405 vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */
21406
21407 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
21408 - vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
21409 + vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
21410 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
21411 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
21412 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
21413 @@ -3718,6 +3725,12 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
21414 "jmp .Lkvm_vmx_return \n\t"
21415 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
21416 ".Lkvm_vmx_return: "
21417 +
21418 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21419 + "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
21420 + ".Lkvm_vmx_return2: "
21421 +#endif
21422 +
21423 /* Save guest registers, load host registers, keep flags */
21424 "xchg %0, (%%"R"sp) \n\t"
21425 "mov %%"R"ax, %c[rax](%0) \n\t"
21426 @@ -3764,8 +3777,13 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
21427 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
21428 #endif
21429 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2))
21430 +
21431 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21432 + ,[cs]"i"(__KERNEL_CS)
21433 +#endif
21434 +
21435 : "cc", "memory"
21436 - , R"bx", R"di", R"si"
21437 + , R"ax", R"bx", R"di", R"si"
21438 #ifdef CONFIG_X86_64
21439 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
21440 #endif
21441 @@ -3782,7 +3800,16 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
21442 if (vmx->rmode.irq.pending)
21443 fixup_rmode_irq(vmx);
21444
21445 - asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
21446 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
21447 +
21448 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21449 + loadsegment(fs, __KERNEL_PERCPU);
21450 +#endif
21451 +
21452 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
21453 + __set_fs(current_thread_info()->addr_limit);
21454 +#endif
21455 +
21456 vmx->launched = 1;
21457
21458 vmx_complete_interrupts(vmx);
21459 @@ -3957,7 +3984,7 @@ static bool vmx_gb_page_enable(void)
21460 return false;
21461 }
21462
21463 -static struct kvm_x86_ops vmx_x86_ops = {
21464 +static const struct kvm_x86_ops vmx_x86_ops = {
21465 .cpu_has_kvm_support = cpu_has_kvm_support,
21466 .disabled_by_bios = vmx_disabled_by_bios,
21467 .hardware_setup = hardware_setup,
21468 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
21469 index df1cefb..5e882ad 100644
21470 --- a/arch/x86/kvm/x86.c
21471 +++ b/arch/x86/kvm/x86.c
21472 @@ -82,7 +82,7 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu);
21473 static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
21474 struct kvm_cpuid_entry2 __user *entries);
21475
21476 -struct kvm_x86_ops *kvm_x86_ops;
21477 +const struct kvm_x86_ops *kvm_x86_ops;
21478 EXPORT_SYMBOL_GPL(kvm_x86_ops);
21479
21480 int ignore_msrs = 0;
21481 @@ -1430,15 +1430,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
21482 struct kvm_cpuid2 *cpuid,
21483 struct kvm_cpuid_entry2 __user *entries)
21484 {
21485 - int r;
21486 + int r, i;
21487
21488 r = -E2BIG;
21489 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
21490 goto out;
21491 r = -EFAULT;
21492 - if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
21493 - cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
21494 + if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
21495 goto out;
21496 + for (i = 0; i < cpuid->nent; ++i) {
21497 + struct kvm_cpuid_entry2 cpuid_entry;
21498 + if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
21499 + goto out;
21500 + vcpu->arch.cpuid_entries[i] = cpuid_entry;
21501 + }
21502 vcpu->arch.cpuid_nent = cpuid->nent;
21503 kvm_apic_set_version(vcpu);
21504 return 0;
21505 @@ -1451,16 +1456,20 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
21506 struct kvm_cpuid2 *cpuid,
21507 struct kvm_cpuid_entry2 __user *entries)
21508 {
21509 - int r;
21510 + int r, i;
21511
21512 vcpu_load(vcpu);
21513 r = -E2BIG;
21514 if (cpuid->nent < vcpu->arch.cpuid_nent)
21515 goto out;
21516 r = -EFAULT;
21517 - if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
21518 - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
21519 + if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
21520 goto out;
21521 + for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
21522 + struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
21523 + if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
21524 + goto out;
21525 + }
21526 return 0;
21527
21528 out:
21529 @@ -1678,7 +1687,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
21530 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
21531 struct kvm_interrupt *irq)
21532 {
21533 - if (irq->irq < 0 || irq->irq >= 256)
21534 + if (irq->irq >= 256)
21535 return -EINVAL;
21536 if (irqchip_in_kernel(vcpu->kvm))
21537 return -ENXIO;
21538 @@ -3260,10 +3269,10 @@ static struct notifier_block kvmclock_cpufreq_notifier_block = {
21539 .notifier_call = kvmclock_cpufreq_notifier
21540 };
21541
21542 -int kvm_arch_init(void *opaque)
21543 +int kvm_arch_init(const void *opaque)
21544 {
21545 int r, cpu;
21546 - struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
21547 + const struct kvm_x86_ops *ops = (const struct kvm_x86_ops *)opaque;
21548
21549 if (kvm_x86_ops) {
21550 printk(KERN_ERR "kvm: already loaded the other module\n");
21551 diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
21552 index 7e59dc1..b88c98f 100644
21553 --- a/arch/x86/lguest/boot.c
21554 +++ b/arch/x86/lguest/boot.c
21555 @@ -1172,9 +1172,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
21556 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
21557 * Launcher to reboot us.
21558 */
21559 -static void lguest_restart(char *reason)
21560 +static __noreturn void lguest_restart(char *reason)
21561 {
21562 kvm_hypercall2(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART);
21563 + BUG();
21564 }
21565
21566 /*G:050
21567 diff --git a/arch/x86/lib/atomic64_32.c b/arch/x86/lib/atomic64_32.c
21568 index 824fa0b..c619e96 100644
21569 --- a/arch/x86/lib/atomic64_32.c
21570 +++ b/arch/x86/lib/atomic64_32.c
21571 @@ -25,6 +25,12 @@ u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val)
21572 }
21573 EXPORT_SYMBOL(atomic64_cmpxchg);
21574
21575 +u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val)
21576 +{
21577 + return cmpxchg8b(&ptr->counter, old_val, new_val);
21578 +}
21579 +EXPORT_SYMBOL(atomic64_cmpxchg_unchecked);
21580 +
21581 /**
21582 * atomic64_xchg - xchg atomic64 variable
21583 * @ptr: pointer to type atomic64_t
21584 @@ -56,6 +62,36 @@ u64 atomic64_xchg(atomic64_t *ptr, u64 new_val)
21585 EXPORT_SYMBOL(atomic64_xchg);
21586
21587 /**
21588 + * atomic64_xchg_unchecked - xchg atomic64 variable
21589 + * @ptr: pointer to type atomic64_unchecked_t
21590 + * @new_val: value to assign
21591 + *
21592 + * Atomically xchgs the value of @ptr to @new_val and returns
21593 + * the old value.
21594 + */
21595 +u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
21596 +{
21597 + /*
21598 + * Try first with a (possibly incorrect) assumption about
21599 + * what we have there. We'll do two loops most likely,
21600 + * but we'll get an ownership MESI transaction straight away
21601 + * instead of a read transaction followed by a
21602 + * flush-for-ownership transaction:
21603 + */
21604 + u64 old_val, real_val = 0;
21605 +
21606 + do {
21607 + old_val = real_val;
21608 +
21609 + real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
21610 +
21611 + } while (real_val != old_val);
21612 +
21613 + return old_val;
21614 +}
21615 +EXPORT_SYMBOL(atomic64_xchg_unchecked);
21616 +
21617 +/**
21618 * atomic64_set - set atomic64 variable
21619 * @ptr: pointer to type atomic64_t
21620 * @new_val: value to assign
21621 @@ -69,7 +105,19 @@ void atomic64_set(atomic64_t *ptr, u64 new_val)
21622 EXPORT_SYMBOL(atomic64_set);
21623
21624 /**
21625 -EXPORT_SYMBOL(atomic64_read);
21626 + * atomic64_unchecked_set - set atomic64 variable
21627 + * @ptr: pointer to type atomic64_unchecked_t
21628 + * @new_val: value to assign
21629 + *
21630 + * Atomically sets the value of @ptr to @new_val.
21631 + */
21632 +void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
21633 +{
21634 + atomic64_xchg_unchecked(ptr, new_val);
21635 +}
21636 +EXPORT_SYMBOL(atomic64_set_unchecked);
21637 +
21638 +/**
21639 * atomic64_add_return - add and return
21640 * @delta: integer value to add
21641 * @ptr: pointer to type atomic64_t
21642 @@ -99,24 +147,72 @@ noinline u64 atomic64_add_return(u64 delta, atomic64_t *ptr)
21643 }
21644 EXPORT_SYMBOL(atomic64_add_return);
21645
21646 +/**
21647 + * atomic64_add_return_unchecked - add and return
21648 + * @delta: integer value to add
21649 + * @ptr: pointer to type atomic64_unchecked_t
21650 + *
21651 + * Atomically adds @delta to @ptr and returns @delta + *@ptr
21652 + */
21653 +noinline u64 atomic64_add_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
21654 +{
21655 + /*
21656 + * Try first with a (possibly incorrect) assumption about
21657 + * what we have there. We'll do two loops most likely,
21658 + * but we'll get an ownership MESI transaction straight away
21659 + * instead of a read transaction followed by a
21660 + * flush-for-ownership transaction:
21661 + */
21662 + u64 old_val, new_val, real_val = 0;
21663 +
21664 + do {
21665 + old_val = real_val;
21666 + new_val = old_val + delta;
21667 +
21668 + real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
21669 +
21670 + } while (real_val != old_val);
21671 +
21672 + return new_val;
21673 +}
21674 +EXPORT_SYMBOL(atomic64_add_return_unchecked);
21675 +
21676 u64 atomic64_sub_return(u64 delta, atomic64_t *ptr)
21677 {
21678 return atomic64_add_return(-delta, ptr);
21679 }
21680 EXPORT_SYMBOL(atomic64_sub_return);
21681
21682 +u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
21683 +{
21684 + return atomic64_add_return_unchecked(-delta, ptr);
21685 +}
21686 +EXPORT_SYMBOL(atomic64_sub_return_unchecked);
21687 +
21688 u64 atomic64_inc_return(atomic64_t *ptr)
21689 {
21690 return atomic64_add_return(1, ptr);
21691 }
21692 EXPORT_SYMBOL(atomic64_inc_return);
21693
21694 +u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr)
21695 +{
21696 + return atomic64_add_return_unchecked(1, ptr);
21697 +}
21698 +EXPORT_SYMBOL(atomic64_inc_return_unchecked);
21699 +
21700 u64 atomic64_dec_return(atomic64_t *ptr)
21701 {
21702 return atomic64_sub_return(1, ptr);
21703 }
21704 EXPORT_SYMBOL(atomic64_dec_return);
21705
21706 +u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr)
21707 +{
21708 + return atomic64_sub_return_unchecked(1, ptr);
21709 +}
21710 +EXPORT_SYMBOL(atomic64_dec_return_unchecked);
21711 +
21712 /**
21713 * atomic64_add - add integer to atomic64 variable
21714 * @delta: integer value to add
21715 @@ -131,6 +227,19 @@ void atomic64_add(u64 delta, atomic64_t *ptr)
21716 EXPORT_SYMBOL(atomic64_add);
21717
21718 /**
21719 + * atomic64_add_unchecked - add integer to atomic64 variable
21720 + * @delta: integer value to add
21721 + * @ptr: pointer to type atomic64_unchecked_t
21722 + *
21723 + * Atomically adds @delta to @ptr.
21724 + */
21725 +void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr)
21726 +{
21727 + atomic64_add_return_unchecked(delta, ptr);
21728 +}
21729 +EXPORT_SYMBOL(atomic64_add_unchecked);
21730 +
21731 +/**
21732 * atomic64_sub - subtract the atomic64 variable
21733 * @delta: integer value to subtract
21734 * @ptr: pointer to type atomic64_t
21735 @@ -144,6 +253,19 @@ void atomic64_sub(u64 delta, atomic64_t *ptr)
21736 EXPORT_SYMBOL(atomic64_sub);
21737
21738 /**
21739 + * atomic64_sub_unchecked - subtract the atomic64 variable
21740 + * @delta: integer value to subtract
21741 + * @ptr: pointer to type atomic64_unchecked_t
21742 + *
21743 + * Atomically subtracts @delta from @ptr.
21744 + */
21745 +void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr)
21746 +{
21747 + atomic64_add_unchecked(-delta, ptr);
21748 +}
21749 +EXPORT_SYMBOL(atomic64_sub_unchecked);
21750 +
21751 +/**
21752 * atomic64_sub_and_test - subtract value from variable and test result
21753 * @delta: integer value to subtract
21754 * @ptr: pointer to type atomic64_t
21755 @@ -173,6 +295,18 @@ void atomic64_inc(atomic64_t *ptr)
21756 EXPORT_SYMBOL(atomic64_inc);
21757
21758 /**
21759 + * atomic64_inc_unchecked - increment atomic64 variable
21760 + * @ptr: pointer to type atomic64_unchecked_t
21761 + *
21762 + * Atomically increments @ptr by 1.
21763 + */
21764 +void atomic64_inc_unchecked(atomic64_unchecked_t *ptr)
21765 +{
21766 + atomic64_add_unchecked(1, ptr);
21767 +}
21768 +EXPORT_SYMBOL(atomic64_inc_unchecked);
21769 +
21770 +/**
21771 * atomic64_dec - decrement atomic64 variable
21772 * @ptr: pointer to type atomic64_t
21773 *
21774 @@ -185,6 +319,18 @@ void atomic64_dec(atomic64_t *ptr)
21775 EXPORT_SYMBOL(atomic64_dec);
21776
21777 /**
21778 + * atomic64_dec_unchecked - decrement atomic64 variable
21779 + * @ptr: pointer to type atomic64_unchecked_t
21780 + *
21781 + * Atomically decrements @ptr by 1.
21782 + */
21783 +void atomic64_dec_unchecked(atomic64_unchecked_t *ptr)
21784 +{
21785 + atomic64_sub_unchecked(1, ptr);
21786 +}
21787 +EXPORT_SYMBOL(atomic64_dec_unchecked);
21788 +
21789 +/**
21790 * atomic64_dec_and_test - decrement and test
21791 * @ptr: pointer to type atomic64_t
21792 *
21793 diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
21794 index adbccd0..98f96c8 100644
21795 --- a/arch/x86/lib/checksum_32.S
21796 +++ b/arch/x86/lib/checksum_32.S
21797 @@ -28,7 +28,8 @@
21798 #include <linux/linkage.h>
21799 #include <asm/dwarf2.h>
21800 #include <asm/errno.h>
21801 -
21802 +#include <asm/segment.h>
21803 +
21804 /*
21805 * computes a partial checksum, e.g. for TCP/UDP fragments
21806 */
21807 @@ -304,9 +305,28 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
21808
21809 #define ARGBASE 16
21810 #define FP 12
21811 -
21812 -ENTRY(csum_partial_copy_generic)
21813 +
21814 +ENTRY(csum_partial_copy_generic_to_user)
21815 CFI_STARTPROC
21816 +
21817 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21818 + pushl %gs
21819 + CFI_ADJUST_CFA_OFFSET 4
21820 + popl %es
21821 + CFI_ADJUST_CFA_OFFSET -4
21822 + jmp csum_partial_copy_generic
21823 +#endif
21824 +
21825 +ENTRY(csum_partial_copy_generic_from_user)
21826 +
21827 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21828 + pushl %gs
21829 + CFI_ADJUST_CFA_OFFSET 4
21830 + popl %ds
21831 + CFI_ADJUST_CFA_OFFSET -4
21832 +#endif
21833 +
21834 +ENTRY(csum_partial_copy_generic)
21835 subl $4,%esp
21836 CFI_ADJUST_CFA_OFFSET 4
21837 pushl %edi
21838 @@ -331,7 +351,7 @@ ENTRY(csum_partial_copy_generic)
21839 jmp 4f
21840 SRC(1: movw (%esi), %bx )
21841 addl $2, %esi
21842 -DST( movw %bx, (%edi) )
21843 +DST( movw %bx, %es:(%edi) )
21844 addl $2, %edi
21845 addw %bx, %ax
21846 adcl $0, %eax
21847 @@ -343,30 +363,30 @@ DST( movw %bx, (%edi) )
21848 SRC(1: movl (%esi), %ebx )
21849 SRC( movl 4(%esi), %edx )
21850 adcl %ebx, %eax
21851 -DST( movl %ebx, (%edi) )
21852 +DST( movl %ebx, %es:(%edi) )
21853 adcl %edx, %eax
21854 -DST( movl %edx, 4(%edi) )
21855 +DST( movl %edx, %es:4(%edi) )
21856
21857 SRC( movl 8(%esi), %ebx )
21858 SRC( movl 12(%esi), %edx )
21859 adcl %ebx, %eax
21860 -DST( movl %ebx, 8(%edi) )
21861 +DST( movl %ebx, %es:8(%edi) )
21862 adcl %edx, %eax
21863 -DST( movl %edx, 12(%edi) )
21864 +DST( movl %edx, %es:12(%edi) )
21865
21866 SRC( movl 16(%esi), %ebx )
21867 SRC( movl 20(%esi), %edx )
21868 adcl %ebx, %eax
21869 -DST( movl %ebx, 16(%edi) )
21870 +DST( movl %ebx, %es:16(%edi) )
21871 adcl %edx, %eax
21872 -DST( movl %edx, 20(%edi) )
21873 +DST( movl %edx, %es:20(%edi) )
21874
21875 SRC( movl 24(%esi), %ebx )
21876 SRC( movl 28(%esi), %edx )
21877 adcl %ebx, %eax
21878 -DST( movl %ebx, 24(%edi) )
21879 +DST( movl %ebx, %es:24(%edi) )
21880 adcl %edx, %eax
21881 -DST( movl %edx, 28(%edi) )
21882 +DST( movl %edx, %es:28(%edi) )
21883
21884 lea 32(%esi), %esi
21885 lea 32(%edi), %edi
21886 @@ -380,7 +400,7 @@ DST( movl %edx, 28(%edi) )
21887 shrl $2, %edx # This clears CF
21888 SRC(3: movl (%esi), %ebx )
21889 adcl %ebx, %eax
21890 -DST( movl %ebx, (%edi) )
21891 +DST( movl %ebx, %es:(%edi) )
21892 lea 4(%esi), %esi
21893 lea 4(%edi), %edi
21894 dec %edx
21895 @@ -392,12 +412,12 @@ DST( movl %ebx, (%edi) )
21896 jb 5f
21897 SRC( movw (%esi), %cx )
21898 leal 2(%esi), %esi
21899 -DST( movw %cx, (%edi) )
21900 +DST( movw %cx, %es:(%edi) )
21901 leal 2(%edi), %edi
21902 je 6f
21903 shll $16,%ecx
21904 SRC(5: movb (%esi), %cl )
21905 -DST( movb %cl, (%edi) )
21906 +DST( movb %cl, %es:(%edi) )
21907 6: addl %ecx, %eax
21908 adcl $0, %eax
21909 7:
21910 @@ -408,7 +428,7 @@ DST( movb %cl, (%edi) )
21911
21912 6001:
21913 movl ARGBASE+20(%esp), %ebx # src_err_ptr
21914 - movl $-EFAULT, (%ebx)
21915 + movl $-EFAULT, %ss:(%ebx)
21916
21917 # zero the complete destination - computing the rest
21918 # is too much work
21919 @@ -421,11 +441,19 @@ DST( movb %cl, (%edi) )
21920
21921 6002:
21922 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
21923 - movl $-EFAULT,(%ebx)
21924 + movl $-EFAULT,%ss:(%ebx)
21925 jmp 5000b
21926
21927 .previous
21928
21929 + pushl %ss
21930 + CFI_ADJUST_CFA_OFFSET 4
21931 + popl %ds
21932 + CFI_ADJUST_CFA_OFFSET -4
21933 + pushl %ss
21934 + CFI_ADJUST_CFA_OFFSET 4
21935 + popl %es
21936 + CFI_ADJUST_CFA_OFFSET -4
21937 popl %ebx
21938 CFI_ADJUST_CFA_OFFSET -4
21939 CFI_RESTORE ebx
21940 @@ -439,26 +467,47 @@ DST( movb %cl, (%edi) )
21941 CFI_ADJUST_CFA_OFFSET -4
21942 ret
21943 CFI_ENDPROC
21944 -ENDPROC(csum_partial_copy_generic)
21945 +ENDPROC(csum_partial_copy_generic_to_user)
21946
21947 #else
21948
21949 /* Version for PentiumII/PPro */
21950
21951 #define ROUND1(x) \
21952 + nop; nop; nop; \
21953 SRC(movl x(%esi), %ebx ) ; \
21954 addl %ebx, %eax ; \
21955 - DST(movl %ebx, x(%edi) ) ;
21956 + DST(movl %ebx, %es:x(%edi)) ;
21957
21958 #define ROUND(x) \
21959 + nop; nop; nop; \
21960 SRC(movl x(%esi), %ebx ) ; \
21961 adcl %ebx, %eax ; \
21962 - DST(movl %ebx, x(%edi) ) ;
21963 + DST(movl %ebx, %es:x(%edi)) ;
21964
21965 #define ARGBASE 12
21966 -
21967 -ENTRY(csum_partial_copy_generic)
21968 +
21969 +ENTRY(csum_partial_copy_generic_to_user)
21970 CFI_STARTPROC
21971 +
21972 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21973 + pushl %gs
21974 + CFI_ADJUST_CFA_OFFSET 4
21975 + popl %es
21976 + CFI_ADJUST_CFA_OFFSET -4
21977 + jmp csum_partial_copy_generic
21978 +#endif
21979 +
21980 +ENTRY(csum_partial_copy_generic_from_user)
21981 +
21982 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21983 + pushl %gs
21984 + CFI_ADJUST_CFA_OFFSET 4
21985 + popl %ds
21986 + CFI_ADJUST_CFA_OFFSET -4
21987 +#endif
21988 +
21989 +ENTRY(csum_partial_copy_generic)
21990 pushl %ebx
21991 CFI_ADJUST_CFA_OFFSET 4
21992 CFI_REL_OFFSET ebx, 0
21993 @@ -482,7 +531,7 @@ ENTRY(csum_partial_copy_generic)
21994 subl %ebx, %edi
21995 lea -1(%esi),%edx
21996 andl $-32,%edx
21997 - lea 3f(%ebx,%ebx), %ebx
21998 + lea 3f(%ebx,%ebx,2), %ebx
21999 testl %esi, %esi
22000 jmp *%ebx
22001 1: addl $64,%esi
22002 @@ -503,19 +552,19 @@ ENTRY(csum_partial_copy_generic)
22003 jb 5f
22004 SRC( movw (%esi), %dx )
22005 leal 2(%esi), %esi
22006 -DST( movw %dx, (%edi) )
22007 +DST( movw %dx, %es:(%edi) )
22008 leal 2(%edi), %edi
22009 je 6f
22010 shll $16,%edx
22011 5:
22012 SRC( movb (%esi), %dl )
22013 -DST( movb %dl, (%edi) )
22014 +DST( movb %dl, %es:(%edi) )
22015 6: addl %edx, %eax
22016 adcl $0, %eax
22017 7:
22018 .section .fixup, "ax"
22019 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
22020 - movl $-EFAULT, (%ebx)
22021 + movl $-EFAULT, %ss:(%ebx)
22022 # zero the complete destination (computing the rest is too much work)
22023 movl ARGBASE+8(%esp),%edi # dst
22024 movl ARGBASE+12(%esp),%ecx # len
22025 @@ -523,10 +572,21 @@ DST( movb %dl, (%edi) )
22026 rep; stosb
22027 jmp 7b
22028 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
22029 - movl $-EFAULT, (%ebx)
22030 + movl $-EFAULT, %ss:(%ebx)
22031 jmp 7b
22032 .previous
22033
22034 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22035 + pushl %ss
22036 + CFI_ADJUST_CFA_OFFSET 4
22037 + popl %ds
22038 + CFI_ADJUST_CFA_OFFSET -4
22039 + pushl %ss
22040 + CFI_ADJUST_CFA_OFFSET 4
22041 + popl %es
22042 + CFI_ADJUST_CFA_OFFSET -4
22043 +#endif
22044 +
22045 popl %esi
22046 CFI_ADJUST_CFA_OFFSET -4
22047 CFI_RESTORE esi
22048 @@ -538,7 +598,7 @@ DST( movb %dl, (%edi) )
22049 CFI_RESTORE ebx
22050 ret
22051 CFI_ENDPROC
22052 -ENDPROC(csum_partial_copy_generic)
22053 +ENDPROC(csum_partial_copy_generic_to_user)
22054
22055 #undef ROUND
22056 #undef ROUND1
22057 diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
22058 index ebeafcc..1e3a402 100644
22059 --- a/arch/x86/lib/clear_page_64.S
22060 +++ b/arch/x86/lib/clear_page_64.S
22061 @@ -1,5 +1,6 @@
22062 #include <linux/linkage.h>
22063 #include <asm/dwarf2.h>
22064 +#include <asm/alternative-asm.h>
22065
22066 /*
22067 * Zero a page.
22068 @@ -10,6 +11,7 @@ ENTRY(clear_page_c)
22069 movl $4096/8,%ecx
22070 xorl %eax,%eax
22071 rep stosq
22072 + pax_force_retaddr
22073 ret
22074 CFI_ENDPROC
22075 ENDPROC(clear_page_c)
22076 @@ -33,6 +35,7 @@ ENTRY(clear_page)
22077 leaq 64(%rdi),%rdi
22078 jnz .Lloop
22079 nop
22080 + pax_force_retaddr
22081 ret
22082 CFI_ENDPROC
22083 .Lclear_page_end:
22084 @@ -43,7 +46,7 @@ ENDPROC(clear_page)
22085
22086 #include <asm/cpufeature.h>
22087
22088 - .section .altinstr_replacement,"ax"
22089 + .section .altinstr_replacement,"a"
22090 1: .byte 0xeb /* jmp <disp8> */
22091 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
22092 2:
22093 diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
22094 index 727a5d4..333818a 100644
22095 --- a/arch/x86/lib/copy_page_64.S
22096 +++ b/arch/x86/lib/copy_page_64.S
22097 @@ -2,12 +2,14 @@
22098
22099 #include <linux/linkage.h>
22100 #include <asm/dwarf2.h>
22101 +#include <asm/alternative-asm.h>
22102
22103 ALIGN
22104 copy_page_c:
22105 CFI_STARTPROC
22106 movl $4096/8,%ecx
22107 rep movsq
22108 + pax_force_retaddr
22109 ret
22110 CFI_ENDPROC
22111 ENDPROC(copy_page_c)
22112 @@ -38,7 +40,7 @@ ENTRY(copy_page)
22113 movq 16 (%rsi), %rdx
22114 movq 24 (%rsi), %r8
22115 movq 32 (%rsi), %r9
22116 - movq 40 (%rsi), %r10
22117 + movq 40 (%rsi), %r13
22118 movq 48 (%rsi), %r11
22119 movq 56 (%rsi), %r12
22120
22121 @@ -49,7 +51,7 @@ ENTRY(copy_page)
22122 movq %rdx, 16 (%rdi)
22123 movq %r8, 24 (%rdi)
22124 movq %r9, 32 (%rdi)
22125 - movq %r10, 40 (%rdi)
22126 + movq %r13, 40 (%rdi)
22127 movq %r11, 48 (%rdi)
22128 movq %r12, 56 (%rdi)
22129
22130 @@ -68,7 +70,7 @@ ENTRY(copy_page)
22131 movq 16 (%rsi), %rdx
22132 movq 24 (%rsi), %r8
22133 movq 32 (%rsi), %r9
22134 - movq 40 (%rsi), %r10
22135 + movq 40 (%rsi), %r13
22136 movq 48 (%rsi), %r11
22137 movq 56 (%rsi), %r12
22138
22139 @@ -77,7 +79,7 @@ ENTRY(copy_page)
22140 movq %rdx, 16 (%rdi)
22141 movq %r8, 24 (%rdi)
22142 movq %r9, 32 (%rdi)
22143 - movq %r10, 40 (%rdi)
22144 + movq %r13, 40 (%rdi)
22145 movq %r11, 48 (%rdi)
22146 movq %r12, 56 (%rdi)
22147
22148 @@ -94,6 +96,7 @@ ENTRY(copy_page)
22149 CFI_RESTORE r13
22150 addq $3*8,%rsp
22151 CFI_ADJUST_CFA_OFFSET -3*8
22152 + pax_force_retaddr
22153 ret
22154 .Lcopy_page_end:
22155 CFI_ENDPROC
22156 @@ -104,7 +107,7 @@ ENDPROC(copy_page)
22157
22158 #include <asm/cpufeature.h>
22159
22160 - .section .altinstr_replacement,"ax"
22161 + .section .altinstr_replacement,"a"
22162 1: .byte 0xeb /* jmp <disp8> */
22163 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
22164 2:
22165 diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
22166 index af8debd..40c75f3 100644
22167 --- a/arch/x86/lib/copy_user_64.S
22168 +++ b/arch/x86/lib/copy_user_64.S
22169 @@ -15,13 +15,15 @@
22170 #include <asm/asm-offsets.h>
22171 #include <asm/thread_info.h>
22172 #include <asm/cpufeature.h>
22173 +#include <asm/pgtable.h>
22174 +#include <asm/alternative-asm.h>
22175
22176 .macro ALTERNATIVE_JUMP feature,orig,alt
22177 0:
22178 .byte 0xe9 /* 32bit jump */
22179 .long \orig-1f /* by default jump to orig */
22180 1:
22181 - .section .altinstr_replacement,"ax"
22182 + .section .altinstr_replacement,"a"
22183 2: .byte 0xe9 /* near jump with 32bit immediate */
22184 .long \alt-1b /* offset */ /* or alternatively to alt */
22185 .previous
22186 @@ -64,55 +66,26 @@
22187 #endif
22188 .endm
22189
22190 -/* Standard copy_to_user with segment limit checking */
22191 -ENTRY(copy_to_user)
22192 - CFI_STARTPROC
22193 - GET_THREAD_INFO(%rax)
22194 - movq %rdi,%rcx
22195 - addq %rdx,%rcx
22196 - jc bad_to_user
22197 - cmpq TI_addr_limit(%rax),%rcx
22198 - ja bad_to_user
22199 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
22200 - CFI_ENDPROC
22201 -ENDPROC(copy_to_user)
22202 -
22203 -/* Standard copy_from_user with segment limit checking */
22204 -ENTRY(copy_from_user)
22205 - CFI_STARTPROC
22206 - GET_THREAD_INFO(%rax)
22207 - movq %rsi,%rcx
22208 - addq %rdx,%rcx
22209 - jc bad_from_user
22210 - cmpq TI_addr_limit(%rax),%rcx
22211 - ja bad_from_user
22212 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
22213 - CFI_ENDPROC
22214 -ENDPROC(copy_from_user)
22215 -
22216 ENTRY(copy_user_generic)
22217 CFI_STARTPROC
22218 ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
22219 CFI_ENDPROC
22220 ENDPROC(copy_user_generic)
22221
22222 -ENTRY(__copy_from_user_inatomic)
22223 - CFI_STARTPROC
22224 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
22225 - CFI_ENDPROC
22226 -ENDPROC(__copy_from_user_inatomic)
22227 -
22228 .section .fixup,"ax"
22229 /* must zero dest */
22230 ENTRY(bad_from_user)
22231 bad_from_user:
22232 CFI_STARTPROC
22233 + testl %edx,%edx
22234 + js bad_to_user
22235 movl %edx,%ecx
22236 xorl %eax,%eax
22237 rep
22238 stosb
22239 bad_to_user:
22240 movl %edx,%eax
22241 + pax_force_retaddr
22242 ret
22243 CFI_ENDPROC
22244 ENDPROC(bad_from_user)
22245 @@ -142,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
22246 jz 17f
22247 1: movq (%rsi),%r8
22248 2: movq 1*8(%rsi),%r9
22249 -3: movq 2*8(%rsi),%r10
22250 +3: movq 2*8(%rsi),%rax
22251 4: movq 3*8(%rsi),%r11
22252 5: movq %r8,(%rdi)
22253 6: movq %r9,1*8(%rdi)
22254 -7: movq %r10,2*8(%rdi)
22255 +7: movq %rax,2*8(%rdi)
22256 8: movq %r11,3*8(%rdi)
22257 9: movq 4*8(%rsi),%r8
22258 10: movq 5*8(%rsi),%r9
22259 -11: movq 6*8(%rsi),%r10
22260 +11: movq 6*8(%rsi),%rax
22261 12: movq 7*8(%rsi),%r11
22262 13: movq %r8,4*8(%rdi)
22263 14: movq %r9,5*8(%rdi)
22264 -15: movq %r10,6*8(%rdi)
22265 +15: movq %rax,6*8(%rdi)
22266 16: movq %r11,7*8(%rdi)
22267 leaq 64(%rsi),%rsi
22268 leaq 64(%rdi),%rdi
22269 @@ -180,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
22270 decl %ecx
22271 jnz 21b
22272 23: xor %eax,%eax
22273 + pax_force_retaddr
22274 ret
22275
22276 .section .fixup,"ax"
22277 @@ -252,6 +226,7 @@ ENTRY(copy_user_generic_string)
22278 3: rep
22279 movsb
22280 4: xorl %eax,%eax
22281 + pax_force_retaddr
22282 ret
22283
22284 .section .fixup,"ax"
22285 diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
22286 index cb0c112..e3a6895 100644
22287 --- a/arch/x86/lib/copy_user_nocache_64.S
22288 +++ b/arch/x86/lib/copy_user_nocache_64.S
22289 @@ -8,12 +8,14 @@
22290
22291 #include <linux/linkage.h>
22292 #include <asm/dwarf2.h>
22293 +#include <asm/alternative-asm.h>
22294
22295 #define FIX_ALIGNMENT 1
22296
22297 #include <asm/current.h>
22298 #include <asm/asm-offsets.h>
22299 #include <asm/thread_info.h>
22300 +#include <asm/pgtable.h>
22301
22302 .macro ALIGN_DESTINATION
22303 #ifdef FIX_ALIGNMENT
22304 @@ -50,6 +52,15 @@
22305 */
22306 ENTRY(__copy_user_nocache)
22307 CFI_STARTPROC
22308 +
22309 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22310 + mov $PAX_USER_SHADOW_BASE,%rcx
22311 + cmp %rcx,%rsi
22312 + jae 1f
22313 + add %rcx,%rsi
22314 +1:
22315 +#endif
22316 +
22317 cmpl $8,%edx
22318 jb 20f /* less then 8 bytes, go to byte copy loop */
22319 ALIGN_DESTINATION
22320 @@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
22321 jz 17f
22322 1: movq (%rsi),%r8
22323 2: movq 1*8(%rsi),%r9
22324 -3: movq 2*8(%rsi),%r10
22325 +3: movq 2*8(%rsi),%rax
22326 4: movq 3*8(%rsi),%r11
22327 5: movnti %r8,(%rdi)
22328 6: movnti %r9,1*8(%rdi)
22329 -7: movnti %r10,2*8(%rdi)
22330 +7: movnti %rax,2*8(%rdi)
22331 8: movnti %r11,3*8(%rdi)
22332 9: movq 4*8(%rsi),%r8
22333 10: movq 5*8(%rsi),%r9
22334 -11: movq 6*8(%rsi),%r10
22335 +11: movq 6*8(%rsi),%rax
22336 12: movq 7*8(%rsi),%r11
22337 13: movnti %r8,4*8(%rdi)
22338 14: movnti %r9,5*8(%rdi)
22339 -15: movnti %r10,6*8(%rdi)
22340 +15: movnti %rax,6*8(%rdi)
22341 16: movnti %r11,7*8(%rdi)
22342 leaq 64(%rsi),%rsi
22343 leaq 64(%rdi),%rdi
22344 @@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
22345 jnz 21b
22346 23: xorl %eax,%eax
22347 sfence
22348 + pax_force_retaddr
22349 ret
22350
22351 .section .fixup,"ax"
22352 diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
22353 index f0dba36..48cb4d6 100644
22354 --- a/arch/x86/lib/csum-copy_64.S
22355 +++ b/arch/x86/lib/csum-copy_64.S
22356 @@ -8,6 +8,7 @@
22357 #include <linux/linkage.h>
22358 #include <asm/dwarf2.h>
22359 #include <asm/errno.h>
22360 +#include <asm/alternative-asm.h>
22361
22362 /*
22363 * Checksum copy with exception handling.
22364 @@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
22365 CFI_RESTORE rbp
22366 addq $7*8,%rsp
22367 CFI_ADJUST_CFA_OFFSET -7*8
22368 + pax_force_retaddr 0, 1
22369 ret
22370 CFI_RESTORE_STATE
22371
22372 diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
22373 index 459b58a..9570bc7 100644
22374 --- a/arch/x86/lib/csum-wrappers_64.c
22375 +++ b/arch/x86/lib/csum-wrappers_64.c
22376 @@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
22377 len -= 2;
22378 }
22379 }
22380 - isum = csum_partial_copy_generic((__force const void *)src,
22381 +
22382 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22383 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
22384 + src += PAX_USER_SHADOW_BASE;
22385 +#endif
22386 +
22387 + isum = csum_partial_copy_generic((const void __force_kernel *)src,
22388 dst, len, isum, errp, NULL);
22389 if (unlikely(*errp))
22390 goto out_err;
22391 @@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
22392 }
22393
22394 *errp = 0;
22395 - return csum_partial_copy_generic(src, (void __force *)dst,
22396 +
22397 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22398 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
22399 + dst += PAX_USER_SHADOW_BASE;
22400 +#endif
22401 +
22402 + return csum_partial_copy_generic(src, (void __force_kernel *)dst,
22403 len, isum, NULL, errp);
22404 }
22405 EXPORT_SYMBOL(csum_partial_copy_to_user);
22406 diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
22407 index 51f1504..ddac4c1 100644
22408 --- a/arch/x86/lib/getuser.S
22409 +++ b/arch/x86/lib/getuser.S
22410 @@ -33,15 +33,38 @@
22411 #include <asm/asm-offsets.h>
22412 #include <asm/thread_info.h>
22413 #include <asm/asm.h>
22414 +#include <asm/segment.h>
22415 +#include <asm/pgtable.h>
22416 +#include <asm/alternative-asm.h>
22417 +
22418 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
22419 +#define __copyuser_seg gs;
22420 +#else
22421 +#define __copyuser_seg
22422 +#endif
22423
22424 .text
22425 ENTRY(__get_user_1)
22426 CFI_STARTPROC
22427 +
22428 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22429 GET_THREAD_INFO(%_ASM_DX)
22430 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22431 jae bad_get_user
22432 -1: movzb (%_ASM_AX),%edx
22433 +
22434 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22435 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22436 + cmp %_ASM_DX,%_ASM_AX
22437 + jae 1234f
22438 + add %_ASM_DX,%_ASM_AX
22439 +1234:
22440 +#endif
22441 +
22442 +#endif
22443 +
22444 +1: __copyuser_seg movzb (%_ASM_AX),%edx
22445 xor %eax,%eax
22446 + pax_force_retaddr
22447 ret
22448 CFI_ENDPROC
22449 ENDPROC(__get_user_1)
22450 @@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
22451 ENTRY(__get_user_2)
22452 CFI_STARTPROC
22453 add $1,%_ASM_AX
22454 +
22455 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22456 jc bad_get_user
22457 GET_THREAD_INFO(%_ASM_DX)
22458 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22459 jae bad_get_user
22460 -2: movzwl -1(%_ASM_AX),%edx
22461 +
22462 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22463 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22464 + cmp %_ASM_DX,%_ASM_AX
22465 + jae 1234f
22466 + add %_ASM_DX,%_ASM_AX
22467 +1234:
22468 +#endif
22469 +
22470 +#endif
22471 +
22472 +2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
22473 xor %eax,%eax
22474 + pax_force_retaddr
22475 ret
22476 CFI_ENDPROC
22477 ENDPROC(__get_user_2)
22478 @@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
22479 ENTRY(__get_user_4)
22480 CFI_STARTPROC
22481 add $3,%_ASM_AX
22482 +
22483 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22484 jc bad_get_user
22485 GET_THREAD_INFO(%_ASM_DX)
22486 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22487 jae bad_get_user
22488 -3: mov -3(%_ASM_AX),%edx
22489 +
22490 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22491 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22492 + cmp %_ASM_DX,%_ASM_AX
22493 + jae 1234f
22494 + add %_ASM_DX,%_ASM_AX
22495 +1234:
22496 +#endif
22497 +
22498 +#endif
22499 +
22500 +3: __copyuser_seg mov -3(%_ASM_AX),%edx
22501 xor %eax,%eax
22502 + pax_force_retaddr
22503 ret
22504 CFI_ENDPROC
22505 ENDPROC(__get_user_4)
22506 @@ -80,8 +131,18 @@ ENTRY(__get_user_8)
22507 GET_THREAD_INFO(%_ASM_DX)
22508 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22509 jae bad_get_user
22510 +
22511 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22512 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22513 + cmp %_ASM_DX,%_ASM_AX
22514 + jae 1234f
22515 + add %_ASM_DX,%_ASM_AX
22516 +1234:
22517 +#endif
22518 +
22519 4: movq -7(%_ASM_AX),%_ASM_DX
22520 xor %eax,%eax
22521 + pax_force_retaddr
22522 ret
22523 CFI_ENDPROC
22524 ENDPROC(__get_user_8)
22525 @@ -91,6 +152,7 @@ bad_get_user:
22526 CFI_STARTPROC
22527 xor %edx,%edx
22528 mov $(-EFAULT),%_ASM_AX
22529 + pax_force_retaddr
22530 ret
22531 CFI_ENDPROC
22532 END(bad_get_user)
22533 diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
22534 index 05a95e7..326f2fa 100644
22535 --- a/arch/x86/lib/iomap_copy_64.S
22536 +++ b/arch/x86/lib/iomap_copy_64.S
22537 @@ -17,6 +17,7 @@
22538
22539 #include <linux/linkage.h>
22540 #include <asm/dwarf2.h>
22541 +#include <asm/alternative-asm.h>
22542
22543 /*
22544 * override generic version in lib/iomap_copy.c
22545 @@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
22546 CFI_STARTPROC
22547 movl %edx,%ecx
22548 rep movsd
22549 + pax_force_retaddr
22550 ret
22551 CFI_ENDPROC
22552 ENDPROC(__iowrite32_copy)
22553 diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
22554 index ad5441e..610e351 100644
22555 --- a/arch/x86/lib/memcpy_64.S
22556 +++ b/arch/x86/lib/memcpy_64.S
22557 @@ -4,6 +4,7 @@
22558
22559 #include <asm/cpufeature.h>
22560 #include <asm/dwarf2.h>
22561 +#include <asm/alternative-asm.h>
22562
22563 /*
22564 * memcpy - Copy a memory block.
22565 @@ -34,6 +35,7 @@ memcpy_c:
22566 rep movsq
22567 movl %edx, %ecx
22568 rep movsb
22569 + pax_force_retaddr
22570 ret
22571 CFI_ENDPROC
22572 ENDPROC(memcpy_c)
22573 @@ -118,6 +120,7 @@ ENTRY(memcpy)
22574 jnz .Lloop_1
22575
22576 .Lend:
22577 + pax_force_retaddr 0, 1
22578 ret
22579 CFI_ENDPROC
22580 ENDPROC(memcpy)
22581 @@ -128,7 +131,7 @@ ENDPROC(__memcpy)
22582 * It is also a lot simpler. Use this when possible:
22583 */
22584
22585 - .section .altinstr_replacement, "ax"
22586 + .section .altinstr_replacement, "a"
22587 1: .byte 0xeb /* jmp <disp8> */
22588 .byte (memcpy_c - memcpy) - (2f - 1b) /* offset */
22589 2:
22590 diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
22591 index 2c59481..7e9ba4e 100644
22592 --- a/arch/x86/lib/memset_64.S
22593 +++ b/arch/x86/lib/memset_64.S
22594 @@ -2,6 +2,7 @@
22595
22596 #include <linux/linkage.h>
22597 #include <asm/dwarf2.h>
22598 +#include <asm/alternative-asm.h>
22599
22600 /*
22601 * ISO C memset - set a memory block to a byte value.
22602 @@ -28,6 +29,7 @@ memset_c:
22603 movl %r8d,%ecx
22604 rep stosb
22605 movq %r9,%rax
22606 + pax_force_retaddr
22607 ret
22608 CFI_ENDPROC
22609 ENDPROC(memset_c)
22610 @@ -35,13 +37,13 @@ ENDPROC(memset_c)
22611 ENTRY(memset)
22612 ENTRY(__memset)
22613 CFI_STARTPROC
22614 - movq %rdi,%r10
22615 movq %rdx,%r11
22616
22617 /* expand byte value */
22618 movzbl %sil,%ecx
22619 movabs $0x0101010101010101,%rax
22620 mul %rcx /* with rax, clobbers rdx */
22621 + movq %rdi,%rdx
22622
22623 /* align dst */
22624 movl %edi,%r9d
22625 @@ -95,7 +97,8 @@ ENTRY(__memset)
22626 jnz .Lloop_1
22627
22628 .Lende:
22629 - movq %r10,%rax
22630 + movq %rdx,%rax
22631 + pax_force_retaddr
22632 ret
22633
22634 CFI_RESTORE_STATE
22635 @@ -118,7 +121,7 @@ ENDPROC(__memset)
22636
22637 #include <asm/cpufeature.h>
22638
22639 - .section .altinstr_replacement,"ax"
22640 + .section .altinstr_replacement,"a"
22641 1: .byte 0xeb /* jmp <disp8> */
22642 .byte (memset_c - memset) - (2f - 1b) /* offset */
22643 2:
22644 diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
22645 index c9f2d9b..e7fd2c0 100644
22646 --- a/arch/x86/lib/mmx_32.c
22647 +++ b/arch/x86/lib/mmx_32.c
22648 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
22649 {
22650 void *p;
22651 int i;
22652 + unsigned long cr0;
22653
22654 if (unlikely(in_interrupt()))
22655 return __memcpy(to, from, len);
22656 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
22657 kernel_fpu_begin();
22658
22659 __asm__ __volatile__ (
22660 - "1: prefetch (%0)\n" /* This set is 28 bytes */
22661 - " prefetch 64(%0)\n"
22662 - " prefetch 128(%0)\n"
22663 - " prefetch 192(%0)\n"
22664 - " prefetch 256(%0)\n"
22665 + "1: prefetch (%1)\n" /* This set is 28 bytes */
22666 + " prefetch 64(%1)\n"
22667 + " prefetch 128(%1)\n"
22668 + " prefetch 192(%1)\n"
22669 + " prefetch 256(%1)\n"
22670 "2: \n"
22671 ".section .fixup, \"ax\"\n"
22672 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22673 + "3: \n"
22674 +
22675 +#ifdef CONFIG_PAX_KERNEXEC
22676 + " movl %%cr0, %0\n"
22677 + " movl %0, %%eax\n"
22678 + " andl $0xFFFEFFFF, %%eax\n"
22679 + " movl %%eax, %%cr0\n"
22680 +#endif
22681 +
22682 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22683 +
22684 +#ifdef CONFIG_PAX_KERNEXEC
22685 + " movl %0, %%cr0\n"
22686 +#endif
22687 +
22688 " jmp 2b\n"
22689 ".previous\n"
22690 _ASM_EXTABLE(1b, 3b)
22691 - : : "r" (from));
22692 + : "=&r" (cr0) : "r" (from) : "ax");
22693
22694 for ( ; i > 5; i--) {
22695 __asm__ __volatile__ (
22696 - "1: prefetch 320(%0)\n"
22697 - "2: movq (%0), %%mm0\n"
22698 - " movq 8(%0), %%mm1\n"
22699 - " movq 16(%0), %%mm2\n"
22700 - " movq 24(%0), %%mm3\n"
22701 - " movq %%mm0, (%1)\n"
22702 - " movq %%mm1, 8(%1)\n"
22703 - " movq %%mm2, 16(%1)\n"
22704 - " movq %%mm3, 24(%1)\n"
22705 - " movq 32(%0), %%mm0\n"
22706 - " movq 40(%0), %%mm1\n"
22707 - " movq 48(%0), %%mm2\n"
22708 - " movq 56(%0), %%mm3\n"
22709 - " movq %%mm0, 32(%1)\n"
22710 - " movq %%mm1, 40(%1)\n"
22711 - " movq %%mm2, 48(%1)\n"
22712 - " movq %%mm3, 56(%1)\n"
22713 + "1: prefetch 320(%1)\n"
22714 + "2: movq (%1), %%mm0\n"
22715 + " movq 8(%1), %%mm1\n"
22716 + " movq 16(%1), %%mm2\n"
22717 + " movq 24(%1), %%mm3\n"
22718 + " movq %%mm0, (%2)\n"
22719 + " movq %%mm1, 8(%2)\n"
22720 + " movq %%mm2, 16(%2)\n"
22721 + " movq %%mm3, 24(%2)\n"
22722 + " movq 32(%1), %%mm0\n"
22723 + " movq 40(%1), %%mm1\n"
22724 + " movq 48(%1), %%mm2\n"
22725 + " movq 56(%1), %%mm3\n"
22726 + " movq %%mm0, 32(%2)\n"
22727 + " movq %%mm1, 40(%2)\n"
22728 + " movq %%mm2, 48(%2)\n"
22729 + " movq %%mm3, 56(%2)\n"
22730 ".section .fixup, \"ax\"\n"
22731 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22732 + "3:\n"
22733 +
22734 +#ifdef CONFIG_PAX_KERNEXEC
22735 + " movl %%cr0, %0\n"
22736 + " movl %0, %%eax\n"
22737 + " andl $0xFFFEFFFF, %%eax\n"
22738 + " movl %%eax, %%cr0\n"
22739 +#endif
22740 +
22741 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22742 +
22743 +#ifdef CONFIG_PAX_KERNEXEC
22744 + " movl %0, %%cr0\n"
22745 +#endif
22746 +
22747 " jmp 2b\n"
22748 ".previous\n"
22749 _ASM_EXTABLE(1b, 3b)
22750 - : : "r" (from), "r" (to) : "memory");
22751 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22752
22753 from += 64;
22754 to += 64;
22755 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
22756 static void fast_copy_page(void *to, void *from)
22757 {
22758 int i;
22759 + unsigned long cr0;
22760
22761 kernel_fpu_begin();
22762
22763 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
22764 * but that is for later. -AV
22765 */
22766 __asm__ __volatile__(
22767 - "1: prefetch (%0)\n"
22768 - " prefetch 64(%0)\n"
22769 - " prefetch 128(%0)\n"
22770 - " prefetch 192(%0)\n"
22771 - " prefetch 256(%0)\n"
22772 + "1: prefetch (%1)\n"
22773 + " prefetch 64(%1)\n"
22774 + " prefetch 128(%1)\n"
22775 + " prefetch 192(%1)\n"
22776 + " prefetch 256(%1)\n"
22777 "2: \n"
22778 ".section .fixup, \"ax\"\n"
22779 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22780 + "3: \n"
22781 +
22782 +#ifdef CONFIG_PAX_KERNEXEC
22783 + " movl %%cr0, %0\n"
22784 + " movl %0, %%eax\n"
22785 + " andl $0xFFFEFFFF, %%eax\n"
22786 + " movl %%eax, %%cr0\n"
22787 +#endif
22788 +
22789 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22790 +
22791 +#ifdef CONFIG_PAX_KERNEXEC
22792 + " movl %0, %%cr0\n"
22793 +#endif
22794 +
22795 " jmp 2b\n"
22796 ".previous\n"
22797 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
22798 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
22799
22800 for (i = 0; i < (4096-320)/64; i++) {
22801 __asm__ __volatile__ (
22802 - "1: prefetch 320(%0)\n"
22803 - "2: movq (%0), %%mm0\n"
22804 - " movntq %%mm0, (%1)\n"
22805 - " movq 8(%0), %%mm1\n"
22806 - " movntq %%mm1, 8(%1)\n"
22807 - " movq 16(%0), %%mm2\n"
22808 - " movntq %%mm2, 16(%1)\n"
22809 - " movq 24(%0), %%mm3\n"
22810 - " movntq %%mm3, 24(%1)\n"
22811 - " movq 32(%0), %%mm4\n"
22812 - " movntq %%mm4, 32(%1)\n"
22813 - " movq 40(%0), %%mm5\n"
22814 - " movntq %%mm5, 40(%1)\n"
22815 - " movq 48(%0), %%mm6\n"
22816 - " movntq %%mm6, 48(%1)\n"
22817 - " movq 56(%0), %%mm7\n"
22818 - " movntq %%mm7, 56(%1)\n"
22819 + "1: prefetch 320(%1)\n"
22820 + "2: movq (%1), %%mm0\n"
22821 + " movntq %%mm0, (%2)\n"
22822 + " movq 8(%1), %%mm1\n"
22823 + " movntq %%mm1, 8(%2)\n"
22824 + " movq 16(%1), %%mm2\n"
22825 + " movntq %%mm2, 16(%2)\n"
22826 + " movq 24(%1), %%mm3\n"
22827 + " movntq %%mm3, 24(%2)\n"
22828 + " movq 32(%1), %%mm4\n"
22829 + " movntq %%mm4, 32(%2)\n"
22830 + " movq 40(%1), %%mm5\n"
22831 + " movntq %%mm5, 40(%2)\n"
22832 + " movq 48(%1), %%mm6\n"
22833 + " movntq %%mm6, 48(%2)\n"
22834 + " movq 56(%1), %%mm7\n"
22835 + " movntq %%mm7, 56(%2)\n"
22836 ".section .fixup, \"ax\"\n"
22837 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22838 + "3:\n"
22839 +
22840 +#ifdef CONFIG_PAX_KERNEXEC
22841 + " movl %%cr0, %0\n"
22842 + " movl %0, %%eax\n"
22843 + " andl $0xFFFEFFFF, %%eax\n"
22844 + " movl %%eax, %%cr0\n"
22845 +#endif
22846 +
22847 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22848 +
22849 +#ifdef CONFIG_PAX_KERNEXEC
22850 + " movl %0, %%cr0\n"
22851 +#endif
22852 +
22853 " jmp 2b\n"
22854 ".previous\n"
22855 - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
22856 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22857
22858 from += 64;
22859 to += 64;
22860 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
22861 static void fast_copy_page(void *to, void *from)
22862 {
22863 int i;
22864 + unsigned long cr0;
22865
22866 kernel_fpu_begin();
22867
22868 __asm__ __volatile__ (
22869 - "1: prefetch (%0)\n"
22870 - " prefetch 64(%0)\n"
22871 - " prefetch 128(%0)\n"
22872 - " prefetch 192(%0)\n"
22873 - " prefetch 256(%0)\n"
22874 + "1: prefetch (%1)\n"
22875 + " prefetch 64(%1)\n"
22876 + " prefetch 128(%1)\n"
22877 + " prefetch 192(%1)\n"
22878 + " prefetch 256(%1)\n"
22879 "2: \n"
22880 ".section .fixup, \"ax\"\n"
22881 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22882 + "3: \n"
22883 +
22884 +#ifdef CONFIG_PAX_KERNEXEC
22885 + " movl %%cr0, %0\n"
22886 + " movl %0, %%eax\n"
22887 + " andl $0xFFFEFFFF, %%eax\n"
22888 + " movl %%eax, %%cr0\n"
22889 +#endif
22890 +
22891 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22892 +
22893 +#ifdef CONFIG_PAX_KERNEXEC
22894 + " movl %0, %%cr0\n"
22895 +#endif
22896 +
22897 " jmp 2b\n"
22898 ".previous\n"
22899 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
22900 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
22901
22902 for (i = 0; i < 4096/64; i++) {
22903 __asm__ __volatile__ (
22904 - "1: prefetch 320(%0)\n"
22905 - "2: movq (%0), %%mm0\n"
22906 - " movq 8(%0), %%mm1\n"
22907 - " movq 16(%0), %%mm2\n"
22908 - " movq 24(%0), %%mm3\n"
22909 - " movq %%mm0, (%1)\n"
22910 - " movq %%mm1, 8(%1)\n"
22911 - " movq %%mm2, 16(%1)\n"
22912 - " movq %%mm3, 24(%1)\n"
22913 - " movq 32(%0), %%mm0\n"
22914 - " movq 40(%0), %%mm1\n"
22915 - " movq 48(%0), %%mm2\n"
22916 - " movq 56(%0), %%mm3\n"
22917 - " movq %%mm0, 32(%1)\n"
22918 - " movq %%mm1, 40(%1)\n"
22919 - " movq %%mm2, 48(%1)\n"
22920 - " movq %%mm3, 56(%1)\n"
22921 + "1: prefetch 320(%1)\n"
22922 + "2: movq (%1), %%mm0\n"
22923 + " movq 8(%1), %%mm1\n"
22924 + " movq 16(%1), %%mm2\n"
22925 + " movq 24(%1), %%mm3\n"
22926 + " movq %%mm0, (%2)\n"
22927 + " movq %%mm1, 8(%2)\n"
22928 + " movq %%mm2, 16(%2)\n"
22929 + " movq %%mm3, 24(%2)\n"
22930 + " movq 32(%1), %%mm0\n"
22931 + " movq 40(%1), %%mm1\n"
22932 + " movq 48(%1), %%mm2\n"
22933 + " movq 56(%1), %%mm3\n"
22934 + " movq %%mm0, 32(%2)\n"
22935 + " movq %%mm1, 40(%2)\n"
22936 + " movq %%mm2, 48(%2)\n"
22937 + " movq %%mm3, 56(%2)\n"
22938 ".section .fixup, \"ax\"\n"
22939 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22940 + "3:\n"
22941 +
22942 +#ifdef CONFIG_PAX_KERNEXEC
22943 + " movl %%cr0, %0\n"
22944 + " movl %0, %%eax\n"
22945 + " andl $0xFFFEFFFF, %%eax\n"
22946 + " movl %%eax, %%cr0\n"
22947 +#endif
22948 +
22949 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22950 +
22951 +#ifdef CONFIG_PAX_KERNEXEC
22952 + " movl %0, %%cr0\n"
22953 +#endif
22954 +
22955 " jmp 2b\n"
22956 ".previous\n"
22957 _ASM_EXTABLE(1b, 3b)
22958 - : : "r" (from), "r" (to) : "memory");
22959 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22960
22961 from += 64;
22962 to += 64;
22963 diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
22964 index 69fa106..adda88b 100644
22965 --- a/arch/x86/lib/msr-reg.S
22966 +++ b/arch/x86/lib/msr-reg.S
22967 @@ -3,6 +3,7 @@
22968 #include <asm/dwarf2.h>
22969 #include <asm/asm.h>
22970 #include <asm/msr.h>
22971 +#include <asm/alternative-asm.h>
22972
22973 #ifdef CONFIG_X86_64
22974 /*
22975 @@ -16,7 +17,7 @@ ENTRY(native_\op\()_safe_regs)
22976 CFI_STARTPROC
22977 pushq_cfi %rbx
22978 pushq_cfi %rbp
22979 - movq %rdi, %r10 /* Save pointer */
22980 + movq %rdi, %r9 /* Save pointer */
22981 xorl %r11d, %r11d /* Return value */
22982 movl (%rdi), %eax
22983 movl 4(%rdi), %ecx
22984 @@ -27,16 +28,17 @@ ENTRY(native_\op\()_safe_regs)
22985 movl 28(%rdi), %edi
22986 CFI_REMEMBER_STATE
22987 1: \op
22988 -2: movl %eax, (%r10)
22989 +2: movl %eax, (%r9)
22990 movl %r11d, %eax /* Return value */
22991 - movl %ecx, 4(%r10)
22992 - movl %edx, 8(%r10)
22993 - movl %ebx, 12(%r10)
22994 - movl %ebp, 20(%r10)
22995 - movl %esi, 24(%r10)
22996 - movl %edi, 28(%r10)
22997 + movl %ecx, 4(%r9)
22998 + movl %edx, 8(%r9)
22999 + movl %ebx, 12(%r9)
23000 + movl %ebp, 20(%r9)
23001 + movl %esi, 24(%r9)
23002 + movl %edi, 28(%r9)
23003 popq_cfi %rbp
23004 popq_cfi %rbx
23005 + pax_force_retaddr
23006 ret
23007 3:
23008 CFI_RESTORE_STATE
23009 diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
23010 index 36b0d15..d381858 100644
23011 --- a/arch/x86/lib/putuser.S
23012 +++ b/arch/x86/lib/putuser.S
23013 @@ -15,7 +15,9 @@
23014 #include <asm/thread_info.h>
23015 #include <asm/errno.h>
23016 #include <asm/asm.h>
23017 -
23018 +#include <asm/segment.h>
23019 +#include <asm/pgtable.h>
23020 +#include <asm/alternative-asm.h>
23021
23022 /*
23023 * __put_user_X
23024 @@ -29,52 +31,119 @@
23025 * as they get called from within inline assembly.
23026 */
23027
23028 -#define ENTER CFI_STARTPROC ; \
23029 - GET_THREAD_INFO(%_ASM_BX)
23030 -#define EXIT ret ; \
23031 +#define ENTER CFI_STARTPROC
23032 +#define EXIT pax_force_retaddr; ret ; \
23033 CFI_ENDPROC
23034
23035 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23036 +#define _DEST %_ASM_CX,%_ASM_BX
23037 +#else
23038 +#define _DEST %_ASM_CX
23039 +#endif
23040 +
23041 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
23042 +#define __copyuser_seg gs;
23043 +#else
23044 +#define __copyuser_seg
23045 +#endif
23046 +
23047 .text
23048 ENTRY(__put_user_1)
23049 ENTER
23050 +
23051 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
23052 + GET_THREAD_INFO(%_ASM_BX)
23053 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
23054 jae bad_put_user
23055 -1: movb %al,(%_ASM_CX)
23056 +
23057 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23058 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
23059 + cmp %_ASM_BX,%_ASM_CX
23060 + jb 1234f
23061 + xor %ebx,%ebx
23062 +1234:
23063 +#endif
23064 +
23065 +#endif
23066 +
23067 +1: __copyuser_seg movb %al,(_DEST)
23068 xor %eax,%eax
23069 EXIT
23070 ENDPROC(__put_user_1)
23071
23072 ENTRY(__put_user_2)
23073 ENTER
23074 +
23075 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
23076 + GET_THREAD_INFO(%_ASM_BX)
23077 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
23078 sub $1,%_ASM_BX
23079 cmp %_ASM_BX,%_ASM_CX
23080 jae bad_put_user
23081 -2: movw %ax,(%_ASM_CX)
23082 +
23083 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23084 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
23085 + cmp %_ASM_BX,%_ASM_CX
23086 + jb 1234f
23087 + xor %ebx,%ebx
23088 +1234:
23089 +#endif
23090 +
23091 +#endif
23092 +
23093 +2: __copyuser_seg movw %ax,(_DEST)
23094 xor %eax,%eax
23095 EXIT
23096 ENDPROC(__put_user_2)
23097
23098 ENTRY(__put_user_4)
23099 ENTER
23100 +
23101 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
23102 + GET_THREAD_INFO(%_ASM_BX)
23103 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
23104 sub $3,%_ASM_BX
23105 cmp %_ASM_BX,%_ASM_CX
23106 jae bad_put_user
23107 -3: movl %eax,(%_ASM_CX)
23108 +
23109 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23110 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
23111 + cmp %_ASM_BX,%_ASM_CX
23112 + jb 1234f
23113 + xor %ebx,%ebx
23114 +1234:
23115 +#endif
23116 +
23117 +#endif
23118 +
23119 +3: __copyuser_seg movl %eax,(_DEST)
23120 xor %eax,%eax
23121 EXIT
23122 ENDPROC(__put_user_4)
23123
23124 ENTRY(__put_user_8)
23125 ENTER
23126 +
23127 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
23128 + GET_THREAD_INFO(%_ASM_BX)
23129 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
23130 sub $7,%_ASM_BX
23131 cmp %_ASM_BX,%_ASM_CX
23132 jae bad_put_user
23133 -4: mov %_ASM_AX,(%_ASM_CX)
23134 +
23135 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23136 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
23137 + cmp %_ASM_BX,%_ASM_CX
23138 + jb 1234f
23139 + xor %ebx,%ebx
23140 +1234:
23141 +#endif
23142 +
23143 +#endif
23144 +
23145 +4: __copyuser_seg mov %_ASM_AX,(_DEST)
23146 #ifdef CONFIG_X86_32
23147 -5: movl %edx,4(%_ASM_CX)
23148 +5: __copyuser_seg movl %edx,4(_DEST)
23149 #endif
23150 xor %eax,%eax
23151 EXIT
23152 diff --git a/arch/x86/lib/rwlock_64.S b/arch/x86/lib/rwlock_64.S
23153 index 05ea55f..6345b9a 100644
23154 --- a/arch/x86/lib/rwlock_64.S
23155 +++ b/arch/x86/lib/rwlock_64.S
23156 @@ -2,6 +2,7 @@
23157
23158 #include <linux/linkage.h>
23159 #include <asm/rwlock.h>
23160 +#include <asm/asm.h>
23161 #include <asm/alternative-asm.h>
23162 #include <asm/dwarf2.h>
23163
23164 @@ -10,13 +11,34 @@ ENTRY(__write_lock_failed)
23165 CFI_STARTPROC
23166 LOCK_PREFIX
23167 addl $RW_LOCK_BIAS,(%rdi)
23168 +
23169 +#ifdef CONFIG_PAX_REFCOUNT
23170 + jno 1234f
23171 + LOCK_PREFIX
23172 + subl $RW_LOCK_BIAS,(%rdi)
23173 + int $4
23174 +1234:
23175 + _ASM_EXTABLE(1234b, 1234b)
23176 +#endif
23177 +
23178 1: rep
23179 nop
23180 cmpl $RW_LOCK_BIAS,(%rdi)
23181 jne 1b
23182 LOCK_PREFIX
23183 subl $RW_LOCK_BIAS,(%rdi)
23184 +
23185 +#ifdef CONFIG_PAX_REFCOUNT
23186 + jno 1234f
23187 + LOCK_PREFIX
23188 + addl $RW_LOCK_BIAS,(%rdi)
23189 + int $4
23190 +1234:
23191 + _ASM_EXTABLE(1234b, 1234b)
23192 +#endif
23193 +
23194 jnz __write_lock_failed
23195 + pax_force_retaddr
23196 ret
23197 CFI_ENDPROC
23198 END(__write_lock_failed)
23199 @@ -26,13 +48,34 @@ ENTRY(__read_lock_failed)
23200 CFI_STARTPROC
23201 LOCK_PREFIX
23202 incl (%rdi)
23203 +
23204 +#ifdef CONFIG_PAX_REFCOUNT
23205 + jno 1234f
23206 + LOCK_PREFIX
23207 + decl (%rdi)
23208 + int $4
23209 +1234:
23210 + _ASM_EXTABLE(1234b, 1234b)
23211 +#endif
23212 +
23213 1: rep
23214 nop
23215 cmpl $1,(%rdi)
23216 js 1b
23217 LOCK_PREFIX
23218 decl (%rdi)
23219 +
23220 +#ifdef CONFIG_PAX_REFCOUNT
23221 + jno 1234f
23222 + LOCK_PREFIX
23223 + incl (%rdi)
23224 + int $4
23225 +1234:
23226 + _ASM_EXTABLE(1234b, 1234b)
23227 +#endif
23228 +
23229 js __read_lock_failed
23230 + pax_force_retaddr
23231 ret
23232 CFI_ENDPROC
23233 END(__read_lock_failed)
23234 diff --git a/arch/x86/lib/rwsem_64.S b/arch/x86/lib/rwsem_64.S
23235 index 15acecf..f768b10 100644
23236 --- a/arch/x86/lib/rwsem_64.S
23237 +++ b/arch/x86/lib/rwsem_64.S
23238 @@ -48,6 +48,7 @@ ENTRY(call_rwsem_down_read_failed)
23239 call rwsem_down_read_failed
23240 popq %rdx
23241 restore_common_regs
23242 + pax_force_retaddr
23243 ret
23244 ENDPROC(call_rwsem_down_read_failed)
23245
23246 @@ -56,6 +57,7 @@ ENTRY(call_rwsem_down_write_failed)
23247 movq %rax,%rdi
23248 call rwsem_down_write_failed
23249 restore_common_regs
23250 + pax_force_retaddr
23251 ret
23252 ENDPROC(call_rwsem_down_write_failed)
23253
23254 @@ -66,7 +68,8 @@ ENTRY(call_rwsem_wake)
23255 movq %rax,%rdi
23256 call rwsem_wake
23257 restore_common_regs
23258 -1: ret
23259 +1: pax_force_retaddr
23260 + ret
23261 ENDPROC(call_rwsem_wake)
23262
23263 /* Fix up special calling conventions */
23264 @@ -77,5 +80,6 @@ ENTRY(call_rwsem_downgrade_wake)
23265 call rwsem_downgrade_wake
23266 popq %rdx
23267 restore_common_regs
23268 + pax_force_retaddr
23269 ret
23270 ENDPROC(call_rwsem_downgrade_wake)
23271 diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
23272 index bf9a7d5..fb06ab5 100644
23273 --- a/arch/x86/lib/thunk_64.S
23274 +++ b/arch/x86/lib/thunk_64.S
23275 @@ -10,7 +10,8 @@
23276 #include <asm/dwarf2.h>
23277 #include <asm/calling.h>
23278 #include <asm/rwlock.h>
23279 -
23280 + #include <asm/alternative-asm.h>
23281 +
23282 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
23283 .macro thunk name,func
23284 .globl \name
23285 @@ -70,6 +71,7 @@
23286 SAVE_ARGS
23287 restore:
23288 RESTORE_ARGS
23289 + pax_force_retaddr
23290 ret
23291 CFI_ENDPROC
23292
23293 @@ -77,5 +79,6 @@ restore:
23294 SAVE_ARGS
23295 restore_norax:
23296 RESTORE_ARGS 1
23297 + pax_force_retaddr
23298 ret
23299 CFI_ENDPROC
23300 diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
23301 index 1f118d4..ec4a953 100644
23302 --- a/arch/x86/lib/usercopy_32.c
23303 +++ b/arch/x86/lib/usercopy_32.c
23304 @@ -43,7 +43,7 @@ do { \
23305 __asm__ __volatile__( \
23306 " testl %1,%1\n" \
23307 " jz 2f\n" \
23308 - "0: lodsb\n" \
23309 + "0: "__copyuser_seg"lodsb\n" \
23310 " stosb\n" \
23311 " testb %%al,%%al\n" \
23312 " jz 1f\n" \
23313 @@ -128,10 +128,12 @@ do { \
23314 int __d0; \
23315 might_fault(); \
23316 __asm__ __volatile__( \
23317 + __COPYUSER_SET_ES \
23318 "0: rep; stosl\n" \
23319 " movl %2,%0\n" \
23320 "1: rep; stosb\n" \
23321 "2:\n" \
23322 + __COPYUSER_RESTORE_ES \
23323 ".section .fixup,\"ax\"\n" \
23324 "3: lea 0(%2,%0,4),%0\n" \
23325 " jmp 2b\n" \
23326 @@ -200,6 +202,7 @@ long strnlen_user(const char __user *s, long n)
23327 might_fault();
23328
23329 __asm__ __volatile__(
23330 + __COPYUSER_SET_ES
23331 " testl %0, %0\n"
23332 " jz 3f\n"
23333 " andl %0,%%ecx\n"
23334 @@ -208,6 +211,7 @@ long strnlen_user(const char __user *s, long n)
23335 " subl %%ecx,%0\n"
23336 " addl %0,%%eax\n"
23337 "1:\n"
23338 + __COPYUSER_RESTORE_ES
23339 ".section .fixup,\"ax\"\n"
23340 "2: xorl %%eax,%%eax\n"
23341 " jmp 1b\n"
23342 @@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
23343
23344 #ifdef CONFIG_X86_INTEL_USERCOPY
23345 static unsigned long
23346 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
23347 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
23348 {
23349 int d0, d1;
23350 __asm__ __volatile__(
23351 @@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
23352 " .align 2,0x90\n"
23353 "3: movl 0(%4), %%eax\n"
23354 "4: movl 4(%4), %%edx\n"
23355 - "5: movl %%eax, 0(%3)\n"
23356 - "6: movl %%edx, 4(%3)\n"
23357 + "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
23358 + "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
23359 "7: movl 8(%4), %%eax\n"
23360 "8: movl 12(%4),%%edx\n"
23361 - "9: movl %%eax, 8(%3)\n"
23362 - "10: movl %%edx, 12(%3)\n"
23363 + "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
23364 + "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
23365 "11: movl 16(%4), %%eax\n"
23366 "12: movl 20(%4), %%edx\n"
23367 - "13: movl %%eax, 16(%3)\n"
23368 - "14: movl %%edx, 20(%3)\n"
23369 + "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
23370 + "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
23371 "15: movl 24(%4), %%eax\n"
23372 "16: movl 28(%4), %%edx\n"
23373 - "17: movl %%eax, 24(%3)\n"
23374 - "18: movl %%edx, 28(%3)\n"
23375 + "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
23376 + "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
23377 "19: movl 32(%4), %%eax\n"
23378 "20: movl 36(%4), %%edx\n"
23379 - "21: movl %%eax, 32(%3)\n"
23380 - "22: movl %%edx, 36(%3)\n"
23381 + "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
23382 + "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
23383 "23: movl 40(%4), %%eax\n"
23384 "24: movl 44(%4), %%edx\n"
23385 - "25: movl %%eax, 40(%3)\n"
23386 - "26: movl %%edx, 44(%3)\n"
23387 + "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
23388 + "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
23389 "27: movl 48(%4), %%eax\n"
23390 "28: movl 52(%4), %%edx\n"
23391 - "29: movl %%eax, 48(%3)\n"
23392 - "30: movl %%edx, 52(%3)\n"
23393 + "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
23394 + "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
23395 "31: movl 56(%4), %%eax\n"
23396 "32: movl 60(%4), %%edx\n"
23397 - "33: movl %%eax, 56(%3)\n"
23398 - "34: movl %%edx, 60(%3)\n"
23399 + "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
23400 + "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
23401 " addl $-64, %0\n"
23402 " addl $64, %4\n"
23403 " addl $64, %3\n"
23404 @@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
23405 " shrl $2, %0\n"
23406 " andl $3, %%eax\n"
23407 " cld\n"
23408 + __COPYUSER_SET_ES
23409 "99: rep; movsl\n"
23410 "36: movl %%eax, %0\n"
23411 "37: rep; movsb\n"
23412 "100:\n"
23413 + __COPYUSER_RESTORE_ES
23414 + ".section .fixup,\"ax\"\n"
23415 + "101: lea 0(%%eax,%0,4),%0\n"
23416 + " jmp 100b\n"
23417 + ".previous\n"
23418 + ".section __ex_table,\"a\"\n"
23419 + " .align 4\n"
23420 + " .long 1b,100b\n"
23421 + " .long 2b,100b\n"
23422 + " .long 3b,100b\n"
23423 + " .long 4b,100b\n"
23424 + " .long 5b,100b\n"
23425 + " .long 6b,100b\n"
23426 + " .long 7b,100b\n"
23427 + " .long 8b,100b\n"
23428 + " .long 9b,100b\n"
23429 + " .long 10b,100b\n"
23430 + " .long 11b,100b\n"
23431 + " .long 12b,100b\n"
23432 + " .long 13b,100b\n"
23433 + " .long 14b,100b\n"
23434 + " .long 15b,100b\n"
23435 + " .long 16b,100b\n"
23436 + " .long 17b,100b\n"
23437 + " .long 18b,100b\n"
23438 + " .long 19b,100b\n"
23439 + " .long 20b,100b\n"
23440 + " .long 21b,100b\n"
23441 + " .long 22b,100b\n"
23442 + " .long 23b,100b\n"
23443 + " .long 24b,100b\n"
23444 + " .long 25b,100b\n"
23445 + " .long 26b,100b\n"
23446 + " .long 27b,100b\n"
23447 + " .long 28b,100b\n"
23448 + " .long 29b,100b\n"
23449 + " .long 30b,100b\n"
23450 + " .long 31b,100b\n"
23451 + " .long 32b,100b\n"
23452 + " .long 33b,100b\n"
23453 + " .long 34b,100b\n"
23454 + " .long 35b,100b\n"
23455 + " .long 36b,100b\n"
23456 + " .long 37b,100b\n"
23457 + " .long 99b,101b\n"
23458 + ".previous"
23459 + : "=&c"(size), "=&D" (d0), "=&S" (d1)
23460 + : "1"(to), "2"(from), "0"(size)
23461 + : "eax", "edx", "memory");
23462 + return size;
23463 +}
23464 +
23465 +static unsigned long
23466 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
23467 +{
23468 + int d0, d1;
23469 + __asm__ __volatile__(
23470 + " .align 2,0x90\n"
23471 + "1: "__copyuser_seg" movl 32(%4), %%eax\n"
23472 + " cmpl $67, %0\n"
23473 + " jbe 3f\n"
23474 + "2: "__copyuser_seg" movl 64(%4), %%eax\n"
23475 + " .align 2,0x90\n"
23476 + "3: "__copyuser_seg" movl 0(%4), %%eax\n"
23477 + "4: "__copyuser_seg" movl 4(%4), %%edx\n"
23478 + "5: movl %%eax, 0(%3)\n"
23479 + "6: movl %%edx, 4(%3)\n"
23480 + "7: "__copyuser_seg" movl 8(%4), %%eax\n"
23481 + "8: "__copyuser_seg" movl 12(%4),%%edx\n"
23482 + "9: movl %%eax, 8(%3)\n"
23483 + "10: movl %%edx, 12(%3)\n"
23484 + "11: "__copyuser_seg" movl 16(%4), %%eax\n"
23485 + "12: "__copyuser_seg" movl 20(%4), %%edx\n"
23486 + "13: movl %%eax, 16(%3)\n"
23487 + "14: movl %%edx, 20(%3)\n"
23488 + "15: "__copyuser_seg" movl 24(%4), %%eax\n"
23489 + "16: "__copyuser_seg" movl 28(%4), %%edx\n"
23490 + "17: movl %%eax, 24(%3)\n"
23491 + "18: movl %%edx, 28(%3)\n"
23492 + "19: "__copyuser_seg" movl 32(%4), %%eax\n"
23493 + "20: "__copyuser_seg" movl 36(%4), %%edx\n"
23494 + "21: movl %%eax, 32(%3)\n"
23495 + "22: movl %%edx, 36(%3)\n"
23496 + "23: "__copyuser_seg" movl 40(%4), %%eax\n"
23497 + "24: "__copyuser_seg" movl 44(%4), %%edx\n"
23498 + "25: movl %%eax, 40(%3)\n"
23499 + "26: movl %%edx, 44(%3)\n"
23500 + "27: "__copyuser_seg" movl 48(%4), %%eax\n"
23501 + "28: "__copyuser_seg" movl 52(%4), %%edx\n"
23502 + "29: movl %%eax, 48(%3)\n"
23503 + "30: movl %%edx, 52(%3)\n"
23504 + "31: "__copyuser_seg" movl 56(%4), %%eax\n"
23505 + "32: "__copyuser_seg" movl 60(%4), %%edx\n"
23506 + "33: movl %%eax, 56(%3)\n"
23507 + "34: movl %%edx, 60(%3)\n"
23508 + " addl $-64, %0\n"
23509 + " addl $64, %4\n"
23510 + " addl $64, %3\n"
23511 + " cmpl $63, %0\n"
23512 + " ja 1b\n"
23513 + "35: movl %0, %%eax\n"
23514 + " shrl $2, %0\n"
23515 + " andl $3, %%eax\n"
23516 + " cld\n"
23517 + "99: rep; "__copyuser_seg" movsl\n"
23518 + "36: movl %%eax, %0\n"
23519 + "37: rep; "__copyuser_seg" movsb\n"
23520 + "100:\n"
23521 ".section .fixup,\"ax\"\n"
23522 "101: lea 0(%%eax,%0,4),%0\n"
23523 " jmp 100b\n"
23524 @@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23525 int d0, d1;
23526 __asm__ __volatile__(
23527 " .align 2,0x90\n"
23528 - "0: movl 32(%4), %%eax\n"
23529 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23530 " cmpl $67, %0\n"
23531 " jbe 2f\n"
23532 - "1: movl 64(%4), %%eax\n"
23533 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23534 " .align 2,0x90\n"
23535 - "2: movl 0(%4), %%eax\n"
23536 - "21: movl 4(%4), %%edx\n"
23537 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23538 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23539 " movl %%eax, 0(%3)\n"
23540 " movl %%edx, 4(%3)\n"
23541 - "3: movl 8(%4), %%eax\n"
23542 - "31: movl 12(%4),%%edx\n"
23543 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23544 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23545 " movl %%eax, 8(%3)\n"
23546 " movl %%edx, 12(%3)\n"
23547 - "4: movl 16(%4), %%eax\n"
23548 - "41: movl 20(%4), %%edx\n"
23549 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23550 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23551 " movl %%eax, 16(%3)\n"
23552 " movl %%edx, 20(%3)\n"
23553 - "10: movl 24(%4), %%eax\n"
23554 - "51: movl 28(%4), %%edx\n"
23555 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23556 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23557 " movl %%eax, 24(%3)\n"
23558 " movl %%edx, 28(%3)\n"
23559 - "11: movl 32(%4), %%eax\n"
23560 - "61: movl 36(%4), %%edx\n"
23561 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23562 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23563 " movl %%eax, 32(%3)\n"
23564 " movl %%edx, 36(%3)\n"
23565 - "12: movl 40(%4), %%eax\n"
23566 - "71: movl 44(%4), %%edx\n"
23567 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23568 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23569 " movl %%eax, 40(%3)\n"
23570 " movl %%edx, 44(%3)\n"
23571 - "13: movl 48(%4), %%eax\n"
23572 - "81: movl 52(%4), %%edx\n"
23573 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23574 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23575 " movl %%eax, 48(%3)\n"
23576 " movl %%edx, 52(%3)\n"
23577 - "14: movl 56(%4), %%eax\n"
23578 - "91: movl 60(%4), %%edx\n"
23579 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23580 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23581 " movl %%eax, 56(%3)\n"
23582 " movl %%edx, 60(%3)\n"
23583 " addl $-64, %0\n"
23584 @@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23585 " shrl $2, %0\n"
23586 " andl $3, %%eax\n"
23587 " cld\n"
23588 - "6: rep; movsl\n"
23589 + "6: rep; "__copyuser_seg" movsl\n"
23590 " movl %%eax,%0\n"
23591 - "7: rep; movsb\n"
23592 + "7: rep; "__copyuser_seg" movsb\n"
23593 "8:\n"
23594 ".section .fixup,\"ax\"\n"
23595 "9: lea 0(%%eax,%0,4),%0\n"
23596 @@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23597
23598 __asm__ __volatile__(
23599 " .align 2,0x90\n"
23600 - "0: movl 32(%4), %%eax\n"
23601 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23602 " cmpl $67, %0\n"
23603 " jbe 2f\n"
23604 - "1: movl 64(%4), %%eax\n"
23605 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23606 " .align 2,0x90\n"
23607 - "2: movl 0(%4), %%eax\n"
23608 - "21: movl 4(%4), %%edx\n"
23609 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23610 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23611 " movnti %%eax, 0(%3)\n"
23612 " movnti %%edx, 4(%3)\n"
23613 - "3: movl 8(%4), %%eax\n"
23614 - "31: movl 12(%4),%%edx\n"
23615 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23616 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23617 " movnti %%eax, 8(%3)\n"
23618 " movnti %%edx, 12(%3)\n"
23619 - "4: movl 16(%4), %%eax\n"
23620 - "41: movl 20(%4), %%edx\n"
23621 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23622 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23623 " movnti %%eax, 16(%3)\n"
23624 " movnti %%edx, 20(%3)\n"
23625 - "10: movl 24(%4), %%eax\n"
23626 - "51: movl 28(%4), %%edx\n"
23627 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23628 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23629 " movnti %%eax, 24(%3)\n"
23630 " movnti %%edx, 28(%3)\n"
23631 - "11: movl 32(%4), %%eax\n"
23632 - "61: movl 36(%4), %%edx\n"
23633 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23634 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23635 " movnti %%eax, 32(%3)\n"
23636 " movnti %%edx, 36(%3)\n"
23637 - "12: movl 40(%4), %%eax\n"
23638 - "71: movl 44(%4), %%edx\n"
23639 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23640 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23641 " movnti %%eax, 40(%3)\n"
23642 " movnti %%edx, 44(%3)\n"
23643 - "13: movl 48(%4), %%eax\n"
23644 - "81: movl 52(%4), %%edx\n"
23645 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23646 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23647 " movnti %%eax, 48(%3)\n"
23648 " movnti %%edx, 52(%3)\n"
23649 - "14: movl 56(%4), %%eax\n"
23650 - "91: movl 60(%4), %%edx\n"
23651 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23652 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23653 " movnti %%eax, 56(%3)\n"
23654 " movnti %%edx, 60(%3)\n"
23655 " addl $-64, %0\n"
23656 @@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23657 " shrl $2, %0\n"
23658 " andl $3, %%eax\n"
23659 " cld\n"
23660 - "6: rep; movsl\n"
23661 + "6: rep; "__copyuser_seg" movsl\n"
23662 " movl %%eax,%0\n"
23663 - "7: rep; movsb\n"
23664 + "7: rep; "__copyuser_seg" movsb\n"
23665 "8:\n"
23666 ".section .fixup,\"ax\"\n"
23667 "9: lea 0(%%eax,%0,4),%0\n"
23668 @@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
23669
23670 __asm__ __volatile__(
23671 " .align 2,0x90\n"
23672 - "0: movl 32(%4), %%eax\n"
23673 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23674 " cmpl $67, %0\n"
23675 " jbe 2f\n"
23676 - "1: movl 64(%4), %%eax\n"
23677 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23678 " .align 2,0x90\n"
23679 - "2: movl 0(%4), %%eax\n"
23680 - "21: movl 4(%4), %%edx\n"
23681 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23682 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23683 " movnti %%eax, 0(%3)\n"
23684 " movnti %%edx, 4(%3)\n"
23685 - "3: movl 8(%4), %%eax\n"
23686 - "31: movl 12(%4),%%edx\n"
23687 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23688 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23689 " movnti %%eax, 8(%3)\n"
23690 " movnti %%edx, 12(%3)\n"
23691 - "4: movl 16(%4), %%eax\n"
23692 - "41: movl 20(%4), %%edx\n"
23693 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23694 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23695 " movnti %%eax, 16(%3)\n"
23696 " movnti %%edx, 20(%3)\n"
23697 - "10: movl 24(%4), %%eax\n"
23698 - "51: movl 28(%4), %%edx\n"
23699 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23700 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23701 " movnti %%eax, 24(%3)\n"
23702 " movnti %%edx, 28(%3)\n"
23703 - "11: movl 32(%4), %%eax\n"
23704 - "61: movl 36(%4), %%edx\n"
23705 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23706 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23707 " movnti %%eax, 32(%3)\n"
23708 " movnti %%edx, 36(%3)\n"
23709 - "12: movl 40(%4), %%eax\n"
23710 - "71: movl 44(%4), %%edx\n"
23711 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23712 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23713 " movnti %%eax, 40(%3)\n"
23714 " movnti %%edx, 44(%3)\n"
23715 - "13: movl 48(%4), %%eax\n"
23716 - "81: movl 52(%4), %%edx\n"
23717 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23718 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23719 " movnti %%eax, 48(%3)\n"
23720 " movnti %%edx, 52(%3)\n"
23721 - "14: movl 56(%4), %%eax\n"
23722 - "91: movl 60(%4), %%edx\n"
23723 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23724 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23725 " movnti %%eax, 56(%3)\n"
23726 " movnti %%edx, 60(%3)\n"
23727 " addl $-64, %0\n"
23728 @@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
23729 " shrl $2, %0\n"
23730 " andl $3, %%eax\n"
23731 " cld\n"
23732 - "6: rep; movsl\n"
23733 + "6: rep; "__copyuser_seg" movsl\n"
23734 " movl %%eax,%0\n"
23735 - "7: rep; movsb\n"
23736 + "7: rep; "__copyuser_seg" movsb\n"
23737 "8:\n"
23738 ".section .fixup,\"ax\"\n"
23739 "9: lea 0(%%eax,%0,4),%0\n"
23740 @@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
23741 */
23742 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
23743 unsigned long size);
23744 -unsigned long __copy_user_intel(void __user *to, const void *from,
23745 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
23746 + unsigned long size);
23747 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
23748 unsigned long size);
23749 unsigned long __copy_user_zeroing_intel_nocache(void *to,
23750 const void __user *from, unsigned long size);
23751 #endif /* CONFIG_X86_INTEL_USERCOPY */
23752
23753 /* Generic arbitrary sized copy. */
23754 -#define __copy_user(to, from, size) \
23755 +#define __copy_user(to, from, size, prefix, set, restore) \
23756 do { \
23757 int __d0, __d1, __d2; \
23758 __asm__ __volatile__( \
23759 + set \
23760 " cmp $7,%0\n" \
23761 " jbe 1f\n" \
23762 " movl %1,%0\n" \
23763 " negl %0\n" \
23764 " andl $7,%0\n" \
23765 " subl %0,%3\n" \
23766 - "4: rep; movsb\n" \
23767 + "4: rep; "prefix"movsb\n" \
23768 " movl %3,%0\n" \
23769 " shrl $2,%0\n" \
23770 " andl $3,%3\n" \
23771 " .align 2,0x90\n" \
23772 - "0: rep; movsl\n" \
23773 + "0: rep; "prefix"movsl\n" \
23774 " movl %3,%0\n" \
23775 - "1: rep; movsb\n" \
23776 + "1: rep; "prefix"movsb\n" \
23777 "2:\n" \
23778 + restore \
23779 ".section .fixup,\"ax\"\n" \
23780 "5: addl %3,%0\n" \
23781 " jmp 2b\n" \
23782 @@ -682,14 +799,14 @@ do { \
23783 " negl %0\n" \
23784 " andl $7,%0\n" \
23785 " subl %0,%3\n" \
23786 - "4: rep; movsb\n" \
23787 + "4: rep; "__copyuser_seg"movsb\n" \
23788 " movl %3,%0\n" \
23789 " shrl $2,%0\n" \
23790 " andl $3,%3\n" \
23791 " .align 2,0x90\n" \
23792 - "0: rep; movsl\n" \
23793 + "0: rep; "__copyuser_seg"movsl\n" \
23794 " movl %3,%0\n" \
23795 - "1: rep; movsb\n" \
23796 + "1: rep; "__copyuser_seg"movsb\n" \
23797 "2:\n" \
23798 ".section .fixup,\"ax\"\n" \
23799 "5: addl %3,%0\n" \
23800 @@ -775,9 +892,9 @@ survive:
23801 }
23802 #endif
23803 if (movsl_is_ok(to, from, n))
23804 - __copy_user(to, from, n);
23805 + __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
23806 else
23807 - n = __copy_user_intel(to, from, n);
23808 + n = __generic_copy_to_user_intel(to, from, n);
23809 return n;
23810 }
23811 EXPORT_SYMBOL(__copy_to_user_ll);
23812 @@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
23813 unsigned long n)
23814 {
23815 if (movsl_is_ok(to, from, n))
23816 - __copy_user(to, from, n);
23817 + __copy_user(to, from, n, __copyuser_seg, "", "");
23818 else
23819 - n = __copy_user_intel((void __user *)to,
23820 - (const void *)from, n);
23821 + n = __generic_copy_from_user_intel(to, from, n);
23822 return n;
23823 }
23824 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
23825 @@ -827,59 +943,38 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
23826 if (n > 64 && cpu_has_xmm2)
23827 n = __copy_user_intel_nocache(to, from, n);
23828 else
23829 - __copy_user(to, from, n);
23830 + __copy_user(to, from, n, __copyuser_seg, "", "");
23831 #else
23832 - __copy_user(to, from, n);
23833 + __copy_user(to, from, n, __copyuser_seg, "", "");
23834 #endif
23835 return n;
23836 }
23837 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
23838
23839 -/**
23840 - * copy_to_user: - Copy a block of data into user space.
23841 - * @to: Destination address, in user space.
23842 - * @from: Source address, in kernel space.
23843 - * @n: Number of bytes to copy.
23844 - *
23845 - * Context: User context only. This function may sleep.
23846 - *
23847 - * Copy data from kernel space to user space.
23848 - *
23849 - * Returns number of bytes that could not be copied.
23850 - * On success, this will be zero.
23851 - */
23852 -unsigned long
23853 -copy_to_user(void __user *to, const void *from, unsigned long n)
23854 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23855 +void __set_fs(mm_segment_t x)
23856 {
23857 - if (access_ok(VERIFY_WRITE, to, n))
23858 - n = __copy_to_user(to, from, n);
23859 - return n;
23860 + switch (x.seg) {
23861 + case 0:
23862 + loadsegment(gs, 0);
23863 + break;
23864 + case TASK_SIZE_MAX:
23865 + loadsegment(gs, __USER_DS);
23866 + break;
23867 + case -1UL:
23868 + loadsegment(gs, __KERNEL_DS);
23869 + break;
23870 + default:
23871 + BUG();
23872 + }
23873 + return;
23874 }
23875 -EXPORT_SYMBOL(copy_to_user);
23876 +EXPORT_SYMBOL(__set_fs);
23877
23878 -/**
23879 - * copy_from_user: - Copy a block of data from user space.
23880 - * @to: Destination address, in kernel space.
23881 - * @from: Source address, in user space.
23882 - * @n: Number of bytes to copy.
23883 - *
23884 - * Context: User context only. This function may sleep.
23885 - *
23886 - * Copy data from user space to kernel space.
23887 - *
23888 - * Returns number of bytes that could not be copied.
23889 - * On success, this will be zero.
23890 - *
23891 - * If some data could not be copied, this function will pad the copied
23892 - * data to the requested size using zero bytes.
23893 - */
23894 -unsigned long
23895 -copy_from_user(void *to, const void __user *from, unsigned long n)
23896 +void set_fs(mm_segment_t x)
23897 {
23898 - if (access_ok(VERIFY_READ, from, n))
23899 - n = __copy_from_user(to, from, n);
23900 - else
23901 - memset(to, 0, n);
23902 - return n;
23903 + current_thread_info()->addr_limit = x;
23904 + __set_fs(x);
23905 }
23906 -EXPORT_SYMBOL(copy_from_user);
23907 +EXPORT_SYMBOL(set_fs);
23908 +#endif
23909 diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
23910 index b7c2849..8633ad8 100644
23911 --- a/arch/x86/lib/usercopy_64.c
23912 +++ b/arch/x86/lib/usercopy_64.c
23913 @@ -42,6 +42,12 @@ long
23914 __strncpy_from_user(char *dst, const char __user *src, long count)
23915 {
23916 long res;
23917 +
23918 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23919 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
23920 + src += PAX_USER_SHADOW_BASE;
23921 +#endif
23922 +
23923 __do_strncpy_from_user(dst, src, count, res);
23924 return res;
23925 }
23926 @@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
23927 {
23928 long __d0;
23929 might_fault();
23930 +
23931 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23932 + if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
23933 + addr += PAX_USER_SHADOW_BASE;
23934 +#endif
23935 +
23936 /* no memory constraint because it doesn't change any memory gcc knows
23937 about */
23938 asm volatile(
23939 @@ -149,12 +161,20 @@ long strlen_user(const char __user *s)
23940 }
23941 EXPORT_SYMBOL(strlen_user);
23942
23943 -unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
23944 +unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
23945 {
23946 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
23947 - return copy_user_generic((__force void *)to, (__force void *)from, len);
23948 - }
23949 - return len;
23950 + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
23951 +
23952 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23953 + if ((unsigned long)to < PAX_USER_SHADOW_BASE)
23954 + to += PAX_USER_SHADOW_BASE;
23955 + if ((unsigned long)from < PAX_USER_SHADOW_BASE)
23956 + from += PAX_USER_SHADOW_BASE;
23957 +#endif
23958 +
23959 + return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
23960 + }
23961 + return len;
23962 }
23963 EXPORT_SYMBOL(copy_in_user);
23964
23965 @@ -164,7 +184,7 @@ EXPORT_SYMBOL(copy_in_user);
23966 * it is not necessary to optimize tail handling.
23967 */
23968 unsigned long
23969 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
23970 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
23971 {
23972 char c;
23973 unsigned zero_len;
23974 diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
23975 index 61b41ca..5fef66a 100644
23976 --- a/arch/x86/mm/extable.c
23977 +++ b/arch/x86/mm/extable.c
23978 @@ -1,14 +1,71 @@
23979 #include <linux/module.h>
23980 #include <linux/spinlock.h>
23981 +#include <linux/sort.h>
23982 #include <asm/uaccess.h>
23983 +#include <asm/pgtable.h>
23984
23985 +/*
23986 + * The exception table needs to be sorted so that the binary
23987 + * search that we use to find entries in it works properly.
23988 + * This is used both for the kernel exception table and for
23989 + * the exception tables of modules that get loaded.
23990 + */
23991 +static int cmp_ex(const void *a, const void *b)
23992 +{
23993 + const struct exception_table_entry *x = a, *y = b;
23994 +
23995 + /* avoid overflow */
23996 + if (x->insn > y->insn)
23997 + return 1;
23998 + if (x->insn < y->insn)
23999 + return -1;
24000 + return 0;
24001 +}
24002 +
24003 +static void swap_ex(void *a, void *b, int size)
24004 +{
24005 + struct exception_table_entry t, *x = a, *y = b;
24006 +
24007 + t = *x;
24008 +
24009 + pax_open_kernel();
24010 + *x = *y;
24011 + *y = t;
24012 + pax_close_kernel();
24013 +}
24014 +
24015 +void sort_extable(struct exception_table_entry *start,
24016 + struct exception_table_entry *finish)
24017 +{
24018 + sort(start, finish - start, sizeof(struct exception_table_entry),
24019 + cmp_ex, swap_ex);
24020 +}
24021 +
24022 +#ifdef CONFIG_MODULES
24023 +/*
24024 + * If the exception table is sorted, any referring to the module init
24025 + * will be at the beginning or the end.
24026 + */
24027 +void trim_init_extable(struct module *m)
24028 +{
24029 + /*trim the beginning*/
24030 + while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
24031 + m->extable++;
24032 + m->num_exentries--;
24033 + }
24034 + /*trim the end*/
24035 + while (m->num_exentries &&
24036 + within_module_init(m->extable[m->num_exentries-1].insn, m))
24037 + m->num_exentries--;
24038 +}
24039 +#endif /* CONFIG_MODULES */
24040
24041 int fixup_exception(struct pt_regs *regs)
24042 {
24043 const struct exception_table_entry *fixup;
24044
24045 #ifdef CONFIG_PNPBIOS
24046 - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
24047 + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
24048 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
24049 extern u32 pnp_bios_is_utter_crap;
24050 pnp_bios_is_utter_crap = 1;
24051 diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
24052 index 8ac0d76..ca501e2 100644
24053 --- a/arch/x86/mm/fault.c
24054 +++ b/arch/x86/mm/fault.c
24055 @@ -11,10 +11,19 @@
24056 #include <linux/kprobes.h> /* __kprobes, ... */
24057 #include <linux/mmiotrace.h> /* kmmio_handler, ... */
24058 #include <linux/perf_event.h> /* perf_sw_event */
24059 +#include <linux/unistd.h>
24060 +#include <linux/compiler.h>
24061
24062 #include <asm/traps.h> /* dotraplinkage, ... */
24063 #include <asm/pgalloc.h> /* pgd_*(), ... */
24064 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
24065 +#include <asm/vsyscall.h>
24066 +#include <asm/tlbflush.h>
24067 +
24068 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24069 +#include <asm/stacktrace.h>
24070 +#include "../kernel/dumpstack.h"
24071 +#endif
24072
24073 /*
24074 * Page fault error code bits:
24075 @@ -51,7 +60,7 @@ static inline int notify_page_fault(struct pt_regs *regs)
24076 int ret = 0;
24077
24078 /* kprobe_running() needs smp_processor_id() */
24079 - if (kprobes_built_in() && !user_mode_vm(regs)) {
24080 + if (kprobes_built_in() && !user_mode(regs)) {
24081 preempt_disable();
24082 if (kprobe_running() && kprobe_fault_handler(regs, 14))
24083 ret = 1;
24084 @@ -112,7 +121,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
24085 return !instr_lo || (instr_lo>>1) == 1;
24086 case 0x00:
24087 /* Prefetch instruction is 0x0F0D or 0x0F18 */
24088 - if (probe_kernel_address(instr, opcode))
24089 + if (user_mode(regs)) {
24090 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
24091 + return 0;
24092 + } else if (probe_kernel_address(instr, opcode))
24093 return 0;
24094
24095 *prefetch = (instr_lo == 0xF) &&
24096 @@ -146,7 +158,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
24097 while (instr < max_instr) {
24098 unsigned char opcode;
24099
24100 - if (probe_kernel_address(instr, opcode))
24101 + if (user_mode(regs)) {
24102 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
24103 + break;
24104 + } else if (probe_kernel_address(instr, opcode))
24105 break;
24106
24107 instr++;
24108 @@ -172,6 +187,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
24109 force_sig_info(si_signo, &info, tsk);
24110 }
24111
24112 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24113 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
24114 +#endif
24115 +
24116 +#ifdef CONFIG_PAX_EMUTRAMP
24117 +static int pax_handle_fetch_fault(struct pt_regs *regs);
24118 +#endif
24119 +
24120 +#ifdef CONFIG_PAX_PAGEEXEC
24121 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
24122 +{
24123 + pgd_t *pgd;
24124 + pud_t *pud;
24125 + pmd_t *pmd;
24126 +
24127 + pgd = pgd_offset(mm, address);
24128 + if (!pgd_present(*pgd))
24129 + return NULL;
24130 + pud = pud_offset(pgd, address);
24131 + if (!pud_present(*pud))
24132 + return NULL;
24133 + pmd = pmd_offset(pud, address);
24134 + if (!pmd_present(*pmd))
24135 + return NULL;
24136 + return pmd;
24137 +}
24138 +#endif
24139 +
24140 DEFINE_SPINLOCK(pgd_lock);
24141 LIST_HEAD(pgd_list);
24142
24143 @@ -224,11 +267,24 @@ void vmalloc_sync_all(void)
24144 address += PMD_SIZE) {
24145
24146 unsigned long flags;
24147 +
24148 +#ifdef CONFIG_PAX_PER_CPU_PGD
24149 + unsigned long cpu;
24150 +#else
24151 struct page *page;
24152 +#endif
24153
24154 spin_lock_irqsave(&pgd_lock, flags);
24155 +
24156 +#ifdef CONFIG_PAX_PER_CPU_PGD
24157 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
24158 + pgd_t *pgd = get_cpu_pgd(cpu);
24159 +#else
24160 list_for_each_entry(page, &pgd_list, lru) {
24161 - if (!vmalloc_sync_one(page_address(page), address))
24162 + pgd_t *pgd = page_address(page);
24163 +#endif
24164 +
24165 + if (!vmalloc_sync_one(pgd, address))
24166 break;
24167 }
24168 spin_unlock_irqrestore(&pgd_lock, flags);
24169 @@ -258,6 +314,11 @@ static noinline int vmalloc_fault(unsigned long address)
24170 * an interrupt in the middle of a task switch..
24171 */
24172 pgd_paddr = read_cr3();
24173 +
24174 +#ifdef CONFIG_PAX_PER_CPU_PGD
24175 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
24176 +#endif
24177 +
24178 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
24179 if (!pmd_k)
24180 return -1;
24181 @@ -332,15 +393,27 @@ void vmalloc_sync_all(void)
24182
24183 const pgd_t *pgd_ref = pgd_offset_k(address);
24184 unsigned long flags;
24185 +
24186 +#ifdef CONFIG_PAX_PER_CPU_PGD
24187 + unsigned long cpu;
24188 +#else
24189 struct page *page;
24190 +#endif
24191
24192 if (pgd_none(*pgd_ref))
24193 continue;
24194
24195 spin_lock_irqsave(&pgd_lock, flags);
24196 +
24197 +#ifdef CONFIG_PAX_PER_CPU_PGD
24198 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
24199 + pgd_t *pgd = pgd_offset_cpu(cpu, address);
24200 +#else
24201 list_for_each_entry(page, &pgd_list, lru) {
24202 pgd_t *pgd;
24203 pgd = (pgd_t *)page_address(page) + pgd_index(address);
24204 +#endif
24205 +
24206 if (pgd_none(*pgd))
24207 set_pgd(pgd, *pgd_ref);
24208 else
24209 @@ -373,7 +446,14 @@ static noinline int vmalloc_fault(unsigned long address)
24210 * happen within a race in page table update. In the later
24211 * case just flush:
24212 */
24213 +
24214 +#ifdef CONFIG_PAX_PER_CPU_PGD
24215 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
24216 + pgd = pgd_offset_cpu(smp_processor_id(), address);
24217 +#else
24218 pgd = pgd_offset(current->active_mm, address);
24219 +#endif
24220 +
24221 pgd_ref = pgd_offset_k(address);
24222 if (pgd_none(*pgd_ref))
24223 return -1;
24224 @@ -535,7 +615,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
24225 static int is_errata100(struct pt_regs *regs, unsigned long address)
24226 {
24227 #ifdef CONFIG_X86_64
24228 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
24229 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
24230 return 1;
24231 #endif
24232 return 0;
24233 @@ -562,7 +642,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
24234 }
24235
24236 static const char nx_warning[] = KERN_CRIT
24237 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
24238 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
24239
24240 static void
24241 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
24242 @@ -571,15 +651,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
24243 if (!oops_may_print())
24244 return;
24245
24246 - if (error_code & PF_INSTR) {
24247 + if (nx_enabled && (error_code & PF_INSTR)) {
24248 unsigned int level;
24249
24250 pte_t *pte = lookup_address(address, &level);
24251
24252 if (pte && pte_present(*pte) && !pte_exec(*pte))
24253 - printk(nx_warning, current_uid());
24254 + printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
24255 }
24256
24257 +#ifdef CONFIG_PAX_KERNEXEC
24258 + if (init_mm.start_code <= address && address < init_mm.end_code) {
24259 + if (current->signal->curr_ip)
24260 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
24261 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
24262 + else
24263 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
24264 + current->comm, task_pid_nr(current), current_uid(), current_euid());
24265 + }
24266 +#endif
24267 +
24268 printk(KERN_ALERT "BUG: unable to handle kernel ");
24269 if (address < PAGE_SIZE)
24270 printk(KERN_CONT "NULL pointer dereference");
24271 @@ -705,6 +796,23 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
24272 {
24273 struct task_struct *tsk = current;
24274
24275 +#ifdef CONFIG_X86_64
24276 + struct mm_struct *mm = tsk->mm;
24277 +
24278 + if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
24279 + if (regs->ip == (unsigned long)vgettimeofday) {
24280 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_gettimeofday);
24281 + return;
24282 + } else if (regs->ip == (unsigned long)vtime) {
24283 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_time);
24284 + return;
24285 + } else if (regs->ip == (unsigned long)vgetcpu) {
24286 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, getcpu);
24287 + return;
24288 + }
24289 + }
24290 +#endif
24291 +
24292 /* User mode accesses just cause a SIGSEGV */
24293 if (error_code & PF_USER) {
24294 /*
24295 @@ -722,6 +830,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
24296 if (is_errata100(regs, address))
24297 return;
24298
24299 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24300 + if (pax_is_fetch_fault(regs, error_code, address)) {
24301 +
24302 +#ifdef CONFIG_PAX_EMUTRAMP
24303 + switch (pax_handle_fetch_fault(regs)) {
24304 + case 2:
24305 + return;
24306 + }
24307 +#endif
24308 +
24309 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
24310 + do_group_exit(SIGKILL);
24311 + }
24312 +#endif
24313 +
24314 if (unlikely(show_unhandled_signals))
24315 show_signal_msg(regs, error_code, address, tsk);
24316
24317 @@ -818,7 +941,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
24318 if (fault & VM_FAULT_HWPOISON) {
24319 printk(KERN_ERR
24320 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
24321 - tsk->comm, tsk->pid, address);
24322 + tsk->comm, task_pid_nr(tsk), address);
24323 code = BUS_MCEERR_AR;
24324 }
24325 #endif
24326 @@ -857,6 +980,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
24327 return 1;
24328 }
24329
24330 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
24331 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
24332 +{
24333 + pte_t *pte;
24334 + pmd_t *pmd;
24335 + spinlock_t *ptl;
24336 + unsigned char pte_mask;
24337 +
24338 + if (nx_enabled || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
24339 + !(mm->pax_flags & MF_PAX_PAGEEXEC))
24340 + return 0;
24341 +
24342 + /* PaX: it's our fault, let's handle it if we can */
24343 +
24344 + /* PaX: take a look at read faults before acquiring any locks */
24345 + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
24346 + /* instruction fetch attempt from a protected page in user mode */
24347 + up_read(&mm->mmap_sem);
24348 +
24349 +#ifdef CONFIG_PAX_EMUTRAMP
24350 + switch (pax_handle_fetch_fault(regs)) {
24351 + case 2:
24352 + return 1;
24353 + }
24354 +#endif
24355 +
24356 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
24357 + do_group_exit(SIGKILL);
24358 + }
24359 +
24360 + pmd = pax_get_pmd(mm, address);
24361 + if (unlikely(!pmd))
24362 + return 0;
24363 +
24364 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
24365 + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
24366 + pte_unmap_unlock(pte, ptl);
24367 + return 0;
24368 + }
24369 +
24370 + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
24371 + /* write attempt to a protected page in user mode */
24372 + pte_unmap_unlock(pte, ptl);
24373 + return 0;
24374 + }
24375 +
24376 +#ifdef CONFIG_SMP
24377 + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
24378 +#else
24379 + if (likely(address > get_limit(regs->cs)))
24380 +#endif
24381 + {
24382 + set_pte(pte, pte_mkread(*pte));
24383 + __flush_tlb_one(address);
24384 + pte_unmap_unlock(pte, ptl);
24385 + up_read(&mm->mmap_sem);
24386 + return 1;
24387 + }
24388 +
24389 + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
24390 +
24391 + /*
24392 + * PaX: fill DTLB with user rights and retry
24393 + */
24394 + __asm__ __volatile__ (
24395 + "orb %2,(%1)\n"
24396 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
24397 +/*
24398 + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
24399 + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
24400 + * page fault when examined during a TLB load attempt. this is true not only
24401 + * for PTEs holding a non-present entry but also present entries that will
24402 + * raise a page fault (such as those set up by PaX, or the copy-on-write
24403 + * mechanism). in effect it means that we do *not* need to flush the TLBs
24404 + * for our target pages since their PTEs are simply not in the TLBs at all.
24405 +
24406 + * the best thing in omitting it is that we gain around 15-20% speed in the
24407 + * fast path of the page fault handler and can get rid of tracing since we
24408 + * can no longer flush unintended entries.
24409 + */
24410 + "invlpg (%0)\n"
24411 +#endif
24412 + __copyuser_seg"testb $0,(%0)\n"
24413 + "xorb %3,(%1)\n"
24414 + :
24415 + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
24416 + : "memory", "cc");
24417 + pte_unmap_unlock(pte, ptl);
24418 + up_read(&mm->mmap_sem);
24419 + return 1;
24420 +}
24421 +#endif
24422 +
24423 /*
24424 * Handle a spurious fault caused by a stale TLB entry.
24425 *
24426 @@ -923,6 +1139,9 @@ int show_unhandled_signals = 1;
24427 static inline int
24428 access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
24429 {
24430 + if (nx_enabled && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
24431 + return 1;
24432 +
24433 if (write) {
24434 /* write, present and write, not present: */
24435 if (unlikely(!(vma->vm_flags & VM_WRITE)))
24436 @@ -956,16 +1175,30 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24437 {
24438 struct vm_area_struct *vma;
24439 struct task_struct *tsk;
24440 - unsigned long address;
24441 struct mm_struct *mm;
24442 int write;
24443 int fault;
24444
24445 - tsk = current;
24446 - mm = tsk->mm;
24447 -
24448 /* Get the faulting address: */
24449 - address = read_cr2();
24450 + unsigned long address = read_cr2();
24451 +
24452 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24453 + if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
24454 + if (!search_exception_tables(regs->ip)) {
24455 + bad_area_nosemaphore(regs, error_code, address);
24456 + return;
24457 + }
24458 + if (address < PAX_USER_SHADOW_BASE) {
24459 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
24460 + printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
24461 + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
24462 + } else
24463 + address -= PAX_USER_SHADOW_BASE;
24464 + }
24465 +#endif
24466 +
24467 + tsk = current;
24468 + mm = tsk->mm;
24469
24470 /*
24471 * Detect and handle instructions that would cause a page fault for
24472 @@ -1026,7 +1259,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24473 * User-mode registers count as a user access even for any
24474 * potential system fault or CPU buglet:
24475 */
24476 - if (user_mode_vm(regs)) {
24477 + if (user_mode(regs)) {
24478 local_irq_enable();
24479 error_code |= PF_USER;
24480 } else {
24481 @@ -1080,6 +1313,11 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24482 might_sleep();
24483 }
24484
24485 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
24486 + if (pax_handle_pageexec_fault(regs, mm, address, error_code))
24487 + return;
24488 +#endif
24489 +
24490 vma = find_vma(mm, address);
24491 if (unlikely(!vma)) {
24492 bad_area(regs, error_code, address);
24493 @@ -1091,18 +1329,24 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24494 bad_area(regs, error_code, address);
24495 return;
24496 }
24497 - if (error_code & PF_USER) {
24498 - /*
24499 - * Accessing the stack below %sp is always a bug.
24500 - * The large cushion allows instructions like enter
24501 - * and pusha to work. ("enter $65535, $31" pushes
24502 - * 32 pointers and then decrements %sp by 65535.)
24503 - */
24504 - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
24505 - bad_area(regs, error_code, address);
24506 - return;
24507 - }
24508 + /*
24509 + * Accessing the stack below %sp is always a bug.
24510 + * The large cushion allows instructions like enter
24511 + * and pusha to work. ("enter $65535, $31" pushes
24512 + * 32 pointers and then decrements %sp by 65535.)
24513 + */
24514 + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
24515 + bad_area(regs, error_code, address);
24516 + return;
24517 }
24518 +
24519 +#ifdef CONFIG_PAX_SEGMEXEC
24520 + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
24521 + bad_area(regs, error_code, address);
24522 + return;
24523 + }
24524 +#endif
24525 +
24526 if (unlikely(expand_stack(vma, address))) {
24527 bad_area(regs, error_code, address);
24528 return;
24529 @@ -1146,3 +1390,292 @@ good_area:
24530
24531 up_read(&mm->mmap_sem);
24532 }
24533 +
24534 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24535 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
24536 +{
24537 + struct mm_struct *mm = current->mm;
24538 + unsigned long ip = regs->ip;
24539 +
24540 + if (v8086_mode(regs))
24541 + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
24542 +
24543 +#ifdef CONFIG_PAX_PAGEEXEC
24544 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
24545 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
24546 + return true;
24547 + if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
24548 + return true;
24549 + return false;
24550 + }
24551 +#endif
24552 +
24553 +#ifdef CONFIG_PAX_SEGMEXEC
24554 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
24555 + if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
24556 + return true;
24557 + return false;
24558 + }
24559 +#endif
24560 +
24561 + return false;
24562 +}
24563 +#endif
24564 +
24565 +#ifdef CONFIG_PAX_EMUTRAMP
24566 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
24567 +{
24568 + int err;
24569 +
24570 + do { /* PaX: libffi trampoline emulation */
24571 + unsigned char mov, jmp;
24572 + unsigned int addr1, addr2;
24573 +
24574 +#ifdef CONFIG_X86_64
24575 + if ((regs->ip + 9) >> 32)
24576 + break;
24577 +#endif
24578 +
24579 + err = get_user(mov, (unsigned char __user *)regs->ip);
24580 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24581 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
24582 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24583 +
24584 + if (err)
24585 + break;
24586 +
24587 + if (mov == 0xB8 && jmp == 0xE9) {
24588 + regs->ax = addr1;
24589 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
24590 + return 2;
24591 + }
24592 + } while (0);
24593 +
24594 + do { /* PaX: gcc trampoline emulation #1 */
24595 + unsigned char mov1, mov2;
24596 + unsigned short jmp;
24597 + unsigned int addr1, addr2;
24598 +
24599 +#ifdef CONFIG_X86_64
24600 + if ((regs->ip + 11) >> 32)
24601 + break;
24602 +#endif
24603 +
24604 + err = get_user(mov1, (unsigned char __user *)regs->ip);
24605 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24606 + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
24607 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24608 + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
24609 +
24610 + if (err)
24611 + break;
24612 +
24613 + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
24614 + regs->cx = addr1;
24615 + regs->ax = addr2;
24616 + regs->ip = addr2;
24617 + return 2;
24618 + }
24619 + } while (0);
24620 +
24621 + do { /* PaX: gcc trampoline emulation #2 */
24622 + unsigned char mov, jmp;
24623 + unsigned int addr1, addr2;
24624 +
24625 +#ifdef CONFIG_X86_64
24626 + if ((regs->ip + 9) >> 32)
24627 + break;
24628 +#endif
24629 +
24630 + err = get_user(mov, (unsigned char __user *)regs->ip);
24631 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24632 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
24633 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24634 +
24635 + if (err)
24636 + break;
24637 +
24638 + if (mov == 0xB9 && jmp == 0xE9) {
24639 + regs->cx = addr1;
24640 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
24641 + return 2;
24642 + }
24643 + } while (0);
24644 +
24645 + return 1; /* PaX in action */
24646 +}
24647 +
24648 +#ifdef CONFIG_X86_64
24649 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
24650 +{
24651 + int err;
24652 +
24653 + do { /* PaX: libffi trampoline emulation */
24654 + unsigned short mov1, mov2, jmp1;
24655 + unsigned char stcclc, jmp2;
24656 + unsigned long addr1, addr2;
24657 +
24658 + err = get_user(mov1, (unsigned short __user *)regs->ip);
24659 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
24660 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
24661 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
24662 + err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
24663 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
24664 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
24665 +
24666 + if (err)
24667 + break;
24668 +
24669 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24670 + regs->r11 = addr1;
24671 + regs->r10 = addr2;
24672 + if (stcclc == 0xF8)
24673 + regs->flags &= ~X86_EFLAGS_CF;
24674 + else
24675 + regs->flags |= X86_EFLAGS_CF;
24676 + regs->ip = addr1;
24677 + return 2;
24678 + }
24679 + } while (0);
24680 +
24681 + do { /* PaX: gcc trampoline emulation #1 */
24682 + unsigned short mov1, mov2, jmp1;
24683 + unsigned char jmp2;
24684 + unsigned int addr1;
24685 + unsigned long addr2;
24686 +
24687 + err = get_user(mov1, (unsigned short __user *)regs->ip);
24688 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
24689 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
24690 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
24691 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
24692 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
24693 +
24694 + if (err)
24695 + break;
24696 +
24697 + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24698 + regs->r11 = addr1;
24699 + regs->r10 = addr2;
24700 + regs->ip = addr1;
24701 + return 2;
24702 + }
24703 + } while (0);
24704 +
24705 + do { /* PaX: gcc trampoline emulation #2 */
24706 + unsigned short mov1, mov2, jmp1;
24707 + unsigned char jmp2;
24708 + unsigned long addr1, addr2;
24709 +
24710 + err = get_user(mov1, (unsigned short __user *)regs->ip);
24711 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
24712 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
24713 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
24714 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
24715 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
24716 +
24717 + if (err)
24718 + break;
24719 +
24720 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24721 + regs->r11 = addr1;
24722 + regs->r10 = addr2;
24723 + regs->ip = addr1;
24724 + return 2;
24725 + }
24726 + } while (0);
24727 +
24728 + return 1; /* PaX in action */
24729 +}
24730 +#endif
24731 +
24732 +/*
24733 + * PaX: decide what to do with offenders (regs->ip = fault address)
24734 + *
24735 + * returns 1 when task should be killed
24736 + * 2 when gcc trampoline was detected
24737 + */
24738 +static int pax_handle_fetch_fault(struct pt_regs *regs)
24739 +{
24740 + if (v8086_mode(regs))
24741 + return 1;
24742 +
24743 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
24744 + return 1;
24745 +
24746 +#ifdef CONFIG_X86_32
24747 + return pax_handle_fetch_fault_32(regs);
24748 +#else
24749 + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
24750 + return pax_handle_fetch_fault_32(regs);
24751 + else
24752 + return pax_handle_fetch_fault_64(regs);
24753 +#endif
24754 +}
24755 +#endif
24756 +
24757 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24758 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
24759 +{
24760 + long i;
24761 +
24762 + printk(KERN_ERR "PAX: bytes at PC: ");
24763 + for (i = 0; i < 20; i++) {
24764 + unsigned char c;
24765 + if (get_user(c, (unsigned char __force_user *)pc+i))
24766 + printk(KERN_CONT "?? ");
24767 + else
24768 + printk(KERN_CONT "%02x ", c);
24769 + }
24770 + printk("\n");
24771 +
24772 + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
24773 + for (i = -1; i < 80 / (long)sizeof(long); i++) {
24774 + unsigned long c;
24775 + if (get_user(c, (unsigned long __force_user *)sp+i)) {
24776 +#ifdef CONFIG_X86_32
24777 + printk(KERN_CONT "???????? ");
24778 +#else
24779 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
24780 + printk(KERN_CONT "???????? ???????? ");
24781 + else
24782 + printk(KERN_CONT "???????????????? ");
24783 +#endif
24784 + } else {
24785 +#ifdef CONFIG_X86_64
24786 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
24787 + printk(KERN_CONT "%08x ", (unsigned int)c);
24788 + printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
24789 + } else
24790 +#endif
24791 + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
24792 + }
24793 + }
24794 + printk("\n");
24795 +}
24796 +#endif
24797 +
24798 +/**
24799 + * probe_kernel_write(): safely attempt to write to a location
24800 + * @dst: address to write to
24801 + * @src: pointer to the data that shall be written
24802 + * @size: size of the data chunk
24803 + *
24804 + * Safely write to address @dst from the buffer at @src. If a kernel fault
24805 + * happens, handle that and return -EFAULT.
24806 + */
24807 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
24808 +{
24809 + long ret;
24810 + mm_segment_t old_fs = get_fs();
24811 +
24812 + set_fs(KERNEL_DS);
24813 + pagefault_disable();
24814 + pax_open_kernel();
24815 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
24816 + pax_close_kernel();
24817 + pagefault_enable();
24818 + set_fs(old_fs);
24819 +
24820 + return ret ? -EFAULT : 0;
24821 +}
24822 diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
24823 index 71da1bc..7a16bf4 100644
24824 --- a/arch/x86/mm/gup.c
24825 +++ b/arch/x86/mm/gup.c
24826 @@ -237,7 +237,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
24827 addr = start;
24828 len = (unsigned long) nr_pages << PAGE_SHIFT;
24829 end = start + len;
24830 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
24831 + if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
24832 (void __user *)start, len)))
24833 return 0;
24834
24835 diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
24836 index 63a6ba6..79abd7a 100644
24837 --- a/arch/x86/mm/highmem_32.c
24838 +++ b/arch/x86/mm/highmem_32.c
24839 @@ -43,7 +43,10 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
24840 idx = type + KM_TYPE_NR*smp_processor_id();
24841 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
24842 BUG_ON(!pte_none(*(kmap_pte-idx)));
24843 +
24844 + pax_open_kernel();
24845 set_pte(kmap_pte-idx, mk_pte(page, prot));
24846 + pax_close_kernel();
24847
24848 return (void *)vaddr;
24849 }
24850 diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
24851 index f46c3407..6ff9a26 100644
24852 --- a/arch/x86/mm/hugetlbpage.c
24853 +++ b/arch/x86/mm/hugetlbpage.c
24854 @@ -267,13 +267,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
24855 struct hstate *h = hstate_file(file);
24856 struct mm_struct *mm = current->mm;
24857 struct vm_area_struct *vma;
24858 - unsigned long start_addr;
24859 + unsigned long start_addr, pax_task_size = TASK_SIZE;
24860 +
24861 +#ifdef CONFIG_PAX_SEGMEXEC
24862 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
24863 + pax_task_size = SEGMEXEC_TASK_SIZE;
24864 +#endif
24865 +
24866 + pax_task_size -= PAGE_SIZE;
24867
24868 if (len > mm->cached_hole_size) {
24869 - start_addr = mm->free_area_cache;
24870 + start_addr = mm->free_area_cache;
24871 } else {
24872 - start_addr = TASK_UNMAPPED_BASE;
24873 - mm->cached_hole_size = 0;
24874 + start_addr = mm->mmap_base;
24875 + mm->cached_hole_size = 0;
24876 }
24877
24878 full_search:
24879 @@ -281,26 +288,27 @@ full_search:
24880
24881 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
24882 /* At this point: (!vma || addr < vma->vm_end). */
24883 - if (TASK_SIZE - len < addr) {
24884 + if (pax_task_size - len < addr) {
24885 /*
24886 * Start a new search - just in case we missed
24887 * some holes.
24888 */
24889 - if (start_addr != TASK_UNMAPPED_BASE) {
24890 - start_addr = TASK_UNMAPPED_BASE;
24891 + if (start_addr != mm->mmap_base) {
24892 + start_addr = mm->mmap_base;
24893 mm->cached_hole_size = 0;
24894 goto full_search;
24895 }
24896 return -ENOMEM;
24897 }
24898 - if (!vma || addr + len <= vma->vm_start) {
24899 - mm->free_area_cache = addr + len;
24900 - return addr;
24901 - }
24902 + if (check_heap_stack_gap(vma, addr, len))
24903 + break;
24904 if (addr + mm->cached_hole_size < vma->vm_start)
24905 mm->cached_hole_size = vma->vm_start - addr;
24906 addr = ALIGN(vma->vm_end, huge_page_size(h));
24907 }
24908 +
24909 + mm->free_area_cache = addr + len;
24910 + return addr;
24911 }
24912
24913 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24914 @@ -309,10 +317,9 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24915 {
24916 struct hstate *h = hstate_file(file);
24917 struct mm_struct *mm = current->mm;
24918 - struct vm_area_struct *vma, *prev_vma;
24919 - unsigned long base = mm->mmap_base, addr = addr0;
24920 + struct vm_area_struct *vma;
24921 + unsigned long base = mm->mmap_base, addr;
24922 unsigned long largest_hole = mm->cached_hole_size;
24923 - int first_time = 1;
24924
24925 /* don't allow allocations above current base */
24926 if (mm->free_area_cache > base)
24927 @@ -322,64 +329,63 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24928 largest_hole = 0;
24929 mm->free_area_cache = base;
24930 }
24931 -try_again:
24932 +
24933 /* make sure it can fit in the remaining address space */
24934 if (mm->free_area_cache < len)
24935 goto fail;
24936
24937 /* either no address requested or cant fit in requested address hole */
24938 - addr = (mm->free_area_cache - len) & huge_page_mask(h);
24939 + addr = (mm->free_area_cache - len);
24940 do {
24941 + addr &= huge_page_mask(h);
24942 + vma = find_vma(mm, addr);
24943 /*
24944 * Lookup failure means no vma is above this address,
24945 * i.e. return with success:
24946 - */
24947 - if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
24948 - return addr;
24949 -
24950 - /*
24951 * new region fits between prev_vma->vm_end and
24952 * vma->vm_start, use it:
24953 */
24954 - if (addr + len <= vma->vm_start &&
24955 - (!prev_vma || (addr >= prev_vma->vm_end))) {
24956 + if (check_heap_stack_gap(vma, addr, len)) {
24957 /* remember the address as a hint for next time */
24958 - mm->cached_hole_size = largest_hole;
24959 - return (mm->free_area_cache = addr);
24960 - } else {
24961 - /* pull free_area_cache down to the first hole */
24962 - if (mm->free_area_cache == vma->vm_end) {
24963 - mm->free_area_cache = vma->vm_start;
24964 - mm->cached_hole_size = largest_hole;
24965 - }
24966 + mm->cached_hole_size = largest_hole;
24967 + return (mm->free_area_cache = addr);
24968 + }
24969 + /* pull free_area_cache down to the first hole */
24970 + if (mm->free_area_cache == vma->vm_end) {
24971 + mm->free_area_cache = vma->vm_start;
24972 + mm->cached_hole_size = largest_hole;
24973 }
24974
24975 /* remember the largest hole we saw so far */
24976 if (addr + largest_hole < vma->vm_start)
24977 - largest_hole = vma->vm_start - addr;
24978 + largest_hole = vma->vm_start - addr;
24979
24980 /* try just below the current vma->vm_start */
24981 - addr = (vma->vm_start - len) & huge_page_mask(h);
24982 - } while (len <= vma->vm_start);
24983 + addr = skip_heap_stack_gap(vma, len);
24984 + } while (!IS_ERR_VALUE(addr));
24985
24986 fail:
24987 /*
24988 - * if hint left us with no space for the requested
24989 - * mapping then try again:
24990 - */
24991 - if (first_time) {
24992 - mm->free_area_cache = base;
24993 - largest_hole = 0;
24994 - first_time = 0;
24995 - goto try_again;
24996 - }
24997 - /*
24998 * A failed mmap() very likely causes application failure,
24999 * so fall back to the bottom-up function here. This scenario
25000 * can happen with large stack limits and large mmap()
25001 * allocations.
25002 */
25003 - mm->free_area_cache = TASK_UNMAPPED_BASE;
25004 +
25005 +#ifdef CONFIG_PAX_SEGMEXEC
25006 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
25007 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
25008 + else
25009 +#endif
25010 +
25011 + mm->mmap_base = TASK_UNMAPPED_BASE;
25012 +
25013 +#ifdef CONFIG_PAX_RANDMMAP
25014 + if (mm->pax_flags & MF_PAX_RANDMMAP)
25015 + mm->mmap_base += mm->delta_mmap;
25016 +#endif
25017 +
25018 + mm->free_area_cache = mm->mmap_base;
25019 mm->cached_hole_size = ~0UL;
25020 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
25021 len, pgoff, flags);
25022 @@ -387,6 +393,7 @@ fail:
25023 /*
25024 * Restore the topdown base:
25025 */
25026 + mm->mmap_base = base;
25027 mm->free_area_cache = base;
25028 mm->cached_hole_size = ~0UL;
25029
25030 @@ -400,10 +407,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
25031 struct hstate *h = hstate_file(file);
25032 struct mm_struct *mm = current->mm;
25033 struct vm_area_struct *vma;
25034 + unsigned long pax_task_size = TASK_SIZE;
25035
25036 if (len & ~huge_page_mask(h))
25037 return -EINVAL;
25038 - if (len > TASK_SIZE)
25039 +
25040 +#ifdef CONFIG_PAX_SEGMEXEC
25041 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
25042 + pax_task_size = SEGMEXEC_TASK_SIZE;
25043 +#endif
25044 +
25045 + pax_task_size -= PAGE_SIZE;
25046 +
25047 + if (len > pax_task_size)
25048 return -ENOMEM;
25049
25050 if (flags & MAP_FIXED) {
25051 @@ -415,8 +431,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
25052 if (addr) {
25053 addr = ALIGN(addr, huge_page_size(h));
25054 vma = find_vma(mm, addr);
25055 - if (TASK_SIZE - len >= addr &&
25056 - (!vma || addr + len <= vma->vm_start))
25057 + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
25058 return addr;
25059 }
25060 if (mm->get_unmapped_area == arch_get_unmapped_area)
25061 diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
25062 index 73ffd55..f61c2a7 100644
25063 --- a/arch/x86/mm/init.c
25064 +++ b/arch/x86/mm/init.c
25065 @@ -69,11 +69,7 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
25066 * cause a hotspot and fill up ZONE_DMA. The page tables
25067 * need roughly 0.5KB per GB.
25068 */
25069 -#ifdef CONFIG_X86_32
25070 - start = 0x7000;
25071 -#else
25072 - start = 0x8000;
25073 -#endif
25074 + start = 0x100000;
25075 e820_table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
25076 tables, PAGE_SIZE);
25077 if (e820_table_start == -1UL)
25078 @@ -147,7 +143,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
25079 #endif
25080
25081 set_nx();
25082 - if (nx_enabled)
25083 + if (nx_enabled && cpu_has_nx)
25084 printk(KERN_INFO "NX (Execute Disable) protection: active\n");
25085
25086 /* Enable PSE if available */
25087 @@ -329,10 +325,27 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
25088 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
25089 * mmio resources as well as potential bios/acpi data regions.
25090 */
25091 +
25092 int devmem_is_allowed(unsigned long pagenr)
25093 {
25094 +#ifdef CONFIG_GRKERNSEC_KMEM
25095 + /* allow BDA */
25096 + if (!pagenr)
25097 + return 1;
25098 + /* allow EBDA */
25099 + if ((0x9f000 >> PAGE_SHIFT) == pagenr)
25100 + return 1;
25101 + /* allow ISA/video mem */
25102 + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
25103 + return 1;
25104 + /* throw out everything else below 1MB */
25105 + if (pagenr <= 256)
25106 + return 0;
25107 +#else
25108 if (pagenr <= 256)
25109 return 1;
25110 +#endif
25111 +
25112 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
25113 return 0;
25114 if (!page_is_ram(pagenr))
25115 @@ -379,6 +392,86 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
25116
25117 void free_initmem(void)
25118 {
25119 +
25120 +#ifdef CONFIG_PAX_KERNEXEC
25121 +#ifdef CONFIG_X86_32
25122 + /* PaX: limit KERNEL_CS to actual size */
25123 + unsigned long addr, limit;
25124 + struct desc_struct d;
25125 + int cpu;
25126 +
25127 + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
25128 + limit = (limit - 1UL) >> PAGE_SHIFT;
25129 +
25130 + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
25131 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
25132 + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
25133 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
25134 + }
25135 +
25136 + /* PaX: make KERNEL_CS read-only */
25137 + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
25138 + if (!paravirt_enabled())
25139 + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
25140 +/*
25141 + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
25142 + pgd = pgd_offset_k(addr);
25143 + pud = pud_offset(pgd, addr);
25144 + pmd = pmd_offset(pud, addr);
25145 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
25146 + }
25147 +*/
25148 +#ifdef CONFIG_X86_PAE
25149 + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
25150 +/*
25151 + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
25152 + pgd = pgd_offset_k(addr);
25153 + pud = pud_offset(pgd, addr);
25154 + pmd = pmd_offset(pud, addr);
25155 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
25156 + }
25157 +*/
25158 +#endif
25159 +
25160 +#ifdef CONFIG_MODULES
25161 + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
25162 +#endif
25163 +
25164 +#else
25165 + pgd_t *pgd;
25166 + pud_t *pud;
25167 + pmd_t *pmd;
25168 + unsigned long addr, end;
25169 +
25170 + /* PaX: make kernel code/rodata read-only, rest non-executable */
25171 + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
25172 + pgd = pgd_offset_k(addr);
25173 + pud = pud_offset(pgd, addr);
25174 + pmd = pmd_offset(pud, addr);
25175 + if (!pmd_present(*pmd))
25176 + continue;
25177 + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
25178 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
25179 + else
25180 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
25181 + }
25182 +
25183 + addr = (unsigned long)__va(__pa(__START_KERNEL_map));
25184 + end = addr + KERNEL_IMAGE_SIZE;
25185 + for (; addr < end; addr += PMD_SIZE) {
25186 + pgd = pgd_offset_k(addr);
25187 + pud = pud_offset(pgd, addr);
25188 + pmd = pmd_offset(pud, addr);
25189 + if (!pmd_present(*pmd))
25190 + continue;
25191 + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
25192 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
25193 + }
25194 +#endif
25195 +
25196 + flush_tlb_all();
25197 +#endif
25198 +
25199 free_init_pages("unused kernel memory",
25200 (unsigned long)(&__init_begin),
25201 (unsigned long)(&__init_end));
25202 diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
25203 index 30938c1..bda3d5d 100644
25204 --- a/arch/x86/mm/init_32.c
25205 +++ b/arch/x86/mm/init_32.c
25206 @@ -72,36 +72,6 @@ static __init void *alloc_low_page(void)
25207 }
25208
25209 /*
25210 - * Creates a middle page table and puts a pointer to it in the
25211 - * given global directory entry. This only returns the gd entry
25212 - * in non-PAE compilation mode, since the middle layer is folded.
25213 - */
25214 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
25215 -{
25216 - pud_t *pud;
25217 - pmd_t *pmd_table;
25218 -
25219 -#ifdef CONFIG_X86_PAE
25220 - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
25221 - if (after_bootmem)
25222 - pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
25223 - else
25224 - pmd_table = (pmd_t *)alloc_low_page();
25225 - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
25226 - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
25227 - pud = pud_offset(pgd, 0);
25228 - BUG_ON(pmd_table != pmd_offset(pud, 0));
25229 -
25230 - return pmd_table;
25231 - }
25232 -#endif
25233 - pud = pud_offset(pgd, 0);
25234 - pmd_table = pmd_offset(pud, 0);
25235 -
25236 - return pmd_table;
25237 -}
25238 -
25239 -/*
25240 * Create a page table and place a pointer to it in a middle page
25241 * directory entry:
25242 */
25243 @@ -121,13 +91,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
25244 page_table = (pte_t *)alloc_low_page();
25245
25246 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
25247 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
25248 + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
25249 +#else
25250 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
25251 +#endif
25252 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
25253 }
25254
25255 return pte_offset_kernel(pmd, 0);
25256 }
25257
25258 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
25259 +{
25260 + pud_t *pud;
25261 + pmd_t *pmd_table;
25262 +
25263 + pud = pud_offset(pgd, 0);
25264 + pmd_table = pmd_offset(pud, 0);
25265 +
25266 + return pmd_table;
25267 +}
25268 +
25269 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
25270 {
25271 int pgd_idx = pgd_index(vaddr);
25272 @@ -201,6 +186,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
25273 int pgd_idx, pmd_idx;
25274 unsigned long vaddr;
25275 pgd_t *pgd;
25276 + pud_t *pud;
25277 pmd_t *pmd;
25278 pte_t *pte = NULL;
25279
25280 @@ -210,8 +196,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
25281 pgd = pgd_base + pgd_idx;
25282
25283 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
25284 - pmd = one_md_table_init(pgd);
25285 - pmd = pmd + pmd_index(vaddr);
25286 + pud = pud_offset(pgd, vaddr);
25287 + pmd = pmd_offset(pud, vaddr);
25288 +
25289 +#ifdef CONFIG_X86_PAE
25290 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
25291 +#endif
25292 +
25293 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
25294 pmd++, pmd_idx++) {
25295 pte = page_table_kmap_check(one_page_table_init(pmd),
25296 @@ -223,11 +214,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
25297 }
25298 }
25299
25300 -static inline int is_kernel_text(unsigned long addr)
25301 +static inline int is_kernel_text(unsigned long start, unsigned long end)
25302 {
25303 - if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
25304 - return 1;
25305 - return 0;
25306 + if ((start > ktla_ktva((unsigned long)_etext) ||
25307 + end <= ktla_ktva((unsigned long)_stext)) &&
25308 + (start > ktla_ktva((unsigned long)_einittext) ||
25309 + end <= ktla_ktva((unsigned long)_sinittext)) &&
25310 +
25311 +#ifdef CONFIG_ACPI_SLEEP
25312 + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
25313 +#endif
25314 +
25315 + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
25316 + return 0;
25317 + return 1;
25318 }
25319
25320 /*
25321 @@ -243,9 +243,10 @@ kernel_physical_mapping_init(unsigned long start,
25322 int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
25323 unsigned long start_pfn, end_pfn;
25324 pgd_t *pgd_base = swapper_pg_dir;
25325 - int pgd_idx, pmd_idx, pte_ofs;
25326 + unsigned int pgd_idx, pmd_idx, pte_ofs;
25327 unsigned long pfn;
25328 pgd_t *pgd;
25329 + pud_t *pud;
25330 pmd_t *pmd;
25331 pte_t *pte;
25332 unsigned pages_2m, pages_4k;
25333 @@ -278,8 +279,13 @@ repeat:
25334 pfn = start_pfn;
25335 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
25336 pgd = pgd_base + pgd_idx;
25337 - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
25338 - pmd = one_md_table_init(pgd);
25339 + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
25340 + pud = pud_offset(pgd, 0);
25341 + pmd = pmd_offset(pud, 0);
25342 +
25343 +#ifdef CONFIG_X86_PAE
25344 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
25345 +#endif
25346
25347 if (pfn >= end_pfn)
25348 continue;
25349 @@ -291,14 +297,13 @@ repeat:
25350 #endif
25351 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
25352 pmd++, pmd_idx++) {
25353 - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
25354 + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
25355
25356 /*
25357 * Map with big pages if possible, otherwise
25358 * create normal page tables:
25359 */
25360 if (use_pse) {
25361 - unsigned int addr2;
25362 pgprot_t prot = PAGE_KERNEL_LARGE;
25363 /*
25364 * first pass will use the same initial
25365 @@ -308,11 +313,7 @@ repeat:
25366 __pgprot(PTE_IDENT_ATTR |
25367 _PAGE_PSE);
25368
25369 - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
25370 - PAGE_OFFSET + PAGE_SIZE-1;
25371 -
25372 - if (is_kernel_text(addr) ||
25373 - is_kernel_text(addr2))
25374 + if (is_kernel_text(address, address + PMD_SIZE))
25375 prot = PAGE_KERNEL_LARGE_EXEC;
25376
25377 pages_2m++;
25378 @@ -329,7 +330,7 @@ repeat:
25379 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
25380 pte += pte_ofs;
25381 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
25382 - pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
25383 + pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
25384 pgprot_t prot = PAGE_KERNEL;
25385 /*
25386 * first pass will use the same initial
25387 @@ -337,7 +338,7 @@ repeat:
25388 */
25389 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
25390
25391 - if (is_kernel_text(addr))
25392 + if (is_kernel_text(address, address + PAGE_SIZE))
25393 prot = PAGE_KERNEL_EXEC;
25394
25395 pages_4k++;
25396 @@ -489,7 +490,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
25397
25398 pud = pud_offset(pgd, va);
25399 pmd = pmd_offset(pud, va);
25400 - if (!pmd_present(*pmd))
25401 + if (!pmd_present(*pmd) || pmd_huge(*pmd))
25402 break;
25403
25404 pte = pte_offset_kernel(pmd, va);
25405 @@ -541,9 +542,7 @@ void __init early_ioremap_page_table_range_init(void)
25406
25407 static void __init pagetable_init(void)
25408 {
25409 - pgd_t *pgd_base = swapper_pg_dir;
25410 -
25411 - permanent_kmaps_init(pgd_base);
25412 + permanent_kmaps_init(swapper_pg_dir);
25413 }
25414
25415 #ifdef CONFIG_ACPI_SLEEP
25416 @@ -551,12 +550,12 @@ static void __init pagetable_init(void)
25417 * ACPI suspend needs this for resume, because things like the intel-agp
25418 * driver might have split up a kernel 4MB mapping.
25419 */
25420 -char swsusp_pg_dir[PAGE_SIZE]
25421 +pgd_t swsusp_pg_dir[PTRS_PER_PGD]
25422 __attribute__ ((aligned(PAGE_SIZE)));
25423
25424 static inline void save_pg_dir(void)
25425 {
25426 - memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
25427 + clone_pgd_range(swsusp_pg_dir, swapper_pg_dir, PTRS_PER_PGD);
25428 }
25429 #else /* !CONFIG_ACPI_SLEEP */
25430 static inline void save_pg_dir(void)
25431 @@ -588,7 +587,7 @@ void zap_low_mappings(bool early)
25432 flush_tlb_all();
25433 }
25434
25435 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
25436 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
25437 EXPORT_SYMBOL_GPL(__supported_pte_mask);
25438
25439 /* user-defined highmem size */
25440 @@ -777,7 +776,7 @@ void __init setup_bootmem_allocator(void)
25441 * Initialize the boot-time allocator (with low memory only):
25442 */
25443 bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
25444 - bootmap = find_e820_area(0, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
25445 + bootmap = find_e820_area(0x100000, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
25446 PAGE_SIZE);
25447 if (bootmap == -1L)
25448 panic("Cannot find bootmem map of size %ld\n", bootmap_size);
25449 @@ -864,6 +863,12 @@ void __init mem_init(void)
25450
25451 pci_iommu_alloc();
25452
25453 +#ifdef CONFIG_PAX_PER_CPU_PGD
25454 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
25455 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
25456 + KERNEL_PGD_PTRS);
25457 +#endif
25458 +
25459 #ifdef CONFIG_FLATMEM
25460 BUG_ON(!mem_map);
25461 #endif
25462 @@ -881,7 +886,7 @@ void __init mem_init(void)
25463 set_highmem_pages_init();
25464
25465 codesize = (unsigned long) &_etext - (unsigned long) &_text;
25466 - datasize = (unsigned long) &_edata - (unsigned long) &_etext;
25467 + datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
25468 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
25469
25470 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
25471 @@ -923,10 +928,10 @@ void __init mem_init(void)
25472 ((unsigned long)&__init_end -
25473 (unsigned long)&__init_begin) >> 10,
25474
25475 - (unsigned long)&_etext, (unsigned long)&_edata,
25476 - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
25477 + (unsigned long)&_sdata, (unsigned long)&_edata,
25478 + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
25479
25480 - (unsigned long)&_text, (unsigned long)&_etext,
25481 + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
25482 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
25483
25484 /*
25485 @@ -1007,6 +1012,7 @@ void set_kernel_text_rw(void)
25486 if (!kernel_set_to_readonly)
25487 return;
25488
25489 + start = ktla_ktva(start);
25490 pr_debug("Set kernel text: %lx - %lx for read write\n",
25491 start, start+size);
25492
25493 @@ -1021,6 +1027,7 @@ void set_kernel_text_ro(void)
25494 if (!kernel_set_to_readonly)
25495 return;
25496
25497 + start = ktla_ktva(start);
25498 pr_debug("Set kernel text: %lx - %lx for read only\n",
25499 start, start+size);
25500
25501 @@ -1032,6 +1039,7 @@ void mark_rodata_ro(void)
25502 unsigned long start = PFN_ALIGN(_text);
25503 unsigned long size = PFN_ALIGN(_etext) - start;
25504
25505 + start = ktla_ktva(start);
25506 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
25507 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
25508 size >> 10);
25509 diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
25510 index 7d095ad..25d2549 100644
25511 --- a/arch/x86/mm/init_64.c
25512 +++ b/arch/x86/mm/init_64.c
25513 @@ -164,7 +164,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
25514 pmd = fill_pmd(pud, vaddr);
25515 pte = fill_pte(pmd, vaddr);
25516
25517 + pax_open_kernel();
25518 set_pte(pte, new_pte);
25519 + pax_close_kernel();
25520
25521 /*
25522 * It's enough to flush this one mapping.
25523 @@ -223,14 +225,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
25524 pgd = pgd_offset_k((unsigned long)__va(phys));
25525 if (pgd_none(*pgd)) {
25526 pud = (pud_t *) spp_getpage();
25527 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
25528 - _PAGE_USER));
25529 + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
25530 }
25531 pud = pud_offset(pgd, (unsigned long)__va(phys));
25532 if (pud_none(*pud)) {
25533 pmd = (pmd_t *) spp_getpage();
25534 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
25535 - _PAGE_USER));
25536 + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
25537 }
25538 pmd = pmd_offset(pud, phys);
25539 BUG_ON(!pmd_none(*pmd));
25540 @@ -675,6 +675,12 @@ void __init mem_init(void)
25541
25542 pci_iommu_alloc();
25543
25544 +#ifdef CONFIG_PAX_PER_CPU_PGD
25545 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
25546 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
25547 + KERNEL_PGD_PTRS);
25548 +#endif
25549 +
25550 /* clear_bss() already clear the empty_zero_page */
25551
25552 reservedpages = 0;
25553 @@ -861,8 +867,8 @@ int kern_addr_valid(unsigned long addr)
25554 static struct vm_area_struct gate_vma = {
25555 .vm_start = VSYSCALL_START,
25556 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
25557 - .vm_page_prot = PAGE_READONLY_EXEC,
25558 - .vm_flags = VM_READ | VM_EXEC
25559 + .vm_page_prot = PAGE_READONLY,
25560 + .vm_flags = VM_READ
25561 };
25562
25563 struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
25564 @@ -896,7 +902,7 @@ int in_gate_area_no_task(unsigned long addr)
25565
25566 const char *arch_vma_name(struct vm_area_struct *vma)
25567 {
25568 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
25569 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
25570 return "[vdso]";
25571 if (vma == &gate_vma)
25572 return "[vsyscall]";
25573 diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
25574 index 84e236c..69bd3f6 100644
25575 --- a/arch/x86/mm/iomap_32.c
25576 +++ b/arch/x86/mm/iomap_32.c
25577 @@ -65,7 +65,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
25578 debug_kmap_atomic(type);
25579 idx = type + KM_TYPE_NR * smp_processor_id();
25580 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
25581 +
25582 + pax_open_kernel();
25583 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
25584 + pax_close_kernel();
25585 +
25586 arch_flush_lazy_mmu_mode();
25587
25588 return (void *)vaddr;
25589 diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
25590 index 2feb9bd..ab91e7b 100644
25591 --- a/arch/x86/mm/ioremap.c
25592 +++ b/arch/x86/mm/ioremap.c
25593 @@ -41,8 +41,8 @@ int page_is_ram(unsigned long pagenr)
25594 * Second special case: Some BIOSen report the PC BIOS
25595 * area (640->1Mb) as ram even though it is not.
25596 */
25597 - if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
25598 - pagenr < (BIOS_END >> PAGE_SHIFT))
25599 + if (pagenr >= (ISA_START_ADDRESS >> PAGE_SHIFT) &&
25600 + pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
25601 return 0;
25602
25603 for (i = 0; i < e820.nr_map; i++) {
25604 @@ -137,13 +137,10 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
25605 /*
25606 * Don't allow anybody to remap normal RAM that we're using..
25607 */
25608 - for (pfn = phys_addr >> PAGE_SHIFT;
25609 - (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
25610 - pfn++) {
25611 -
25612 + for (pfn = phys_addr >> PAGE_SHIFT; ((resource_size_t)pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK); pfn++) {
25613 int is_ram = page_is_ram(pfn);
25614
25615 - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
25616 + if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
25617 return NULL;
25618 WARN_ON_ONCE(is_ram);
25619 }
25620 @@ -378,6 +375,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
25621
25622 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
25623 if (page_is_ram(start >> PAGE_SHIFT))
25624 +#ifdef CONFIG_HIGHMEM
25625 + if ((start >> PAGE_SHIFT) < max_low_pfn)
25626 +#endif
25627 return __va(phys);
25628
25629 addr = (void __force *)ioremap_default(start, PAGE_SIZE);
25630 @@ -407,7 +407,7 @@ static int __init early_ioremap_debug_setup(char *str)
25631 early_param("early_ioremap_debug", early_ioremap_debug_setup);
25632
25633 static __initdata int after_paging_init;
25634 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
25635 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
25636
25637 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
25638 {
25639 @@ -439,8 +439,7 @@ void __init early_ioremap_init(void)
25640 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
25641
25642 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
25643 - memset(bm_pte, 0, sizeof(bm_pte));
25644 - pmd_populate_kernel(&init_mm, pmd, bm_pte);
25645 + pmd_populate_user(&init_mm, pmd, bm_pte);
25646
25647 /*
25648 * The boot-ioremap range spans multiple pmds, for which
25649 diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
25650 index 8cc1833..1abbc5b 100644
25651 --- a/arch/x86/mm/kmemcheck/kmemcheck.c
25652 +++ b/arch/x86/mm/kmemcheck/kmemcheck.c
25653 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
25654 * memory (e.g. tracked pages)? For now, we need this to avoid
25655 * invoking kmemcheck for PnP BIOS calls.
25656 */
25657 - if (regs->flags & X86_VM_MASK)
25658 + if (v8086_mode(regs))
25659 return false;
25660 - if (regs->cs != __KERNEL_CS)
25661 + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
25662 return false;
25663
25664 pte = kmemcheck_pte_lookup(address);
25665 diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
25666 index c9e57af..07a321b 100644
25667 --- a/arch/x86/mm/mmap.c
25668 +++ b/arch/x86/mm/mmap.c
25669 @@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size(void)
25670 * Leave an at least ~128 MB hole with possible stack randomization.
25671 */
25672 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
25673 -#define MAX_GAP (TASK_SIZE/6*5)
25674 +#define MAX_GAP (pax_task_size/6*5)
25675
25676 /*
25677 * True on X86_32 or when emulating IA32 on X86_64
25678 @@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
25679 return rnd << PAGE_SHIFT;
25680 }
25681
25682 -static unsigned long mmap_base(void)
25683 +static unsigned long mmap_base(struct mm_struct *mm)
25684 {
25685 unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
25686 + unsigned long pax_task_size = TASK_SIZE;
25687 +
25688 +#ifdef CONFIG_PAX_SEGMEXEC
25689 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
25690 + pax_task_size = SEGMEXEC_TASK_SIZE;
25691 +#endif
25692
25693 if (gap < MIN_GAP)
25694 gap = MIN_GAP;
25695 else if (gap > MAX_GAP)
25696 gap = MAX_GAP;
25697
25698 - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
25699 + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
25700 }
25701
25702 /*
25703 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
25704 * does, but not when emulating X86_32
25705 */
25706 -static unsigned long mmap_legacy_base(void)
25707 +static unsigned long mmap_legacy_base(struct mm_struct *mm)
25708 {
25709 - if (mmap_is_ia32())
25710 + if (mmap_is_ia32()) {
25711 +
25712 +#ifdef CONFIG_PAX_SEGMEXEC
25713 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
25714 + return SEGMEXEC_TASK_UNMAPPED_BASE;
25715 + else
25716 +#endif
25717 +
25718 return TASK_UNMAPPED_BASE;
25719 - else
25720 + } else
25721 return TASK_UNMAPPED_BASE + mmap_rnd();
25722 }
25723
25724 @@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(void)
25725 void arch_pick_mmap_layout(struct mm_struct *mm)
25726 {
25727 if (mmap_is_legacy()) {
25728 - mm->mmap_base = mmap_legacy_base();
25729 + mm->mmap_base = mmap_legacy_base(mm);
25730 +
25731 +#ifdef CONFIG_PAX_RANDMMAP
25732 + if (mm->pax_flags & MF_PAX_RANDMMAP)
25733 + mm->mmap_base += mm->delta_mmap;
25734 +#endif
25735 +
25736 mm->get_unmapped_area = arch_get_unmapped_area;
25737 mm->unmap_area = arch_unmap_area;
25738 } else {
25739 - mm->mmap_base = mmap_base();
25740 + mm->mmap_base = mmap_base(mm);
25741 +
25742 +#ifdef CONFIG_PAX_RANDMMAP
25743 + if (mm->pax_flags & MF_PAX_RANDMMAP)
25744 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
25745 +#endif
25746 +
25747 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
25748 mm->unmap_area = arch_unmap_area_topdown;
25749 }
25750 diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
25751 index 132772a..b961f11 100644
25752 --- a/arch/x86/mm/mmio-mod.c
25753 +++ b/arch/x86/mm/mmio-mod.c
25754 @@ -193,7 +193,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
25755 break;
25756 default:
25757 {
25758 - unsigned char *ip = (unsigned char *)instptr;
25759 + unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
25760 my_trace->opcode = MMIO_UNKNOWN_OP;
25761 my_trace->width = 0;
25762 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
25763 @@ -233,7 +233,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
25764 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
25765 void __iomem *addr)
25766 {
25767 - static atomic_t next_id;
25768 + static atomic_unchecked_t next_id;
25769 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
25770 /* These are page-unaligned. */
25771 struct mmiotrace_map map = {
25772 @@ -257,7 +257,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
25773 .private = trace
25774 },
25775 .phys = offset,
25776 - .id = atomic_inc_return(&next_id)
25777 + .id = atomic_inc_return_unchecked(&next_id)
25778 };
25779 map.map_id = trace->id;
25780
25781 diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
25782 index d253006..e56dd6a 100644
25783 --- a/arch/x86/mm/numa_32.c
25784 +++ b/arch/x86/mm/numa_32.c
25785 @@ -98,7 +98,6 @@ unsigned long node_memmap_size_bytes(int nid, unsigned long start_pfn,
25786 }
25787 #endif
25788
25789 -extern unsigned long find_max_low_pfn(void);
25790 extern unsigned long highend_pfn, highstart_pfn;
25791
25792 #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
25793 diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
25794 index e1d1069..2251ff3 100644
25795 --- a/arch/x86/mm/pageattr-test.c
25796 +++ b/arch/x86/mm/pageattr-test.c
25797 @@ -36,7 +36,7 @@ enum {
25798
25799 static int pte_testbit(pte_t pte)
25800 {
25801 - return pte_flags(pte) & _PAGE_UNUSED1;
25802 + return pte_flags(pte) & _PAGE_CPA_TEST;
25803 }
25804
25805 struct split_state {
25806 diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
25807 index dd38bfb..b72c63e 100644
25808 --- a/arch/x86/mm/pageattr.c
25809 +++ b/arch/x86/mm/pageattr.c
25810 @@ -261,16 +261,17 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25811 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
25812 */
25813 if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
25814 - pgprot_val(forbidden) |= _PAGE_NX;
25815 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25816
25817 /*
25818 * The kernel text needs to be executable for obvious reasons
25819 * Does not cover __inittext since that is gone later on. On
25820 * 64bit we do not enforce !NX on the low mapping
25821 */
25822 - if (within(address, (unsigned long)_text, (unsigned long)_etext))
25823 - pgprot_val(forbidden) |= _PAGE_NX;
25824 + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
25825 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25826
25827 +#ifdef CONFIG_DEBUG_RODATA
25828 /*
25829 * The .rodata section needs to be read-only. Using the pfn
25830 * catches all aliases.
25831 @@ -278,6 +279,14 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25832 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
25833 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
25834 pgprot_val(forbidden) |= _PAGE_RW;
25835 +#endif
25836 +
25837 +#ifdef CONFIG_PAX_KERNEXEC
25838 + if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
25839 + pgprot_val(forbidden) |= _PAGE_RW;
25840 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25841 + }
25842 +#endif
25843
25844 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
25845
25846 @@ -331,23 +340,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
25847 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
25848 {
25849 /* change init_mm */
25850 + pax_open_kernel();
25851 set_pte_atomic(kpte, pte);
25852 +
25853 #ifdef CONFIG_X86_32
25854 if (!SHARED_KERNEL_PMD) {
25855 +
25856 +#ifdef CONFIG_PAX_PER_CPU_PGD
25857 + unsigned long cpu;
25858 +#else
25859 struct page *page;
25860 +#endif
25861
25862 +#ifdef CONFIG_PAX_PER_CPU_PGD
25863 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
25864 + pgd_t *pgd = get_cpu_pgd(cpu);
25865 +#else
25866 list_for_each_entry(page, &pgd_list, lru) {
25867 - pgd_t *pgd;
25868 + pgd_t *pgd = (pgd_t *)page_address(page);
25869 +#endif
25870 +
25871 pud_t *pud;
25872 pmd_t *pmd;
25873
25874 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
25875 + pgd += pgd_index(address);
25876 pud = pud_offset(pgd, address);
25877 pmd = pmd_offset(pud, address);
25878 set_pte_atomic((pte_t *)pmd, pte);
25879 }
25880 }
25881 #endif
25882 + pax_close_kernel();
25883 }
25884
25885 static int
25886 diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
25887 index e78cd0e..de0a817 100644
25888 --- a/arch/x86/mm/pat.c
25889 +++ b/arch/x86/mm/pat.c
25890 @@ -258,7 +258,7 @@ chk_conflict(struct memtype *new, struct memtype *entry, unsigned long *type)
25891
25892 conflict:
25893 printk(KERN_INFO "%s:%d conflicting memory types "
25894 - "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
25895 + "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), new->start,
25896 new->end, cattr_name(new->type), cattr_name(entry->type));
25897 return -EBUSY;
25898 }
25899 @@ -559,7 +559,7 @@ unlock_ret:
25900
25901 if (err) {
25902 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
25903 - current->comm, current->pid, start, end);
25904 + current->comm, task_pid_nr(current), start, end);
25905 }
25906
25907 dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
25908 @@ -689,8 +689,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
25909 while (cursor < to) {
25910 if (!devmem_is_allowed(pfn)) {
25911 printk(KERN_INFO
25912 - "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
25913 - current->comm, from, to);
25914 + "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
25915 + current->comm, from, to, cursor);
25916 return 0;
25917 }
25918 cursor += PAGE_SIZE;
25919 @@ -755,7 +755,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
25920 printk(KERN_INFO
25921 "%s:%d ioremap_change_attr failed %s "
25922 "for %Lx-%Lx\n",
25923 - current->comm, current->pid,
25924 + current->comm, task_pid_nr(current),
25925 cattr_name(flags),
25926 base, (unsigned long long)(base + size));
25927 return -EINVAL;
25928 @@ -813,7 +813,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
25929 free_memtype(paddr, paddr + size);
25930 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
25931 " for %Lx-%Lx, got %s\n",
25932 - current->comm, current->pid,
25933 + current->comm, task_pid_nr(current),
25934 cattr_name(want_flags),
25935 (unsigned long long)paddr,
25936 (unsigned long long)(paddr + size),
25937 diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
25938 index df3d5c8..c2223e1 100644
25939 --- a/arch/x86/mm/pf_in.c
25940 +++ b/arch/x86/mm/pf_in.c
25941 @@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
25942 int i;
25943 enum reason_type rv = OTHERS;
25944
25945 - p = (unsigned char *)ins_addr;
25946 + p = (unsigned char *)ktla_ktva(ins_addr);
25947 p += skip_prefix(p, &prf);
25948 p += get_opcode(p, &opcode);
25949
25950 @@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
25951 struct prefix_bits prf;
25952 int i;
25953
25954 - p = (unsigned char *)ins_addr;
25955 + p = (unsigned char *)ktla_ktva(ins_addr);
25956 p += skip_prefix(p, &prf);
25957 p += get_opcode(p, &opcode);
25958
25959 @@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
25960 struct prefix_bits prf;
25961 int i;
25962
25963 - p = (unsigned char *)ins_addr;
25964 + p = (unsigned char *)ktla_ktva(ins_addr);
25965 p += skip_prefix(p, &prf);
25966 p += get_opcode(p, &opcode);
25967
25968 @@ -417,7 +417,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
25969 int i;
25970 unsigned long rv;
25971
25972 - p = (unsigned char *)ins_addr;
25973 + p = (unsigned char *)ktla_ktva(ins_addr);
25974 p += skip_prefix(p, &prf);
25975 p += get_opcode(p, &opcode);
25976 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
25977 @@ -472,7 +472,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
25978 int i;
25979 unsigned long rv;
25980
25981 - p = (unsigned char *)ins_addr;
25982 + p = (unsigned char *)ktla_ktva(ins_addr);
25983 p += skip_prefix(p, &prf);
25984 p += get_opcode(p, &opcode);
25985 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
25986 diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
25987 index e0e6fad..c56b495 100644
25988 --- a/arch/x86/mm/pgtable.c
25989 +++ b/arch/x86/mm/pgtable.c
25990 @@ -83,9 +83,52 @@ static inline void pgd_list_del(pgd_t *pgd)
25991 list_del(&page->lru);
25992 }
25993
25994 -#define UNSHARED_PTRS_PER_PGD \
25995 - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
25996 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25997 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
25998
25999 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
26000 +{
26001 + while (count--)
26002 + *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
26003 +}
26004 +#endif
26005 +
26006 +#ifdef CONFIG_PAX_PER_CPU_PGD
26007 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
26008 +{
26009 + while (count--)
26010 +
26011 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
26012 + *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
26013 +#else
26014 + *dst++ = *src++;
26015 +#endif
26016 +
26017 +}
26018 +#endif
26019 +
26020 +#ifdef CONFIG_X86_64
26021 +#define pxd_t pud_t
26022 +#define pyd_t pgd_t
26023 +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
26024 +#define pxd_free(mm, pud) pud_free((mm), (pud))
26025 +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
26026 +#define pyd_offset(mm, address) pgd_offset((mm), (address))
26027 +#define PYD_SIZE PGDIR_SIZE
26028 +#else
26029 +#define pxd_t pmd_t
26030 +#define pyd_t pud_t
26031 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
26032 +#define pxd_free(mm, pud) pmd_free((mm), (pud))
26033 +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
26034 +#define pyd_offset(mm, address) pud_offset((mm), (address))
26035 +#define PYD_SIZE PUD_SIZE
26036 +#endif
26037 +
26038 +#ifdef CONFIG_PAX_PER_CPU_PGD
26039 +static inline void pgd_ctor(pgd_t *pgd) {}
26040 +static inline void pgd_dtor(pgd_t *pgd) {}
26041 +#else
26042 static void pgd_ctor(pgd_t *pgd)
26043 {
26044 /* If the pgd points to a shared pagetable level (either the
26045 @@ -119,6 +162,7 @@ static void pgd_dtor(pgd_t *pgd)
26046 pgd_list_del(pgd);
26047 spin_unlock_irqrestore(&pgd_lock, flags);
26048 }
26049 +#endif
26050
26051 /*
26052 * List of all pgd's needed for non-PAE so it can invalidate entries
26053 @@ -131,7 +175,7 @@ static void pgd_dtor(pgd_t *pgd)
26054 * -- wli
26055 */
26056
26057 -#ifdef CONFIG_X86_PAE
26058 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26059 /*
26060 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
26061 * updating the top-level pagetable entries to guarantee the
26062 @@ -143,7 +187,7 @@ static void pgd_dtor(pgd_t *pgd)
26063 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
26064 * and initialize the kernel pmds here.
26065 */
26066 -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
26067 +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
26068
26069 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
26070 {
26071 @@ -161,36 +205,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
26072 */
26073 flush_tlb_mm(mm);
26074 }
26075 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
26076 +#define PREALLOCATED_PXDS USER_PGD_PTRS
26077 #else /* !CONFIG_X86_PAE */
26078
26079 /* No need to prepopulate any pagetable entries in non-PAE modes. */
26080 -#define PREALLOCATED_PMDS 0
26081 +#define PREALLOCATED_PXDS 0
26082
26083 #endif /* CONFIG_X86_PAE */
26084
26085 -static void free_pmds(pmd_t *pmds[])
26086 +static void free_pxds(pxd_t *pxds[])
26087 {
26088 int i;
26089
26090 - for(i = 0; i < PREALLOCATED_PMDS; i++)
26091 - if (pmds[i])
26092 - free_page((unsigned long)pmds[i]);
26093 + for(i = 0; i < PREALLOCATED_PXDS; i++)
26094 + if (pxds[i])
26095 + free_page((unsigned long)pxds[i]);
26096 }
26097
26098 -static int preallocate_pmds(pmd_t *pmds[])
26099 +static int preallocate_pxds(pxd_t *pxds[])
26100 {
26101 int i;
26102 bool failed = false;
26103
26104 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
26105 - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
26106 - if (pmd == NULL)
26107 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
26108 + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
26109 + if (pxd == NULL)
26110 failed = true;
26111 - pmds[i] = pmd;
26112 + pxds[i] = pxd;
26113 }
26114
26115 if (failed) {
26116 - free_pmds(pmds);
26117 + free_pxds(pxds);
26118 return -ENOMEM;
26119 }
26120
26121 @@ -203,51 +249,56 @@ static int preallocate_pmds(pmd_t *pmds[])
26122 * preallocate which never got a corresponding vma will need to be
26123 * freed manually.
26124 */
26125 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
26126 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
26127 {
26128 int i;
26129
26130 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
26131 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
26132 pgd_t pgd = pgdp[i];
26133
26134 if (pgd_val(pgd) != 0) {
26135 - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
26136 + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
26137
26138 - pgdp[i] = native_make_pgd(0);
26139 + set_pgd(pgdp + i, native_make_pgd(0));
26140
26141 - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
26142 - pmd_free(mm, pmd);
26143 + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
26144 + pxd_free(mm, pxd);
26145 }
26146 }
26147 }
26148
26149 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
26150 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
26151 {
26152 - pud_t *pud;
26153 + pyd_t *pyd;
26154 unsigned long addr;
26155 int i;
26156
26157 - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
26158 + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
26159 return;
26160
26161 - pud = pud_offset(pgd, 0);
26162 +#ifdef CONFIG_X86_64
26163 + pyd = pyd_offset(mm, 0L);
26164 +#else
26165 + pyd = pyd_offset(pgd, 0L);
26166 +#endif
26167
26168 - for (addr = i = 0; i < PREALLOCATED_PMDS;
26169 - i++, pud++, addr += PUD_SIZE) {
26170 - pmd_t *pmd = pmds[i];
26171 + for (addr = i = 0; i < PREALLOCATED_PXDS;
26172 + i++, pyd++, addr += PYD_SIZE) {
26173 + pxd_t *pxd = pxds[i];
26174
26175 if (i >= KERNEL_PGD_BOUNDARY)
26176 - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
26177 - sizeof(pmd_t) * PTRS_PER_PMD);
26178 + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
26179 + sizeof(pxd_t) * PTRS_PER_PMD);
26180
26181 - pud_populate(mm, pud, pmd);
26182 + pyd_populate(mm, pyd, pxd);
26183 }
26184 }
26185
26186 pgd_t *pgd_alloc(struct mm_struct *mm)
26187 {
26188 pgd_t *pgd;
26189 - pmd_t *pmds[PREALLOCATED_PMDS];
26190 + pxd_t *pxds[PREALLOCATED_PXDS];
26191 +
26192 unsigned long flags;
26193
26194 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
26195 @@ -257,11 +308,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
26196
26197 mm->pgd = pgd;
26198
26199 - if (preallocate_pmds(pmds) != 0)
26200 + if (preallocate_pxds(pxds) != 0)
26201 goto out_free_pgd;
26202
26203 if (paravirt_pgd_alloc(mm) != 0)
26204 - goto out_free_pmds;
26205 + goto out_free_pxds;
26206
26207 /*
26208 * Make sure that pre-populating the pmds is atomic with
26209 @@ -271,14 +322,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
26210 spin_lock_irqsave(&pgd_lock, flags);
26211
26212 pgd_ctor(pgd);
26213 - pgd_prepopulate_pmd(mm, pgd, pmds);
26214 + pgd_prepopulate_pxd(mm, pgd, pxds);
26215
26216 spin_unlock_irqrestore(&pgd_lock, flags);
26217
26218 return pgd;
26219
26220 -out_free_pmds:
26221 - free_pmds(pmds);
26222 +out_free_pxds:
26223 + free_pxds(pxds);
26224 out_free_pgd:
26225 free_page((unsigned long)pgd);
26226 out:
26227 @@ -287,7 +338,7 @@ out:
26228
26229 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
26230 {
26231 - pgd_mop_up_pmds(mm, pgd);
26232 + pgd_mop_up_pxds(mm, pgd);
26233 pgd_dtor(pgd);
26234 paravirt_pgd_free(mm, pgd);
26235 free_page((unsigned long)pgd);
26236 diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
26237 index 46c8834..fcab43d 100644
26238 --- a/arch/x86/mm/pgtable_32.c
26239 +++ b/arch/x86/mm/pgtable_32.c
26240 @@ -49,10 +49,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
26241 return;
26242 }
26243 pte = pte_offset_kernel(pmd, vaddr);
26244 +
26245 + pax_open_kernel();
26246 if (pte_val(pteval))
26247 set_pte_at(&init_mm, vaddr, pte, pteval);
26248 else
26249 pte_clear(&init_mm, vaddr, pte);
26250 + pax_close_kernel();
26251
26252 /*
26253 * It's enough to flush this one mapping.
26254 diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
26255 index 513d8ed..978c161 100644
26256 --- a/arch/x86/mm/setup_nx.c
26257 +++ b/arch/x86/mm/setup_nx.c
26258 @@ -4,11 +4,10 @@
26259
26260 #include <asm/pgtable.h>
26261
26262 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26263 int nx_enabled;
26264
26265 -#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
26266 -static int disable_nx __cpuinitdata;
26267 -
26268 +#ifndef CONFIG_PAX_PAGEEXEC
26269 /*
26270 * noexec = on|off
26271 *
26272 @@ -22,32 +21,26 @@ static int __init noexec_setup(char *str)
26273 if (!str)
26274 return -EINVAL;
26275 if (!strncmp(str, "on", 2)) {
26276 - __supported_pte_mask |= _PAGE_NX;
26277 - disable_nx = 0;
26278 + nx_enabled = 1;
26279 } else if (!strncmp(str, "off", 3)) {
26280 - disable_nx = 1;
26281 - __supported_pte_mask &= ~_PAGE_NX;
26282 + nx_enabled = 0;
26283 }
26284 return 0;
26285 }
26286 early_param("noexec", noexec_setup);
26287 #endif
26288 +#endif
26289
26290 #ifdef CONFIG_X86_PAE
26291 void __init set_nx(void)
26292 {
26293 - unsigned int v[4], l, h;
26294 + if (!nx_enabled && cpu_has_nx) {
26295 + unsigned l, h;
26296
26297 - if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
26298 - cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
26299 -
26300 - if ((v[3] & (1 << 20)) && !disable_nx) {
26301 - rdmsr(MSR_EFER, l, h);
26302 - l |= EFER_NX;
26303 - wrmsr(MSR_EFER, l, h);
26304 - nx_enabled = 1;
26305 - __supported_pte_mask |= _PAGE_NX;
26306 - }
26307 + __supported_pte_mask &= ~_PAGE_NX;
26308 + rdmsr(MSR_EFER, l, h);
26309 + l &= ~EFER_NX;
26310 + wrmsr(MSR_EFER, l, h);
26311 }
26312 }
26313 #else
26314 @@ -62,7 +55,7 @@ void __cpuinit check_efer(void)
26315 unsigned long efer;
26316
26317 rdmsrl(MSR_EFER, efer);
26318 - if (!(efer & EFER_NX) || disable_nx)
26319 + if (!(efer & EFER_NX) || !nx_enabled)
26320 __supported_pte_mask &= ~_PAGE_NX;
26321 }
26322 #endif
26323 diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
26324 index 36fe08e..b123d3a 100644
26325 --- a/arch/x86/mm/tlb.c
26326 +++ b/arch/x86/mm/tlb.c
26327 @@ -61,7 +61,11 @@ void leave_mm(int cpu)
26328 BUG();
26329 cpumask_clear_cpu(cpu,
26330 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
26331 +
26332 +#ifndef CONFIG_PAX_PER_CPU_PGD
26333 load_cr3(swapper_pg_dir);
26334 +#endif
26335 +
26336 }
26337 EXPORT_SYMBOL_GPL(leave_mm);
26338
26339 diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
26340 index 829edf0..672adb3 100644
26341 --- a/arch/x86/oprofile/backtrace.c
26342 +++ b/arch/x86/oprofile/backtrace.c
26343 @@ -115,7 +115,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
26344 {
26345 struct frame_head *head = (struct frame_head *)frame_pointer(regs);
26346
26347 - if (!user_mode_vm(regs)) {
26348 + if (!user_mode(regs)) {
26349 unsigned long stack = kernel_stack_pointer(regs);
26350 if (depth)
26351 dump_trace(NULL, regs, (unsigned long *)stack, 0,
26352 diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c
26353 index e6a160a..36deff6 100644
26354 --- a/arch/x86/oprofile/op_model_p4.c
26355 +++ b/arch/x86/oprofile/op_model_p4.c
26356 @@ -50,7 +50,7 @@ static inline void setup_num_counters(void)
26357 #endif
26358 }
26359
26360 -static int inline addr_increment(void)
26361 +static inline int addr_increment(void)
26362 {
26363 #ifdef CONFIG_SMP
26364 return smp_num_siblings == 2 ? 2 : 1;
26365 diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
26366 index 1331fcf..03901b2 100644
26367 --- a/arch/x86/pci/common.c
26368 +++ b/arch/x86/pci/common.c
26369 @@ -31,8 +31,8 @@ int noioapicreroute = 1;
26370 int pcibios_last_bus = -1;
26371 unsigned long pirq_table_addr;
26372 struct pci_bus *pci_root_bus;
26373 -struct pci_raw_ops *raw_pci_ops;
26374 -struct pci_raw_ops *raw_pci_ext_ops;
26375 +const struct pci_raw_ops *raw_pci_ops;
26376 +const struct pci_raw_ops *raw_pci_ext_ops;
26377
26378 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
26379 int reg, int len, u32 *val)
26380 diff --git a/arch/x86/pci/direct.c b/arch/x86/pci/direct.c
26381 index 347d882..4baf6b6 100644
26382 --- a/arch/x86/pci/direct.c
26383 +++ b/arch/x86/pci/direct.c
26384 @@ -79,7 +79,7 @@ static int pci_conf1_write(unsigned int seg, unsigned int bus,
26385
26386 #undef PCI_CONF1_ADDRESS
26387
26388 -struct pci_raw_ops pci_direct_conf1 = {
26389 +const struct pci_raw_ops pci_direct_conf1 = {
26390 .read = pci_conf1_read,
26391 .write = pci_conf1_write,
26392 };
26393 @@ -173,7 +173,7 @@ static int pci_conf2_write(unsigned int seg, unsigned int bus,
26394
26395 #undef PCI_CONF2_ADDRESS
26396
26397 -struct pci_raw_ops pci_direct_conf2 = {
26398 +const struct pci_raw_ops pci_direct_conf2 = {
26399 .read = pci_conf2_read,
26400 .write = pci_conf2_write,
26401 };
26402 @@ -189,7 +189,7 @@ struct pci_raw_ops pci_direct_conf2 = {
26403 * This should be close to trivial, but it isn't, because there are buggy
26404 * chipsets (yes, you guessed it, by Intel and Compaq) that have no class ID.
26405 */
26406 -static int __init pci_sanity_check(struct pci_raw_ops *o)
26407 +static int __init pci_sanity_check(const struct pci_raw_ops *o)
26408 {
26409 u32 x = 0;
26410 int year, devfn;
26411 diff --git a/arch/x86/pci/mmconfig_32.c b/arch/x86/pci/mmconfig_32.c
26412 index f10a7e9..0425342 100644
26413 --- a/arch/x86/pci/mmconfig_32.c
26414 +++ b/arch/x86/pci/mmconfig_32.c
26415 @@ -125,7 +125,7 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
26416 return 0;
26417 }
26418
26419 -static struct pci_raw_ops pci_mmcfg = {
26420 +static const struct pci_raw_ops pci_mmcfg = {
26421 .read = pci_mmcfg_read,
26422 .write = pci_mmcfg_write,
26423 };
26424 diff --git a/arch/x86/pci/mmconfig_64.c b/arch/x86/pci/mmconfig_64.c
26425 index 94349f8..41600a7 100644
26426 --- a/arch/x86/pci/mmconfig_64.c
26427 +++ b/arch/x86/pci/mmconfig_64.c
26428 @@ -104,7 +104,7 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
26429 return 0;
26430 }
26431
26432 -static struct pci_raw_ops pci_mmcfg = {
26433 +static const struct pci_raw_ops pci_mmcfg = {
26434 .read = pci_mmcfg_read,
26435 .write = pci_mmcfg_write,
26436 };
26437 diff --git a/arch/x86/pci/numaq_32.c b/arch/x86/pci/numaq_32.c
26438 index 8eb295e..86bd657 100644
26439 --- a/arch/x86/pci/numaq_32.c
26440 +++ b/arch/x86/pci/numaq_32.c
26441 @@ -112,7 +112,7 @@ static int pci_conf1_mq_write(unsigned int seg, unsigned int bus,
26442
26443 #undef PCI_CONF1_MQ_ADDRESS
26444
26445 -static struct pci_raw_ops pci_direct_conf1_mq = {
26446 +static const struct pci_raw_ops pci_direct_conf1_mq = {
26447 .read = pci_conf1_mq_read,
26448 .write = pci_conf1_mq_write
26449 };
26450 diff --git a/arch/x86/pci/olpc.c b/arch/x86/pci/olpc.c
26451 index b889d82..5a58a0a 100644
26452 --- a/arch/x86/pci/olpc.c
26453 +++ b/arch/x86/pci/olpc.c
26454 @@ -297,7 +297,7 @@ static int pci_olpc_write(unsigned int seg, unsigned int bus,
26455 return 0;
26456 }
26457
26458 -static struct pci_raw_ops pci_olpc_conf = {
26459 +static const struct pci_raw_ops pci_olpc_conf = {
26460 .read = pci_olpc_read,
26461 .write = pci_olpc_write,
26462 };
26463 diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
26464 index 1c975cc..b8e16c2 100644
26465 --- a/arch/x86/pci/pcbios.c
26466 +++ b/arch/x86/pci/pcbios.c
26467 @@ -56,50 +56,93 @@ union bios32 {
26468 static struct {
26469 unsigned long address;
26470 unsigned short segment;
26471 -} bios32_indirect = { 0, __KERNEL_CS };
26472 +} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
26473
26474 /*
26475 * Returns the entry point for the given service, NULL on error
26476 */
26477
26478 -static unsigned long bios32_service(unsigned long service)
26479 +static unsigned long __devinit bios32_service(unsigned long service)
26480 {
26481 unsigned char return_code; /* %al */
26482 unsigned long address; /* %ebx */
26483 unsigned long length; /* %ecx */
26484 unsigned long entry; /* %edx */
26485 unsigned long flags;
26486 + struct desc_struct d, *gdt;
26487
26488 local_irq_save(flags);
26489 - __asm__("lcall *(%%edi); cld"
26490 +
26491 + gdt = get_cpu_gdt_table(smp_processor_id());
26492 +
26493 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
26494 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
26495 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
26496 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
26497 +
26498 + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
26499 : "=a" (return_code),
26500 "=b" (address),
26501 "=c" (length),
26502 "=d" (entry)
26503 : "0" (service),
26504 "1" (0),
26505 - "D" (&bios32_indirect));
26506 + "D" (&bios32_indirect),
26507 + "r"(__PCIBIOS_DS)
26508 + : "memory");
26509 +
26510 + pax_open_kernel();
26511 + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
26512 + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
26513 + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
26514 + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
26515 + pax_close_kernel();
26516 +
26517 local_irq_restore(flags);
26518
26519 switch (return_code) {
26520 - case 0:
26521 - return address + entry;
26522 - case 0x80: /* Not present */
26523 - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
26524 - return 0;
26525 - default: /* Shouldn't happen */
26526 - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
26527 - service, return_code);
26528 + case 0: {
26529 + int cpu;
26530 + unsigned char flags;
26531 +
26532 + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
26533 + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
26534 + printk(KERN_WARNING "bios32_service: not valid\n");
26535 return 0;
26536 + }
26537 + address = address + PAGE_OFFSET;
26538 + length += 16UL; /* some BIOSs underreport this... */
26539 + flags = 4;
26540 + if (length >= 64*1024*1024) {
26541 + length >>= PAGE_SHIFT;
26542 + flags |= 8;
26543 + }
26544 +
26545 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
26546 + gdt = get_cpu_gdt_table(cpu);
26547 + pack_descriptor(&d, address, length, 0x9b, flags);
26548 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
26549 + pack_descriptor(&d, address, length, 0x93, flags);
26550 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
26551 + }
26552 + return entry;
26553 + }
26554 + case 0x80: /* Not present */
26555 + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
26556 + return 0;
26557 + default: /* Shouldn't happen */
26558 + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
26559 + service, return_code);
26560 + return 0;
26561 }
26562 }
26563
26564 static struct {
26565 unsigned long address;
26566 unsigned short segment;
26567 -} pci_indirect = { 0, __KERNEL_CS };
26568 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
26569
26570 -static int pci_bios_present;
26571 +static int pci_bios_present __read_only;
26572
26573 static int __devinit check_pcibios(void)
26574 {
26575 @@ -108,11 +151,13 @@ static int __devinit check_pcibios(void)
26576 unsigned long flags, pcibios_entry;
26577
26578 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
26579 - pci_indirect.address = pcibios_entry + PAGE_OFFSET;
26580 + pci_indirect.address = pcibios_entry;
26581
26582 local_irq_save(flags);
26583 - __asm__(
26584 - "lcall *(%%edi); cld\n\t"
26585 + __asm__("movw %w6, %%ds\n\t"
26586 + "lcall *%%ss:(%%edi); cld\n\t"
26587 + "push %%ss\n\t"
26588 + "pop %%ds\n\t"
26589 "jc 1f\n\t"
26590 "xor %%ah, %%ah\n"
26591 "1:"
26592 @@ -121,7 +166,8 @@ static int __devinit check_pcibios(void)
26593 "=b" (ebx),
26594 "=c" (ecx)
26595 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
26596 - "D" (&pci_indirect)
26597 + "D" (&pci_indirect),
26598 + "r" (__PCIBIOS_DS)
26599 : "memory");
26600 local_irq_restore(flags);
26601
26602 @@ -165,7 +211,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26603
26604 switch (len) {
26605 case 1:
26606 - __asm__("lcall *(%%esi); cld\n\t"
26607 + __asm__("movw %w6, %%ds\n\t"
26608 + "lcall *%%ss:(%%esi); cld\n\t"
26609 + "push %%ss\n\t"
26610 + "pop %%ds\n\t"
26611 "jc 1f\n\t"
26612 "xor %%ah, %%ah\n"
26613 "1:"
26614 @@ -174,7 +223,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26615 : "1" (PCIBIOS_READ_CONFIG_BYTE),
26616 "b" (bx),
26617 "D" ((long)reg),
26618 - "S" (&pci_indirect));
26619 + "S" (&pci_indirect),
26620 + "r" (__PCIBIOS_DS));
26621 /*
26622 * Zero-extend the result beyond 8 bits, do not trust the
26623 * BIOS having done it:
26624 @@ -182,7 +232,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26625 *value &= 0xff;
26626 break;
26627 case 2:
26628 - __asm__("lcall *(%%esi); cld\n\t"
26629 + __asm__("movw %w6, %%ds\n\t"
26630 + "lcall *%%ss:(%%esi); cld\n\t"
26631 + "push %%ss\n\t"
26632 + "pop %%ds\n\t"
26633 "jc 1f\n\t"
26634 "xor %%ah, %%ah\n"
26635 "1:"
26636 @@ -191,7 +244,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26637 : "1" (PCIBIOS_READ_CONFIG_WORD),
26638 "b" (bx),
26639 "D" ((long)reg),
26640 - "S" (&pci_indirect));
26641 + "S" (&pci_indirect),
26642 + "r" (__PCIBIOS_DS));
26643 /*
26644 * Zero-extend the result beyond 16 bits, do not trust the
26645 * BIOS having done it:
26646 @@ -199,7 +253,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26647 *value &= 0xffff;
26648 break;
26649 case 4:
26650 - __asm__("lcall *(%%esi); cld\n\t"
26651 + __asm__("movw %w6, %%ds\n\t"
26652 + "lcall *%%ss:(%%esi); cld\n\t"
26653 + "push %%ss\n\t"
26654 + "pop %%ds\n\t"
26655 "jc 1f\n\t"
26656 "xor %%ah, %%ah\n"
26657 "1:"
26658 @@ -208,7 +265,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26659 : "1" (PCIBIOS_READ_CONFIG_DWORD),
26660 "b" (bx),
26661 "D" ((long)reg),
26662 - "S" (&pci_indirect));
26663 + "S" (&pci_indirect),
26664 + "r" (__PCIBIOS_DS));
26665 break;
26666 }
26667
26668 @@ -231,7 +289,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26669
26670 switch (len) {
26671 case 1:
26672 - __asm__("lcall *(%%esi); cld\n\t"
26673 + __asm__("movw %w6, %%ds\n\t"
26674 + "lcall *%%ss:(%%esi); cld\n\t"
26675 + "push %%ss\n\t"
26676 + "pop %%ds\n\t"
26677 "jc 1f\n\t"
26678 "xor %%ah, %%ah\n"
26679 "1:"
26680 @@ -240,10 +301,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26681 "c" (value),
26682 "b" (bx),
26683 "D" ((long)reg),
26684 - "S" (&pci_indirect));
26685 + "S" (&pci_indirect),
26686 + "r" (__PCIBIOS_DS));
26687 break;
26688 case 2:
26689 - __asm__("lcall *(%%esi); cld\n\t"
26690 + __asm__("movw %w6, %%ds\n\t"
26691 + "lcall *%%ss:(%%esi); cld\n\t"
26692 + "push %%ss\n\t"
26693 + "pop %%ds\n\t"
26694 "jc 1f\n\t"
26695 "xor %%ah, %%ah\n"
26696 "1:"
26697 @@ -252,10 +317,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26698 "c" (value),
26699 "b" (bx),
26700 "D" ((long)reg),
26701 - "S" (&pci_indirect));
26702 + "S" (&pci_indirect),
26703 + "r" (__PCIBIOS_DS));
26704 break;
26705 case 4:
26706 - __asm__("lcall *(%%esi); cld\n\t"
26707 + __asm__("movw %w6, %%ds\n\t"
26708 + "lcall *%%ss:(%%esi); cld\n\t"
26709 + "push %%ss\n\t"
26710 + "pop %%ds\n\t"
26711 "jc 1f\n\t"
26712 "xor %%ah, %%ah\n"
26713 "1:"
26714 @@ -264,7 +333,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26715 "c" (value),
26716 "b" (bx),
26717 "D" ((long)reg),
26718 - "S" (&pci_indirect));
26719 + "S" (&pci_indirect),
26720 + "r" (__PCIBIOS_DS));
26721 break;
26722 }
26723
26724 @@ -278,7 +348,7 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26725 * Function table for BIOS32 access
26726 */
26727
26728 -static struct pci_raw_ops pci_bios_access = {
26729 +static const struct pci_raw_ops pci_bios_access = {
26730 .read = pci_bios_read,
26731 .write = pci_bios_write
26732 };
26733 @@ -287,7 +357,7 @@ static struct pci_raw_ops pci_bios_access = {
26734 * Try to find PCI BIOS.
26735 */
26736
26737 -static struct pci_raw_ops * __devinit pci_find_bios(void)
26738 +static const struct pci_raw_ops * __devinit pci_find_bios(void)
26739 {
26740 union bios32 *check;
26741 unsigned char sum;
26742 @@ -368,10 +438,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
26743
26744 DBG("PCI: Fetching IRQ routing table... ");
26745 __asm__("push %%es\n\t"
26746 + "movw %w8, %%ds\n\t"
26747 "push %%ds\n\t"
26748 "pop %%es\n\t"
26749 - "lcall *(%%esi); cld\n\t"
26750 + "lcall *%%ss:(%%esi); cld\n\t"
26751 "pop %%es\n\t"
26752 + "push %%ss\n\t"
26753 + "pop %%ds\n"
26754 "jc 1f\n\t"
26755 "xor %%ah, %%ah\n"
26756 "1:"
26757 @@ -382,7 +455,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
26758 "1" (0),
26759 "D" ((long) &opt),
26760 "S" (&pci_indirect),
26761 - "m" (opt)
26762 + "m" (opt),
26763 + "r" (__PCIBIOS_DS)
26764 : "memory");
26765 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
26766 if (ret & 0xff00)
26767 @@ -406,7 +480,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
26768 {
26769 int ret;
26770
26771 - __asm__("lcall *(%%esi); cld\n\t"
26772 + __asm__("movw %w5, %%ds\n\t"
26773 + "lcall *%%ss:(%%esi); cld\n\t"
26774 + "push %%ss\n\t"
26775 + "pop %%ds\n"
26776 "jc 1f\n\t"
26777 "xor %%ah, %%ah\n"
26778 "1:"
26779 @@ -414,7 +491,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
26780 : "0" (PCIBIOS_SET_PCI_HW_INT),
26781 "b" ((dev->bus->number << 8) | dev->devfn),
26782 "c" ((irq << 8) | (pin + 10)),
26783 - "S" (&pci_indirect));
26784 + "S" (&pci_indirect),
26785 + "r" (__PCIBIOS_DS));
26786 return !(ret & 0xff00);
26787 }
26788 EXPORT_SYMBOL(pcibios_set_irq_routing);
26789 diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
26790 index fa0f651..9d8f3d9 100644
26791 --- a/arch/x86/power/cpu.c
26792 +++ b/arch/x86/power/cpu.c
26793 @@ -129,7 +129,7 @@ static void do_fpu_end(void)
26794 static void fix_processor_context(void)
26795 {
26796 int cpu = smp_processor_id();
26797 - struct tss_struct *t = &per_cpu(init_tss, cpu);
26798 + struct tss_struct *t = init_tss + cpu;
26799
26800 set_tss_desc(cpu, t); /*
26801 * This just modifies memory; should not be
26802 @@ -139,7 +139,9 @@ static void fix_processor_context(void)
26803 */
26804
26805 #ifdef CONFIG_X86_64
26806 + pax_open_kernel();
26807 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
26808 + pax_close_kernel();
26809
26810 syscall_init(); /* This sets MSR_*STAR and related */
26811 #endif
26812 diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
26813 index dd78ef6..f9d928d 100644
26814 --- a/arch/x86/vdso/Makefile
26815 +++ b/arch/x86/vdso/Makefile
26816 @@ -122,7 +122,7 @@ quiet_cmd_vdso = VDSO $@
26817 $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
26818 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^)
26819
26820 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
26821 +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
26822 GCOV_PROFILE := n
26823
26824 #
26825 diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
26826 index ee55754..0013b2e 100644
26827 --- a/arch/x86/vdso/vclock_gettime.c
26828 +++ b/arch/x86/vdso/vclock_gettime.c
26829 @@ -22,24 +22,48 @@
26830 #include <asm/hpet.h>
26831 #include <asm/unistd.h>
26832 #include <asm/io.h>
26833 +#include <asm/fixmap.h>
26834 #include "vextern.h"
26835
26836 #define gtod vdso_vsyscall_gtod_data
26837
26838 +notrace noinline long __vdso_fallback_time(long *t)
26839 +{
26840 + long secs;
26841 + asm volatile("syscall"
26842 + : "=a" (secs)
26843 + : "0" (__NR_time),"D" (t) : "r11", "cx", "memory");
26844 + return secs;
26845 +}
26846 +
26847 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
26848 {
26849 long ret;
26850 asm("syscall" : "=a" (ret) :
26851 - "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
26852 + "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "r11", "cx", "memory");
26853 return ret;
26854 }
26855
26856 +notrace static inline cycle_t __vdso_vread_hpet(void)
26857 +{
26858 + return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
26859 +}
26860 +
26861 +notrace static inline cycle_t __vdso_vread_tsc(void)
26862 +{
26863 + cycle_t ret = (cycle_t)vget_cycles();
26864 +
26865 + return ret >= gtod->clock.cycle_last ? ret : gtod->clock.cycle_last;
26866 +}
26867 +
26868 notrace static inline long vgetns(void)
26869 {
26870 long v;
26871 - cycles_t (*vread)(void);
26872 - vread = gtod->clock.vread;
26873 - v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask;
26874 + if (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3])
26875 + v = __vdso_vread_tsc();
26876 + else
26877 + v = __vdso_vread_hpet();
26878 + v = (v - gtod->clock.cycle_last) & gtod->clock.mask;
26879 return (v * gtod->clock.mult) >> gtod->clock.shift;
26880 }
26881
26882 @@ -113,7 +137,9 @@ notrace static noinline int do_monotonic_coarse(struct timespec *ts)
26883
26884 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
26885 {
26886 - if (likely(gtod->sysctl_enabled))
26887 + if (likely(gtod->sysctl_enabled &&
26888 + ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
26889 + (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
26890 switch (clock) {
26891 case CLOCK_REALTIME:
26892 if (likely(gtod->clock.vread))
26893 @@ -133,10 +159,20 @@ notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
26894 int clock_gettime(clockid_t, struct timespec *)
26895 __attribute__((weak, alias("__vdso_clock_gettime")));
26896
26897 +notrace noinline int __vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
26898 +{
26899 + long ret;
26900 + asm("syscall" : "=a" (ret) :
26901 + "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "r11", "cx", "memory");
26902 + return ret;
26903 +}
26904 +
26905 notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
26906 {
26907 - long ret;
26908 - if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
26909 + if (likely(gtod->sysctl_enabled &&
26910 + ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
26911 + (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
26912 + {
26913 if (likely(tv != NULL)) {
26914 BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
26915 offsetof(struct timespec, tv_nsec) ||
26916 @@ -151,9 +187,7 @@ notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
26917 }
26918 return 0;
26919 }
26920 - asm("syscall" : "=a" (ret) :
26921 - "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
26922 - return ret;
26923 + return __vdso_fallback_gettimeofday(tv, tz);
26924 }
26925 int gettimeofday(struct timeval *, struct timezone *)
26926 __attribute__((weak, alias("__vdso_gettimeofday")));
26927 diff --git a/arch/x86/vdso/vdso.lds.S b/arch/x86/vdso/vdso.lds.S
26928 index 4e5dd3b..00ba15e 100644
26929 --- a/arch/x86/vdso/vdso.lds.S
26930 +++ b/arch/x86/vdso/vdso.lds.S
26931 @@ -35,3 +35,9 @@ VDSO64_PRELINK = VDSO_PRELINK;
26932 #define VEXTERN(x) VDSO64_ ## x = vdso_ ## x;
26933 #include "vextern.h"
26934 #undef VEXTERN
26935 +
26936 +#define VEXTERN(x) VDSO64_ ## x = __vdso_ ## x;
26937 +VEXTERN(fallback_gettimeofday)
26938 +VEXTERN(fallback_time)
26939 +VEXTERN(getcpu)
26940 +#undef VEXTERN
26941 diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
26942 index 58bc00f..d53fb48 100644
26943 --- a/arch/x86/vdso/vdso32-setup.c
26944 +++ b/arch/x86/vdso/vdso32-setup.c
26945 @@ -25,6 +25,7 @@
26946 #include <asm/tlbflush.h>
26947 #include <asm/vdso.h>
26948 #include <asm/proto.h>
26949 +#include <asm/mman.h>
26950
26951 enum {
26952 VDSO_DISABLED = 0,
26953 @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
26954 void enable_sep_cpu(void)
26955 {
26956 int cpu = get_cpu();
26957 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
26958 + struct tss_struct *tss = init_tss + cpu;
26959
26960 if (!boot_cpu_has(X86_FEATURE_SEP)) {
26961 put_cpu();
26962 @@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
26963 gate_vma.vm_start = FIXADDR_USER_START;
26964 gate_vma.vm_end = FIXADDR_USER_END;
26965 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
26966 - gate_vma.vm_page_prot = __P101;
26967 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
26968 /*
26969 * Make sure the vDSO gets into every core dump.
26970 * Dumping its contents makes post-mortem fully interpretable later
26971 @@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26972 if (compat)
26973 addr = VDSO_HIGH_BASE;
26974 else {
26975 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
26976 + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
26977 if (IS_ERR_VALUE(addr)) {
26978 ret = addr;
26979 goto up_fail;
26980 }
26981 }
26982
26983 - current->mm->context.vdso = (void *)addr;
26984 + current->mm->context.vdso = addr;
26985
26986 if (compat_uses_vma || !compat) {
26987 /*
26988 @@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26989 }
26990
26991 current_thread_info()->sysenter_return =
26992 - VDSO32_SYMBOL(addr, SYSENTER_RETURN);
26993 + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
26994
26995 up_fail:
26996 if (ret)
26997 - current->mm->context.vdso = NULL;
26998 + current->mm->context.vdso = 0;
26999
27000 up_write(&mm->mmap_sem);
27001
27002 @@ -413,8 +414,14 @@ __initcall(ia32_binfmt_init);
27003
27004 const char *arch_vma_name(struct vm_area_struct *vma)
27005 {
27006 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
27007 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
27008 return "[vdso]";
27009 +
27010 +#ifdef CONFIG_PAX_SEGMEXEC
27011 + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
27012 + return "[vdso]";
27013 +#endif
27014 +
27015 return NULL;
27016 }
27017
27018 @@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
27019 struct mm_struct *mm = tsk->mm;
27020
27021 /* Check to see if this task was created in compat vdso mode */
27022 - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
27023 + if (mm && mm->context.vdso == VDSO_HIGH_BASE)
27024 return &gate_vma;
27025 return NULL;
27026 }
27027 diff --git a/arch/x86/vdso/vextern.h b/arch/x86/vdso/vextern.h
27028 index 1683ba2..48d07f3 100644
27029 --- a/arch/x86/vdso/vextern.h
27030 +++ b/arch/x86/vdso/vextern.h
27031 @@ -11,6 +11,5 @@
27032 put into vextern.h and be referenced as a pointer with vdso prefix.
27033 The main kernel later fills in the values. */
27034
27035 -VEXTERN(jiffies)
27036 VEXTERN(vgetcpu_mode)
27037 VEXTERN(vsyscall_gtod_data)
27038 diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
27039 index 21e1aeb..2c0b3c4 100644
27040 --- a/arch/x86/vdso/vma.c
27041 +++ b/arch/x86/vdso/vma.c
27042 @@ -17,8 +17,6 @@
27043 #include "vextern.h" /* Just for VMAGIC. */
27044 #undef VEXTERN
27045
27046 -unsigned int __read_mostly vdso_enabled = 1;
27047 -
27048 extern char vdso_start[], vdso_end[];
27049 extern unsigned short vdso_sync_cpuid;
27050
27051 @@ -27,10 +25,8 @@ static unsigned vdso_size;
27052
27053 static inline void *var_ref(void *p, char *name)
27054 {
27055 - if (*(void **)p != (void *)VMAGIC) {
27056 - printk("VDSO: variable %s broken\n", name);
27057 - vdso_enabled = 0;
27058 - }
27059 + if (*(void **)p != (void *)VMAGIC)
27060 + panic("VDSO: variable %s broken\n", name);
27061 return p;
27062 }
27063
27064 @@ -57,21 +53,18 @@ static int __init init_vdso_vars(void)
27065 if (!vbase)
27066 goto oom;
27067
27068 - if (memcmp(vbase, "\177ELF", 4)) {
27069 - printk("VDSO: I'm broken; not ELF\n");
27070 - vdso_enabled = 0;
27071 - }
27072 + if (memcmp(vbase, ELFMAG, SELFMAG))
27073 + panic("VDSO: I'm broken; not ELF\n");
27074
27075 #define VEXTERN(x) \
27076 *(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x;
27077 #include "vextern.h"
27078 #undef VEXTERN
27079 + vunmap(vbase);
27080 return 0;
27081
27082 oom:
27083 - printk("Cannot allocate vdso\n");
27084 - vdso_enabled = 0;
27085 - return -ENOMEM;
27086 + panic("Cannot allocate vdso\n");
27087 }
27088 __initcall(init_vdso_vars);
27089
27090 @@ -102,13 +95,15 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
27091 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
27092 {
27093 struct mm_struct *mm = current->mm;
27094 - unsigned long addr;
27095 + unsigned long addr = 0;
27096 int ret;
27097
27098 - if (!vdso_enabled)
27099 - return 0;
27100 -
27101 down_write(&mm->mmap_sem);
27102 +
27103 +#ifdef CONFIG_PAX_RANDMMAP
27104 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27105 +#endif
27106 +
27107 addr = vdso_addr(mm->start_stack, vdso_size);
27108 addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
27109 if (IS_ERR_VALUE(addr)) {
27110 @@ -116,7 +111,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
27111 goto up_fail;
27112 }
27113
27114 - current->mm->context.vdso = (void *)addr;
27115 + current->mm->context.vdso = addr;
27116
27117 ret = install_special_mapping(mm, addr, vdso_size,
27118 VM_READ|VM_EXEC|
27119 @@ -124,7 +119,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
27120 VM_ALWAYSDUMP,
27121 vdso_pages);
27122 if (ret) {
27123 - current->mm->context.vdso = NULL;
27124 + current->mm->context.vdso = 0;
27125 goto up_fail;
27126 }
27127
27128 @@ -132,10 +127,3 @@ up_fail:
27129 up_write(&mm->mmap_sem);
27130 return ret;
27131 }
27132 -
27133 -static __init int vdso_setup(char *s)
27134 -{
27135 - vdso_enabled = simple_strtoul(s, NULL, 0);
27136 - return 0;
27137 -}
27138 -__setup("vdso=", vdso_setup);
27139 diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
27140 index 0087b00..eecb34f 100644
27141 --- a/arch/x86/xen/enlighten.c
27142 +++ b/arch/x86/xen/enlighten.c
27143 @@ -71,8 +71,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
27144
27145 struct shared_info xen_dummy_shared_info;
27146
27147 -void *xen_initial_gdt;
27148 -
27149 /*
27150 * Point at some empty memory to start with. We map the real shared_info
27151 * page as soon as fixmap is up and running.
27152 @@ -548,7 +546,7 @@ static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g)
27153
27154 preempt_disable();
27155
27156 - start = __get_cpu_var(idt_desc).address;
27157 + start = (unsigned long)__get_cpu_var(idt_desc).address;
27158 end = start + __get_cpu_var(idt_desc).size + 1;
27159
27160 xen_mc_flush();
27161 @@ -993,7 +991,7 @@ static const struct pv_apic_ops xen_apic_ops __initdata = {
27162 #endif
27163 };
27164
27165 -static void xen_reboot(int reason)
27166 +static __noreturn void xen_reboot(int reason)
27167 {
27168 struct sched_shutdown r = { .reason = reason };
27169
27170 @@ -1001,17 +999,17 @@ static void xen_reboot(int reason)
27171 BUG();
27172 }
27173
27174 -static void xen_restart(char *msg)
27175 +static __noreturn void xen_restart(char *msg)
27176 {
27177 xen_reboot(SHUTDOWN_reboot);
27178 }
27179
27180 -static void xen_emergency_restart(void)
27181 +static __noreturn void xen_emergency_restart(void)
27182 {
27183 xen_reboot(SHUTDOWN_reboot);
27184 }
27185
27186 -static void xen_machine_halt(void)
27187 +static __noreturn void xen_machine_halt(void)
27188 {
27189 xen_reboot(SHUTDOWN_poweroff);
27190 }
27191 @@ -1095,9 +1093,20 @@ asmlinkage void __init xen_start_kernel(void)
27192 */
27193 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
27194
27195 -#ifdef CONFIG_X86_64
27196 /* Work out if we support NX */
27197 - check_efer();
27198 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
27199 + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
27200 + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
27201 + unsigned l, h;
27202 +
27203 +#ifdef CONFIG_X86_PAE
27204 + nx_enabled = 1;
27205 +#endif
27206 + __supported_pte_mask |= _PAGE_NX;
27207 + rdmsr(MSR_EFER, l, h);
27208 + l |= EFER_NX;
27209 + wrmsr(MSR_EFER, l, h);
27210 + }
27211 #endif
27212
27213 xen_setup_features();
27214 @@ -1129,13 +1138,6 @@ asmlinkage void __init xen_start_kernel(void)
27215
27216 machine_ops = xen_machine_ops;
27217
27218 - /*
27219 - * The only reliable way to retain the initial address of the
27220 - * percpu gdt_page is to remember it here, so we can go and
27221 - * mark it RW later, when the initial percpu area is freed.
27222 - */
27223 - xen_initial_gdt = &per_cpu(gdt_page, 0);
27224 -
27225 xen_smp_init();
27226
27227 pgd = (pgd_t *)xen_start_info->pt_base;
27228 diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
27229 index 3f90a2c..2c2ad84 100644
27230 --- a/arch/x86/xen/mmu.c
27231 +++ b/arch/x86/xen/mmu.c
27232 @@ -1719,6 +1719,9 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
27233 convert_pfn_mfn(init_level4_pgt);
27234 convert_pfn_mfn(level3_ident_pgt);
27235 convert_pfn_mfn(level3_kernel_pgt);
27236 + convert_pfn_mfn(level3_vmalloc_start_pgt);
27237 + convert_pfn_mfn(level3_vmalloc_end_pgt);
27238 + convert_pfn_mfn(level3_vmemmap_pgt);
27239
27240 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
27241 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
27242 @@ -1737,7 +1740,11 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
27243 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
27244 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
27245 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
27246 + set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
27247 + set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
27248 + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
27249 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
27250 + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
27251 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
27252 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
27253
27254 @@ -1860,6 +1867,7 @@ static __init void xen_post_allocator_init(void)
27255 pv_mmu_ops.set_pud = xen_set_pud;
27256 #if PAGETABLE_LEVELS == 4
27257 pv_mmu_ops.set_pgd = xen_set_pgd;
27258 + pv_mmu_ops.set_pgd_batched = xen_set_pgd;
27259 #endif
27260
27261 /* This will work as long as patching hasn't happened yet
27262 @@ -1946,6 +1954,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
27263 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
27264 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
27265 .set_pgd = xen_set_pgd_hyper,
27266 + .set_pgd_batched = xen_set_pgd_hyper,
27267
27268 .alloc_pud = xen_alloc_pmd_init,
27269 .release_pud = xen_release_pmd_init,
27270 diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
27271 index a96204a..fca9b8e 100644
27272 --- a/arch/x86/xen/smp.c
27273 +++ b/arch/x86/xen/smp.c
27274 @@ -168,11 +168,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
27275 {
27276 BUG_ON(smp_processor_id() != 0);
27277 native_smp_prepare_boot_cpu();
27278 -
27279 - /* We've switched to the "real" per-cpu gdt, so make sure the
27280 - old memory can be recycled */
27281 - make_lowmem_page_readwrite(xen_initial_gdt);
27282 -
27283 xen_setup_vcpu_info_placement();
27284 }
27285
27286 @@ -241,12 +236,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
27287 gdt = get_cpu_gdt_table(cpu);
27288
27289 ctxt->flags = VGCF_IN_KERNEL;
27290 - ctxt->user_regs.ds = __USER_DS;
27291 - ctxt->user_regs.es = __USER_DS;
27292 + ctxt->user_regs.ds = __KERNEL_DS;
27293 + ctxt->user_regs.es = __KERNEL_DS;
27294 ctxt->user_regs.ss = __KERNEL_DS;
27295 #ifdef CONFIG_X86_32
27296 ctxt->user_regs.fs = __KERNEL_PERCPU;
27297 - ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
27298 + savesegment(gs, ctxt->user_regs.gs);
27299 #else
27300 ctxt->gs_base_kernel = per_cpu_offset(cpu);
27301 #endif
27302 @@ -297,13 +292,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
27303 int rc;
27304
27305 per_cpu(current_task, cpu) = idle;
27306 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
27307 #ifdef CONFIG_X86_32
27308 irq_ctx_init(cpu);
27309 #else
27310 clear_tsk_thread_flag(idle, TIF_FORK);
27311 - per_cpu(kernel_stack, cpu) =
27312 - (unsigned long)task_stack_page(idle) -
27313 - KERNEL_STACK_OFFSET + THREAD_SIZE;
27314 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
27315 #endif
27316 xen_setup_runstate_info(cpu);
27317 xen_setup_timer(cpu);
27318 diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
27319 index 9a95a9c..4f39e774 100644
27320 --- a/arch/x86/xen/xen-asm_32.S
27321 +++ b/arch/x86/xen/xen-asm_32.S
27322 @@ -83,14 +83,14 @@ ENTRY(xen_iret)
27323 ESP_OFFSET=4 # bytes pushed onto stack
27324
27325 /*
27326 - * Store vcpu_info pointer for easy access. Do it this way to
27327 - * avoid having to reload %fs
27328 + * Store vcpu_info pointer for easy access.
27329 */
27330 #ifdef CONFIG_SMP
27331 - GET_THREAD_INFO(%eax)
27332 - movl TI_cpu(%eax), %eax
27333 - movl __per_cpu_offset(,%eax,4), %eax
27334 - mov per_cpu__xen_vcpu(%eax), %eax
27335 + push %fs
27336 + mov $(__KERNEL_PERCPU), %eax
27337 + mov %eax, %fs
27338 + mov PER_CPU_VAR(xen_vcpu), %eax
27339 + pop %fs
27340 #else
27341 movl per_cpu__xen_vcpu, %eax
27342 #endif
27343 diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
27344 index 1a5ff24..a187d40 100644
27345 --- a/arch/x86/xen/xen-head.S
27346 +++ b/arch/x86/xen/xen-head.S
27347 @@ -19,6 +19,17 @@ ENTRY(startup_xen)
27348 #ifdef CONFIG_X86_32
27349 mov %esi,xen_start_info
27350 mov $init_thread_union+THREAD_SIZE,%esp
27351 +#ifdef CONFIG_SMP
27352 + movl $cpu_gdt_table,%edi
27353 + movl $__per_cpu_load,%eax
27354 + movw %ax,__KERNEL_PERCPU + 2(%edi)
27355 + rorl $16,%eax
27356 + movb %al,__KERNEL_PERCPU + 4(%edi)
27357 + movb %ah,__KERNEL_PERCPU + 7(%edi)
27358 + movl $__per_cpu_end - 1,%eax
27359 + subl $__per_cpu_start,%eax
27360 + movw %ax,__KERNEL_PERCPU + 0(%edi)
27361 +#endif
27362 #else
27363 mov %rsi,xen_start_info
27364 mov $init_thread_union+THREAD_SIZE,%rsp
27365 diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
27366 index f9153a3..51eab3d 100644
27367 --- a/arch/x86/xen/xen-ops.h
27368 +++ b/arch/x86/xen/xen-ops.h
27369 @@ -10,8 +10,6 @@
27370 extern const char xen_hypervisor_callback[];
27371 extern const char xen_failsafe_callback[];
27372
27373 -extern void *xen_initial_gdt;
27374 -
27375 struct trap_info;
27376 void xen_copy_trap_info(struct trap_info *traps);
27377
27378 diff --git a/block/blk-integrity.c b/block/blk-integrity.c
27379 index 15c6308..96e83c2 100644
27380 --- a/block/blk-integrity.c
27381 +++ b/block/blk-integrity.c
27382 @@ -278,7 +278,7 @@ static struct attribute *integrity_attrs[] = {
27383 NULL,
27384 };
27385
27386 -static struct sysfs_ops integrity_ops = {
27387 +static const struct sysfs_ops integrity_ops = {
27388 .show = &integrity_attr_show,
27389 .store = &integrity_attr_store,
27390 };
27391 diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
27392 index ca56420..f2fc409 100644
27393 --- a/block/blk-iopoll.c
27394 +++ b/block/blk-iopoll.c
27395 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
27396 }
27397 EXPORT_SYMBOL(blk_iopoll_complete);
27398
27399 -static void blk_iopoll_softirq(struct softirq_action *h)
27400 +static void blk_iopoll_softirq(void)
27401 {
27402 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
27403 int rearm = 0, budget = blk_iopoll_budget;
27404 diff --git a/block/blk-map.c b/block/blk-map.c
27405 index 30a7e51..0aeec6a 100644
27406 --- a/block/blk-map.c
27407 +++ b/block/blk-map.c
27408 @@ -54,7 +54,7 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
27409 * direct dma. else, set up kernel bounce buffers
27410 */
27411 uaddr = (unsigned long) ubuf;
27412 - if (blk_rq_aligned(q, ubuf, len) && !map_data)
27413 + if (blk_rq_aligned(q, (__force void *)ubuf, len) && !map_data)
27414 bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
27415 else
27416 bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);
27417 @@ -201,12 +201,13 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
27418 for (i = 0; i < iov_count; i++) {
27419 unsigned long uaddr = (unsigned long)iov[i].iov_base;
27420
27421 + if (!iov[i].iov_len)
27422 + return -EINVAL;
27423 +
27424 if (uaddr & queue_dma_alignment(q)) {
27425 unaligned = 1;
27426 break;
27427 }
27428 - if (!iov[i].iov_len)
27429 - return -EINVAL;
27430 }
27431
27432 if (unaligned || (q->dma_pad_mask & len) || map_data)
27433 @@ -299,7 +300,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
27434 if (!len || !kbuf)
27435 return -EINVAL;
27436
27437 - do_copy = !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf);
27438 + do_copy = !blk_rq_aligned(q, kbuf, len) || object_starts_on_stack(kbuf);
27439 if (do_copy)
27440 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
27441 else
27442 diff --git a/block/blk-softirq.c b/block/blk-softirq.c
27443 index ee9c216..58d410a 100644
27444 --- a/block/blk-softirq.c
27445 +++ b/block/blk-softirq.c
27446 @@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
27447 * Softirq action handler - move entries to local list and loop over them
27448 * while passing them to the queue registered handler.
27449 */
27450 -static void blk_done_softirq(struct softirq_action *h)
27451 +static void blk_done_softirq(void)
27452 {
27453 struct list_head *cpu_list, local_list;
27454
27455 diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
27456 index bb9c5ea..5330d48 100644
27457 --- a/block/blk-sysfs.c
27458 +++ b/block/blk-sysfs.c
27459 @@ -414,7 +414,7 @@ static void blk_release_queue(struct kobject *kobj)
27460 kmem_cache_free(blk_requestq_cachep, q);
27461 }
27462
27463 -static struct sysfs_ops queue_sysfs_ops = {
27464 +static const struct sysfs_ops queue_sysfs_ops = {
27465 .show = queue_attr_show,
27466 .store = queue_attr_store,
27467 };
27468 diff --git a/block/bsg.c b/block/bsg.c
27469 index 7154a7a..08ac2f0 100644
27470 --- a/block/bsg.c
27471 +++ b/block/bsg.c
27472 @@ -175,16 +175,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
27473 struct sg_io_v4 *hdr, struct bsg_device *bd,
27474 fmode_t has_write_perm)
27475 {
27476 + unsigned char tmpcmd[sizeof(rq->__cmd)];
27477 + unsigned char *cmdptr;
27478 +
27479 if (hdr->request_len > BLK_MAX_CDB) {
27480 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
27481 if (!rq->cmd)
27482 return -ENOMEM;
27483 - }
27484 + cmdptr = rq->cmd;
27485 + } else
27486 + cmdptr = tmpcmd;
27487
27488 - if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
27489 + if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
27490 hdr->request_len))
27491 return -EFAULT;
27492
27493 + if (cmdptr != rq->cmd)
27494 + memcpy(rq->cmd, cmdptr, hdr->request_len);
27495 +
27496 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
27497 if (blk_verify_command(rq->cmd, has_write_perm))
27498 return -EPERM;
27499 @@ -282,7 +290,7 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
27500 rq->next_rq = next_rq;
27501 next_rq->cmd_type = rq->cmd_type;
27502
27503 - dxferp = (void*)(unsigned long)hdr->din_xferp;
27504 + dxferp = (void __user *)(unsigned long)hdr->din_xferp;
27505 ret = blk_rq_map_user(q, next_rq, NULL, dxferp,
27506 hdr->din_xfer_len, GFP_KERNEL);
27507 if (ret)
27508 @@ -291,10 +299,10 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
27509
27510 if (hdr->dout_xfer_len) {
27511 dxfer_len = hdr->dout_xfer_len;
27512 - dxferp = (void*)(unsigned long)hdr->dout_xferp;
27513 + dxferp = (void __user *)(unsigned long)hdr->dout_xferp;
27514 } else if (hdr->din_xfer_len) {
27515 dxfer_len = hdr->din_xfer_len;
27516 - dxferp = (void*)(unsigned long)hdr->din_xferp;
27517 + dxferp = (void __user *)(unsigned long)hdr->din_xferp;
27518 } else
27519 dxfer_len = 0;
27520
27521 @@ -436,7 +444,7 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
27522 int len = min_t(unsigned int, hdr->max_response_len,
27523 rq->sense_len);
27524
27525 - ret = copy_to_user((void*)(unsigned long)hdr->response,
27526 + ret = copy_to_user((void __user *)(unsigned long)hdr->response,
27527 rq->sense, len);
27528 if (!ret)
27529 hdr->response_len = len;
27530 diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
27531 index 9bd086c..ca1fc22 100644
27532 --- a/block/compat_ioctl.c
27533 +++ b/block/compat_ioctl.c
27534 @@ -354,7 +354,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
27535 err |= __get_user(f->spec1, &uf->spec1);
27536 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
27537 err |= __get_user(name, &uf->name);
27538 - f->name = compat_ptr(name);
27539 + f->name = (void __force_kernel *)compat_ptr(name);
27540 if (err) {
27541 err = -EFAULT;
27542 goto out;
27543 diff --git a/block/elevator.c b/block/elevator.c
27544 index a847046..75a1746 100644
27545 --- a/block/elevator.c
27546 +++ b/block/elevator.c
27547 @@ -889,7 +889,7 @@ elv_attr_store(struct kobject *kobj, struct attribute *attr,
27548 return error;
27549 }
27550
27551 -static struct sysfs_ops elv_sysfs_ops = {
27552 +static const struct sysfs_ops elv_sysfs_ops = {
27553 .show = elv_attr_show,
27554 .store = elv_attr_store,
27555 };
27556 diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
27557 index 2be0a97..bded3fd 100644
27558 --- a/block/scsi_ioctl.c
27559 +++ b/block/scsi_ioctl.c
27560 @@ -221,8 +221,20 @@ EXPORT_SYMBOL(blk_verify_command);
27561 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
27562 struct sg_io_hdr *hdr, fmode_t mode)
27563 {
27564 - if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
27565 + unsigned char tmpcmd[sizeof(rq->__cmd)];
27566 + unsigned char *cmdptr;
27567 +
27568 + if (rq->cmd != rq->__cmd)
27569 + cmdptr = rq->cmd;
27570 + else
27571 + cmdptr = tmpcmd;
27572 +
27573 + if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
27574 return -EFAULT;
27575 +
27576 + if (cmdptr != rq->cmd)
27577 + memcpy(rq->cmd, cmdptr, hdr->cmd_len);
27578 +
27579 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
27580 return -EPERM;
27581
27582 @@ -431,6 +443,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
27583 int err;
27584 unsigned int in_len, out_len, bytes, opcode, cmdlen;
27585 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
27586 + unsigned char tmpcmd[sizeof(rq->__cmd)];
27587 + unsigned char *cmdptr;
27588
27589 if (!sic)
27590 return -EINVAL;
27591 @@ -464,9 +478,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
27592 */
27593 err = -EFAULT;
27594 rq->cmd_len = cmdlen;
27595 - if (copy_from_user(rq->cmd, sic->data, cmdlen))
27596 +
27597 + if (rq->cmd != rq->__cmd)
27598 + cmdptr = rq->cmd;
27599 + else
27600 + cmdptr = tmpcmd;
27601 +
27602 + if (copy_from_user(cmdptr, sic->data, cmdlen))
27603 goto error;
27604
27605 + if (rq->cmd != cmdptr)
27606 + memcpy(rq->cmd, cmdptr, cmdlen);
27607 +
27608 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
27609 goto error;
27610
27611 diff --git a/crypto/cryptd.c b/crypto/cryptd.c
27612 index 3533582..f143117 100644
27613 --- a/crypto/cryptd.c
27614 +++ b/crypto/cryptd.c
27615 @@ -50,7 +50,7 @@ struct cryptd_blkcipher_ctx {
27616
27617 struct cryptd_blkcipher_request_ctx {
27618 crypto_completion_t complete;
27619 -};
27620 +} __no_const;
27621
27622 struct cryptd_hash_ctx {
27623 struct crypto_shash *child;
27624 diff --git a/crypto/gf128mul.c b/crypto/gf128mul.c
27625 index a90d260..7a9765e 100644
27626 --- a/crypto/gf128mul.c
27627 +++ b/crypto/gf128mul.c
27628 @@ -182,7 +182,7 @@ void gf128mul_lle(be128 *r, const be128 *b)
27629 for (i = 0; i < 7; ++i)
27630 gf128mul_x_lle(&p[i + 1], &p[i]);
27631
27632 - memset(r, 0, sizeof(r));
27633 + memset(r, 0, sizeof(*r));
27634 for (i = 0;;) {
27635 u8 ch = ((u8 *)b)[15 - i];
27636
27637 @@ -220,7 +220,7 @@ void gf128mul_bbe(be128 *r, const be128 *b)
27638 for (i = 0; i < 7; ++i)
27639 gf128mul_x_bbe(&p[i + 1], &p[i]);
27640
27641 - memset(r, 0, sizeof(r));
27642 + memset(r, 0, sizeof(*r));
27643 for (i = 0;;) {
27644 u8 ch = ((u8 *)b)[i];
27645
27646 diff --git a/crypto/serpent.c b/crypto/serpent.c
27647 index b651a55..023297d 100644
27648 --- a/crypto/serpent.c
27649 +++ b/crypto/serpent.c
27650 @@ -21,6 +21,7 @@
27651 #include <asm/byteorder.h>
27652 #include <linux/crypto.h>
27653 #include <linux/types.h>
27654 +#include <linux/sched.h>
27655
27656 /* Key is padded to the maximum of 256 bits before round key generation.
27657 * Any key length <= 256 bits (32 bytes) is allowed by the algorithm.
27658 @@ -224,6 +225,8 @@ static int serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
27659 u32 r0,r1,r2,r3,r4;
27660 int i;
27661
27662 + pax_track_stack();
27663 +
27664 /* Copy key, add padding */
27665
27666 for (i = 0; i < keylen; ++i)
27667 diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
27668 index 0d2cdb8..d8de48d 100644
27669 --- a/drivers/acpi/acpi_pad.c
27670 +++ b/drivers/acpi/acpi_pad.c
27671 @@ -30,7 +30,7 @@
27672 #include <acpi/acpi_bus.h>
27673 #include <acpi/acpi_drivers.h>
27674
27675 -#define ACPI_PROCESSOR_AGGREGATOR_CLASS "processor_aggregator"
27676 +#define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad"
27677 #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
27678 #define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
27679 static DEFINE_MUTEX(isolated_cpus_lock);
27680 diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
27681 index 3f4602b..2e41d36 100644
27682 --- a/drivers/acpi/battery.c
27683 +++ b/drivers/acpi/battery.c
27684 @@ -763,7 +763,7 @@ DECLARE_FILE_FUNCTIONS(alarm);
27685 }
27686
27687 static struct battery_file {
27688 - struct file_operations ops;
27689 + const struct file_operations ops;
27690 mode_t mode;
27691 const char *name;
27692 } acpi_battery_file[] = {
27693 diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
27694 index 7338b6a..82f0257 100644
27695 --- a/drivers/acpi/dock.c
27696 +++ b/drivers/acpi/dock.c
27697 @@ -77,7 +77,7 @@ struct dock_dependent_device {
27698 struct list_head list;
27699 struct list_head hotplug_list;
27700 acpi_handle handle;
27701 - struct acpi_dock_ops *ops;
27702 + const struct acpi_dock_ops *ops;
27703 void *context;
27704 };
27705
27706 @@ -605,7 +605,7 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifier);
27707 * the dock driver after _DCK is executed.
27708 */
27709 int
27710 -register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops,
27711 +register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops,
27712 void *context)
27713 {
27714 struct dock_dependent_device *dd;
27715 diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
27716 index 7c1c59e..2993595 100644
27717 --- a/drivers/acpi/osl.c
27718 +++ b/drivers/acpi/osl.c
27719 @@ -523,6 +523,8 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
27720 void __iomem *virt_addr;
27721
27722 virt_addr = ioremap(phys_addr, width);
27723 + if (!virt_addr)
27724 + return AE_NO_MEMORY;
27725 if (!value)
27726 value = &dummy;
27727
27728 @@ -551,6 +553,8 @@ acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
27729 void __iomem *virt_addr;
27730
27731 virt_addr = ioremap(phys_addr, width);
27732 + if (!virt_addr)
27733 + return AE_NO_MEMORY;
27734
27735 switch (width) {
27736 case 8:
27737 diff --git a/drivers/acpi/power_meter.c b/drivers/acpi/power_meter.c
27738 index c216062..eec10d2 100644
27739 --- a/drivers/acpi/power_meter.c
27740 +++ b/drivers/acpi/power_meter.c
27741 @@ -315,8 +315,6 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
27742 return res;
27743
27744 temp /= 1000;
27745 - if (temp < 0)
27746 - return -EINVAL;
27747
27748 mutex_lock(&resource->lock);
27749 resource->trip[attr->index - 7] = temp;
27750 diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
27751 index d0d25e2..961643d 100644
27752 --- a/drivers/acpi/proc.c
27753 +++ b/drivers/acpi/proc.c
27754 @@ -391,20 +391,15 @@ acpi_system_write_wakeup_device(struct file *file,
27755 size_t count, loff_t * ppos)
27756 {
27757 struct list_head *node, *next;
27758 - char strbuf[5];
27759 - char str[5] = "";
27760 - unsigned int len = count;
27761 + char strbuf[5] = {0};
27762 struct acpi_device *found_dev = NULL;
27763
27764 - if (len > 4)
27765 - len = 4;
27766 - if (len < 0)
27767 - return -EFAULT;
27768 + if (count > 4)
27769 + count = 4;
27770
27771 - if (copy_from_user(strbuf, buffer, len))
27772 + if (copy_from_user(strbuf, buffer, count))
27773 return -EFAULT;
27774 - strbuf[len] = '\0';
27775 - sscanf(strbuf, "%s", str);
27776 + strbuf[count] = '\0';
27777
27778 mutex_lock(&acpi_device_lock);
27779 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
27780 @@ -413,7 +408,7 @@ acpi_system_write_wakeup_device(struct file *file,
27781 if (!dev->wakeup.flags.valid)
27782 continue;
27783
27784 - if (!strncmp(dev->pnp.bus_id, str, 4)) {
27785 + if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
27786 dev->wakeup.state.enabled =
27787 dev->wakeup.state.enabled ? 0 : 1;
27788 found_dev = dev;
27789 diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
27790 index 7102474..de8ad22 100644
27791 --- a/drivers/acpi/processor_core.c
27792 +++ b/drivers/acpi/processor_core.c
27793 @@ -790,7 +790,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
27794 return 0;
27795 }
27796
27797 - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
27798 + BUG_ON(pr->id >= nr_cpu_ids);
27799
27800 /*
27801 * Buggy BIOS check
27802 diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
27803 index d933980..5761f13 100644
27804 --- a/drivers/acpi/sbshc.c
27805 +++ b/drivers/acpi/sbshc.c
27806 @@ -17,7 +17,7 @@
27807
27808 #define PREFIX "ACPI: "
27809
27810 -#define ACPI_SMB_HC_CLASS "smbus_host_controller"
27811 +#define ACPI_SMB_HC_CLASS "smbus_host_ctl"
27812 #define ACPI_SMB_HC_DEVICE_NAME "ACPI SMBus HC"
27813
27814 struct acpi_smb_hc {
27815 diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
27816 index 0458094..6978e7b 100644
27817 --- a/drivers/acpi/sleep.c
27818 +++ b/drivers/acpi/sleep.c
27819 @@ -283,7 +283,7 @@ static int acpi_suspend_state_valid(suspend_state_t pm_state)
27820 }
27821 }
27822
27823 -static struct platform_suspend_ops acpi_suspend_ops = {
27824 +static const struct platform_suspend_ops acpi_suspend_ops = {
27825 .valid = acpi_suspend_state_valid,
27826 .begin = acpi_suspend_begin,
27827 .prepare_late = acpi_pm_prepare,
27828 @@ -311,7 +311,7 @@ static int acpi_suspend_begin_old(suspend_state_t pm_state)
27829 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
27830 * been requested.
27831 */
27832 -static struct platform_suspend_ops acpi_suspend_ops_old = {
27833 +static const struct platform_suspend_ops acpi_suspend_ops_old = {
27834 .valid = acpi_suspend_state_valid,
27835 .begin = acpi_suspend_begin_old,
27836 .prepare_late = acpi_pm_disable_gpes,
27837 @@ -460,7 +460,7 @@ static void acpi_pm_enable_gpes(void)
27838 acpi_enable_all_runtime_gpes();
27839 }
27840
27841 -static struct platform_hibernation_ops acpi_hibernation_ops = {
27842 +static const struct platform_hibernation_ops acpi_hibernation_ops = {
27843 .begin = acpi_hibernation_begin,
27844 .end = acpi_pm_end,
27845 .pre_snapshot = acpi_hibernation_pre_snapshot,
27846 @@ -513,7 +513,7 @@ static int acpi_hibernation_pre_snapshot_old(void)
27847 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
27848 * been requested.
27849 */
27850 -static struct platform_hibernation_ops acpi_hibernation_ops_old = {
27851 +static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
27852 .begin = acpi_hibernation_begin_old,
27853 .end = acpi_pm_end,
27854 .pre_snapshot = acpi_hibernation_pre_snapshot_old,
27855 diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
27856 index 05dff63..b662ab7 100644
27857 --- a/drivers/acpi/video.c
27858 +++ b/drivers/acpi/video.c
27859 @@ -359,7 +359,7 @@ static int acpi_video_set_brightness(struct backlight_device *bd)
27860 vd->brightness->levels[request_level]);
27861 }
27862
27863 -static struct backlight_ops acpi_backlight_ops = {
27864 +static const struct backlight_ops acpi_backlight_ops = {
27865 .get_brightness = acpi_video_get_brightness,
27866 .update_status = acpi_video_set_brightness,
27867 };
27868 diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
27869 index 6787aab..23ffb0e 100644
27870 --- a/drivers/ata/ahci.c
27871 +++ b/drivers/ata/ahci.c
27872 @@ -387,7 +387,7 @@ static struct scsi_host_template ahci_sht = {
27873 .sdev_attrs = ahci_sdev_attrs,
27874 };
27875
27876 -static struct ata_port_operations ahci_ops = {
27877 +static const struct ata_port_operations ahci_ops = {
27878 .inherits = &sata_pmp_port_ops,
27879
27880 .qc_defer = sata_pmp_qc_defer_cmd_switch,
27881 @@ -424,17 +424,17 @@ static struct ata_port_operations ahci_ops = {
27882 .port_stop = ahci_port_stop,
27883 };
27884
27885 -static struct ata_port_operations ahci_vt8251_ops = {
27886 +static const struct ata_port_operations ahci_vt8251_ops = {
27887 .inherits = &ahci_ops,
27888 .hardreset = ahci_vt8251_hardreset,
27889 };
27890
27891 -static struct ata_port_operations ahci_p5wdh_ops = {
27892 +static const struct ata_port_operations ahci_p5wdh_ops = {
27893 .inherits = &ahci_ops,
27894 .hardreset = ahci_p5wdh_hardreset,
27895 };
27896
27897 -static struct ata_port_operations ahci_sb600_ops = {
27898 +static const struct ata_port_operations ahci_sb600_ops = {
27899 .inherits = &ahci_ops,
27900 .softreset = ahci_sb600_softreset,
27901 .pmp_softreset = ahci_sb600_softreset,
27902 diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
27903 index 99e7196..4968c77 100644
27904 --- a/drivers/ata/ata_generic.c
27905 +++ b/drivers/ata/ata_generic.c
27906 @@ -104,7 +104,7 @@ static struct scsi_host_template generic_sht = {
27907 ATA_BMDMA_SHT(DRV_NAME),
27908 };
27909
27910 -static struct ata_port_operations generic_port_ops = {
27911 +static const struct ata_port_operations generic_port_ops = {
27912 .inherits = &ata_bmdma_port_ops,
27913 .cable_detect = ata_cable_unknown,
27914 .set_mode = generic_set_mode,
27915 diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
27916 index c33591d..000c121 100644
27917 --- a/drivers/ata/ata_piix.c
27918 +++ b/drivers/ata/ata_piix.c
27919 @@ -318,7 +318,7 @@ static struct scsi_host_template piix_sht = {
27920 ATA_BMDMA_SHT(DRV_NAME),
27921 };
27922
27923 -static struct ata_port_operations piix_pata_ops = {
27924 +static const struct ata_port_operations piix_pata_ops = {
27925 .inherits = &ata_bmdma32_port_ops,
27926 .cable_detect = ata_cable_40wire,
27927 .set_piomode = piix_set_piomode,
27928 @@ -326,22 +326,22 @@ static struct ata_port_operations piix_pata_ops = {
27929 .prereset = piix_pata_prereset,
27930 };
27931
27932 -static struct ata_port_operations piix_vmw_ops = {
27933 +static const struct ata_port_operations piix_vmw_ops = {
27934 .inherits = &piix_pata_ops,
27935 .bmdma_status = piix_vmw_bmdma_status,
27936 };
27937
27938 -static struct ata_port_operations ich_pata_ops = {
27939 +static const struct ata_port_operations ich_pata_ops = {
27940 .inherits = &piix_pata_ops,
27941 .cable_detect = ich_pata_cable_detect,
27942 .set_dmamode = ich_set_dmamode,
27943 };
27944
27945 -static struct ata_port_operations piix_sata_ops = {
27946 +static const struct ata_port_operations piix_sata_ops = {
27947 .inherits = &ata_bmdma_port_ops,
27948 };
27949
27950 -static struct ata_port_operations piix_sidpr_sata_ops = {
27951 +static const struct ata_port_operations piix_sidpr_sata_ops = {
27952 .inherits = &piix_sata_ops,
27953 .hardreset = sata_std_hardreset,
27954 .scr_read = piix_sidpr_scr_read,
27955 diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
27956 index b0882cd..c295d65 100644
27957 --- a/drivers/ata/libata-acpi.c
27958 +++ b/drivers/ata/libata-acpi.c
27959 @@ -223,12 +223,12 @@ static void ata_acpi_dev_uevent(acpi_handle handle, u32 event, void *data)
27960 ata_acpi_uevent(dev->link->ap, dev, event);
27961 }
27962
27963 -static struct acpi_dock_ops ata_acpi_dev_dock_ops = {
27964 +static const struct acpi_dock_ops ata_acpi_dev_dock_ops = {
27965 .handler = ata_acpi_dev_notify_dock,
27966 .uevent = ata_acpi_dev_uevent,
27967 };
27968
27969 -static struct acpi_dock_ops ata_acpi_ap_dock_ops = {
27970 +static const struct acpi_dock_ops ata_acpi_ap_dock_ops = {
27971 .handler = ata_acpi_ap_notify_dock,
27972 .uevent = ata_acpi_ap_uevent,
27973 };
27974 diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
27975 index d4f7f99..94f603e 100644
27976 --- a/drivers/ata/libata-core.c
27977 +++ b/drivers/ata/libata-core.c
27978 @@ -4954,7 +4954,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
27979 struct ata_port *ap;
27980 unsigned int tag;
27981
27982 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27983 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27984 ap = qc->ap;
27985
27986 qc->flags = 0;
27987 @@ -4970,7 +4970,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
27988 struct ata_port *ap;
27989 struct ata_link *link;
27990
27991 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27992 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27993 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
27994 ap = qc->ap;
27995 link = qc->dev->link;
27996 @@ -5987,7 +5987,7 @@ static void ata_host_stop(struct device *gendev, void *res)
27997 * LOCKING:
27998 * None.
27999 */
28000 -static void ata_finalize_port_ops(struct ata_port_operations *ops)
28001 +static void ata_finalize_port_ops(const struct ata_port_operations *ops)
28002 {
28003 static DEFINE_SPINLOCK(lock);
28004 const struct ata_port_operations *cur;
28005 @@ -5999,6 +5999,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
28006 return;
28007
28008 spin_lock(&lock);
28009 + pax_open_kernel();
28010
28011 for (cur = ops->inherits; cur; cur = cur->inherits) {
28012 void **inherit = (void **)cur;
28013 @@ -6012,8 +6013,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
28014 if (IS_ERR(*pp))
28015 *pp = NULL;
28016
28017 - ops->inherits = NULL;
28018 + *(struct ata_port_operations **)&ops->inherits = NULL;
28019
28020 + pax_close_kernel();
28021 spin_unlock(&lock);
28022 }
28023
28024 @@ -6110,7 +6112,7 @@ int ata_host_start(struct ata_host *host)
28025 */
28026 /* KILLME - the only user left is ipr */
28027 void ata_host_init(struct ata_host *host, struct device *dev,
28028 - unsigned long flags, struct ata_port_operations *ops)
28029 + unsigned long flags, const struct ata_port_operations *ops)
28030 {
28031 spin_lock_init(&host->lock);
28032 host->dev = dev;
28033 @@ -6773,7 +6775,7 @@ static void ata_dummy_error_handler(struct ata_port *ap)
28034 /* truly dummy */
28035 }
28036
28037 -struct ata_port_operations ata_dummy_port_ops = {
28038 +const struct ata_port_operations ata_dummy_port_ops = {
28039 .qc_prep = ata_noop_qc_prep,
28040 .qc_issue = ata_dummy_qc_issue,
28041 .error_handler = ata_dummy_error_handler,
28042 diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
28043 index e5bdb9b..45a8e72 100644
28044 --- a/drivers/ata/libata-eh.c
28045 +++ b/drivers/ata/libata-eh.c
28046 @@ -2423,6 +2423,8 @@ void ata_eh_report(struct ata_port *ap)
28047 {
28048 struct ata_link *link;
28049
28050 + pax_track_stack();
28051 +
28052 ata_for_each_link(link, ap, HOST_FIRST)
28053 ata_eh_link_report(link);
28054 }
28055 @@ -3594,7 +3596,7 @@ void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
28056 */
28057 void ata_std_error_handler(struct ata_port *ap)
28058 {
28059 - struct ata_port_operations *ops = ap->ops;
28060 + const struct ata_port_operations *ops = ap->ops;
28061 ata_reset_fn_t hardreset = ops->hardreset;
28062
28063 /* ignore built-in hardreset if SCR access is not available */
28064 diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
28065 index 51f0ffb..19ce3e3 100644
28066 --- a/drivers/ata/libata-pmp.c
28067 +++ b/drivers/ata/libata-pmp.c
28068 @@ -841,7 +841,7 @@ static int sata_pmp_handle_link_fail(struct ata_link *link, int *link_tries)
28069 */
28070 static int sata_pmp_eh_recover(struct ata_port *ap)
28071 {
28072 - struct ata_port_operations *ops = ap->ops;
28073 + const struct ata_port_operations *ops = ap->ops;
28074 int pmp_tries, link_tries[SATA_PMP_MAX_PORTS];
28075 struct ata_link *pmp_link = &ap->link;
28076 struct ata_device *pmp_dev = pmp_link->device;
28077 diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c
28078 index d8f35fe..288180a 100644
28079 --- a/drivers/ata/pata_acpi.c
28080 +++ b/drivers/ata/pata_acpi.c
28081 @@ -215,7 +215,7 @@ static struct scsi_host_template pacpi_sht = {
28082 ATA_BMDMA_SHT(DRV_NAME),
28083 };
28084
28085 -static struct ata_port_operations pacpi_ops = {
28086 +static const struct ata_port_operations pacpi_ops = {
28087 .inherits = &ata_bmdma_port_ops,
28088 .qc_issue = pacpi_qc_issue,
28089 .cable_detect = pacpi_cable_detect,
28090 diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
28091 index 9434114..1f2f364 100644
28092 --- a/drivers/ata/pata_ali.c
28093 +++ b/drivers/ata/pata_ali.c
28094 @@ -365,7 +365,7 @@ static struct scsi_host_template ali_sht = {
28095 * Port operations for PIO only ALi
28096 */
28097
28098 -static struct ata_port_operations ali_early_port_ops = {
28099 +static const struct ata_port_operations ali_early_port_ops = {
28100 .inherits = &ata_sff_port_ops,
28101 .cable_detect = ata_cable_40wire,
28102 .set_piomode = ali_set_piomode,
28103 @@ -382,7 +382,7 @@ static const struct ata_port_operations ali_dma_base_ops = {
28104 * Port operations for DMA capable ALi without cable
28105 * detect
28106 */
28107 -static struct ata_port_operations ali_20_port_ops = {
28108 +static const struct ata_port_operations ali_20_port_ops = {
28109 .inherits = &ali_dma_base_ops,
28110 .cable_detect = ata_cable_40wire,
28111 .mode_filter = ali_20_filter,
28112 @@ -393,7 +393,7 @@ static struct ata_port_operations ali_20_port_ops = {
28113 /*
28114 * Port operations for DMA capable ALi with cable detect
28115 */
28116 -static struct ata_port_operations ali_c2_port_ops = {
28117 +static const struct ata_port_operations ali_c2_port_ops = {
28118 .inherits = &ali_dma_base_ops,
28119 .check_atapi_dma = ali_check_atapi_dma,
28120 .cable_detect = ali_c2_cable_detect,
28121 @@ -404,7 +404,7 @@ static struct ata_port_operations ali_c2_port_ops = {
28122 /*
28123 * Port operations for DMA capable ALi with cable detect
28124 */
28125 -static struct ata_port_operations ali_c4_port_ops = {
28126 +static const struct ata_port_operations ali_c4_port_ops = {
28127 .inherits = &ali_dma_base_ops,
28128 .check_atapi_dma = ali_check_atapi_dma,
28129 .cable_detect = ali_c2_cable_detect,
28130 @@ -414,7 +414,7 @@ static struct ata_port_operations ali_c4_port_ops = {
28131 /*
28132 * Port operations for DMA capable ALi with cable detect and LBA48
28133 */
28134 -static struct ata_port_operations ali_c5_port_ops = {
28135 +static const struct ata_port_operations ali_c5_port_ops = {
28136 .inherits = &ali_dma_base_ops,
28137 .check_atapi_dma = ali_check_atapi_dma,
28138 .dev_config = ali_warn_atapi_dma,
28139 diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
28140 index 567f3f7..c8ee0da 100644
28141 --- a/drivers/ata/pata_amd.c
28142 +++ b/drivers/ata/pata_amd.c
28143 @@ -397,28 +397,28 @@ static const struct ata_port_operations amd_base_port_ops = {
28144 .prereset = amd_pre_reset,
28145 };
28146
28147 -static struct ata_port_operations amd33_port_ops = {
28148 +static const struct ata_port_operations amd33_port_ops = {
28149 .inherits = &amd_base_port_ops,
28150 .cable_detect = ata_cable_40wire,
28151 .set_piomode = amd33_set_piomode,
28152 .set_dmamode = amd33_set_dmamode,
28153 };
28154
28155 -static struct ata_port_operations amd66_port_ops = {
28156 +static const struct ata_port_operations amd66_port_ops = {
28157 .inherits = &amd_base_port_ops,
28158 .cable_detect = ata_cable_unknown,
28159 .set_piomode = amd66_set_piomode,
28160 .set_dmamode = amd66_set_dmamode,
28161 };
28162
28163 -static struct ata_port_operations amd100_port_ops = {
28164 +static const struct ata_port_operations amd100_port_ops = {
28165 .inherits = &amd_base_port_ops,
28166 .cable_detect = ata_cable_unknown,
28167 .set_piomode = amd100_set_piomode,
28168 .set_dmamode = amd100_set_dmamode,
28169 };
28170
28171 -static struct ata_port_operations amd133_port_ops = {
28172 +static const struct ata_port_operations amd133_port_ops = {
28173 .inherits = &amd_base_port_ops,
28174 .cable_detect = amd_cable_detect,
28175 .set_piomode = amd133_set_piomode,
28176 @@ -433,13 +433,13 @@ static const struct ata_port_operations nv_base_port_ops = {
28177 .host_stop = nv_host_stop,
28178 };
28179
28180 -static struct ata_port_operations nv100_port_ops = {
28181 +static const struct ata_port_operations nv100_port_ops = {
28182 .inherits = &nv_base_port_ops,
28183 .set_piomode = nv100_set_piomode,
28184 .set_dmamode = nv100_set_dmamode,
28185 };
28186
28187 -static struct ata_port_operations nv133_port_ops = {
28188 +static const struct ata_port_operations nv133_port_ops = {
28189 .inherits = &nv_base_port_ops,
28190 .set_piomode = nv133_set_piomode,
28191 .set_dmamode = nv133_set_dmamode,
28192 diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c
28193 index d332cfd..4b7eaae 100644
28194 --- a/drivers/ata/pata_artop.c
28195 +++ b/drivers/ata/pata_artop.c
28196 @@ -311,7 +311,7 @@ static struct scsi_host_template artop_sht = {
28197 ATA_BMDMA_SHT(DRV_NAME),
28198 };
28199
28200 -static struct ata_port_operations artop6210_ops = {
28201 +static const struct ata_port_operations artop6210_ops = {
28202 .inherits = &ata_bmdma_port_ops,
28203 .cable_detect = ata_cable_40wire,
28204 .set_piomode = artop6210_set_piomode,
28205 @@ -320,7 +320,7 @@ static struct ata_port_operations artop6210_ops = {
28206 .qc_defer = artop6210_qc_defer,
28207 };
28208
28209 -static struct ata_port_operations artop6260_ops = {
28210 +static const struct ata_port_operations artop6260_ops = {
28211 .inherits = &ata_bmdma_port_ops,
28212 .cable_detect = artop6260_cable_detect,
28213 .set_piomode = artop6260_set_piomode,
28214 diff --git a/drivers/ata/pata_at32.c b/drivers/ata/pata_at32.c
28215 index 5c129f9..7bb7ccb 100644
28216 --- a/drivers/ata/pata_at32.c
28217 +++ b/drivers/ata/pata_at32.c
28218 @@ -172,7 +172,7 @@ static struct scsi_host_template at32_sht = {
28219 ATA_PIO_SHT(DRV_NAME),
28220 };
28221
28222 -static struct ata_port_operations at32_port_ops = {
28223 +static const struct ata_port_operations at32_port_ops = {
28224 .inherits = &ata_sff_port_ops,
28225 .cable_detect = ata_cable_40wire,
28226 .set_piomode = pata_at32_set_piomode,
28227 diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c
28228 index 41c94b1..829006d 100644
28229 --- a/drivers/ata/pata_at91.c
28230 +++ b/drivers/ata/pata_at91.c
28231 @@ -195,7 +195,7 @@ static struct scsi_host_template pata_at91_sht = {
28232 ATA_PIO_SHT(DRV_NAME),
28233 };
28234
28235 -static struct ata_port_operations pata_at91_port_ops = {
28236 +static const struct ata_port_operations pata_at91_port_ops = {
28237 .inherits = &ata_sff_port_ops,
28238
28239 .sff_data_xfer = pata_at91_data_xfer_noirq,
28240 diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
28241 index ae4454d..d391eb4 100644
28242 --- a/drivers/ata/pata_atiixp.c
28243 +++ b/drivers/ata/pata_atiixp.c
28244 @@ -205,7 +205,7 @@ static struct scsi_host_template atiixp_sht = {
28245 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
28246 };
28247
28248 -static struct ata_port_operations atiixp_port_ops = {
28249 +static const struct ata_port_operations atiixp_port_ops = {
28250 .inherits = &ata_bmdma_port_ops,
28251
28252 .qc_prep = ata_sff_dumb_qc_prep,
28253 diff --git a/drivers/ata/pata_atp867x.c b/drivers/ata/pata_atp867x.c
28254 index 6fe7ded..2a425dc 100644
28255 --- a/drivers/ata/pata_atp867x.c
28256 +++ b/drivers/ata/pata_atp867x.c
28257 @@ -274,7 +274,7 @@ static struct scsi_host_template atp867x_sht = {
28258 ATA_BMDMA_SHT(DRV_NAME),
28259 };
28260
28261 -static struct ata_port_operations atp867x_ops = {
28262 +static const struct ata_port_operations atp867x_ops = {
28263 .inherits = &ata_bmdma_port_ops,
28264 .cable_detect = atp867x_cable_detect,
28265 .set_piomode = atp867x_set_piomode,
28266 diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
28267 index c4b47a3..b27a367 100644
28268 --- a/drivers/ata/pata_bf54x.c
28269 +++ b/drivers/ata/pata_bf54x.c
28270 @@ -1464,7 +1464,7 @@ static struct scsi_host_template bfin_sht = {
28271 .dma_boundary = ATA_DMA_BOUNDARY,
28272 };
28273
28274 -static struct ata_port_operations bfin_pata_ops = {
28275 +static const struct ata_port_operations bfin_pata_ops = {
28276 .inherits = &ata_sff_port_ops,
28277
28278 .set_piomode = bfin_set_piomode,
28279 diff --git a/drivers/ata/pata_cmd640.c b/drivers/ata/pata_cmd640.c
28280 index 5acf9fa..84248be 100644
28281 --- a/drivers/ata/pata_cmd640.c
28282 +++ b/drivers/ata/pata_cmd640.c
28283 @@ -168,7 +168,7 @@ static struct scsi_host_template cmd640_sht = {
28284 ATA_BMDMA_SHT(DRV_NAME),
28285 };
28286
28287 -static struct ata_port_operations cmd640_port_ops = {
28288 +static const struct ata_port_operations cmd640_port_ops = {
28289 .inherits = &ata_bmdma_port_ops,
28290 /* In theory xfer_noirq is not needed once we kill the prefetcher */
28291 .sff_data_xfer = ata_sff_data_xfer_noirq,
28292 diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
28293 index ccd2694..c869c3d 100644
28294 --- a/drivers/ata/pata_cmd64x.c
28295 +++ b/drivers/ata/pata_cmd64x.c
28296 @@ -271,18 +271,18 @@ static const struct ata_port_operations cmd64x_base_ops = {
28297 .set_dmamode = cmd64x_set_dmamode,
28298 };
28299
28300 -static struct ata_port_operations cmd64x_port_ops = {
28301 +static const struct ata_port_operations cmd64x_port_ops = {
28302 .inherits = &cmd64x_base_ops,
28303 .cable_detect = ata_cable_40wire,
28304 };
28305
28306 -static struct ata_port_operations cmd646r1_port_ops = {
28307 +static const struct ata_port_operations cmd646r1_port_ops = {
28308 .inherits = &cmd64x_base_ops,
28309 .bmdma_stop = cmd646r1_bmdma_stop,
28310 .cable_detect = ata_cable_40wire,
28311 };
28312
28313 -static struct ata_port_operations cmd648_port_ops = {
28314 +static const struct ata_port_operations cmd648_port_ops = {
28315 .inherits = &cmd64x_base_ops,
28316 .bmdma_stop = cmd648_bmdma_stop,
28317 .cable_detect = cmd648_cable_detect,
28318 diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c
28319 index 0df83cf..d7595b0 100644
28320 --- a/drivers/ata/pata_cs5520.c
28321 +++ b/drivers/ata/pata_cs5520.c
28322 @@ -144,7 +144,7 @@ static struct scsi_host_template cs5520_sht = {
28323 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
28324 };
28325
28326 -static struct ata_port_operations cs5520_port_ops = {
28327 +static const struct ata_port_operations cs5520_port_ops = {
28328 .inherits = &ata_bmdma_port_ops,
28329 .qc_prep = ata_sff_dumb_qc_prep,
28330 .cable_detect = ata_cable_40wire,
28331 diff --git a/drivers/ata/pata_cs5530.c b/drivers/ata/pata_cs5530.c
28332 index c974b05..6d26b11 100644
28333 --- a/drivers/ata/pata_cs5530.c
28334 +++ b/drivers/ata/pata_cs5530.c
28335 @@ -164,7 +164,7 @@ static struct scsi_host_template cs5530_sht = {
28336 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
28337 };
28338
28339 -static struct ata_port_operations cs5530_port_ops = {
28340 +static const struct ata_port_operations cs5530_port_ops = {
28341 .inherits = &ata_bmdma_port_ops,
28342
28343 .qc_prep = ata_sff_dumb_qc_prep,
28344 diff --git a/drivers/ata/pata_cs5535.c b/drivers/ata/pata_cs5535.c
28345 index 403f561..aacd26b 100644
28346 --- a/drivers/ata/pata_cs5535.c
28347 +++ b/drivers/ata/pata_cs5535.c
28348 @@ -160,7 +160,7 @@ static struct scsi_host_template cs5535_sht = {
28349 ATA_BMDMA_SHT(DRV_NAME),
28350 };
28351
28352 -static struct ata_port_operations cs5535_port_ops = {
28353 +static const struct ata_port_operations cs5535_port_ops = {
28354 .inherits = &ata_bmdma_port_ops,
28355 .cable_detect = cs5535_cable_detect,
28356 .set_piomode = cs5535_set_piomode,
28357 diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c
28358 index 6da4cb4..de24a25 100644
28359 --- a/drivers/ata/pata_cs5536.c
28360 +++ b/drivers/ata/pata_cs5536.c
28361 @@ -223,7 +223,7 @@ static struct scsi_host_template cs5536_sht = {
28362 ATA_BMDMA_SHT(DRV_NAME),
28363 };
28364
28365 -static struct ata_port_operations cs5536_port_ops = {
28366 +static const struct ata_port_operations cs5536_port_ops = {
28367 .inherits = &ata_bmdma_port_ops,
28368 .cable_detect = cs5536_cable_detect,
28369 .set_piomode = cs5536_set_piomode,
28370 diff --git a/drivers/ata/pata_cypress.c b/drivers/ata/pata_cypress.c
28371 index 8fb040b..b16a9c9 100644
28372 --- a/drivers/ata/pata_cypress.c
28373 +++ b/drivers/ata/pata_cypress.c
28374 @@ -113,7 +113,7 @@ static struct scsi_host_template cy82c693_sht = {
28375 ATA_BMDMA_SHT(DRV_NAME),
28376 };
28377
28378 -static struct ata_port_operations cy82c693_port_ops = {
28379 +static const struct ata_port_operations cy82c693_port_ops = {
28380 .inherits = &ata_bmdma_port_ops,
28381 .cable_detect = ata_cable_40wire,
28382 .set_piomode = cy82c693_set_piomode,
28383 diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c
28384 index 2a6412f..555ee11 100644
28385 --- a/drivers/ata/pata_efar.c
28386 +++ b/drivers/ata/pata_efar.c
28387 @@ -222,7 +222,7 @@ static struct scsi_host_template efar_sht = {
28388 ATA_BMDMA_SHT(DRV_NAME),
28389 };
28390
28391 -static struct ata_port_operations efar_ops = {
28392 +static const struct ata_port_operations efar_ops = {
28393 .inherits = &ata_bmdma_port_ops,
28394 .cable_detect = efar_cable_detect,
28395 .set_piomode = efar_set_piomode,
28396 diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
28397 index b9d8836..0b92030 100644
28398 --- a/drivers/ata/pata_hpt366.c
28399 +++ b/drivers/ata/pata_hpt366.c
28400 @@ -282,7 +282,7 @@ static struct scsi_host_template hpt36x_sht = {
28401 * Configuration for HPT366/68
28402 */
28403
28404 -static struct ata_port_operations hpt366_port_ops = {
28405 +static const struct ata_port_operations hpt366_port_ops = {
28406 .inherits = &ata_bmdma_port_ops,
28407 .cable_detect = hpt36x_cable_detect,
28408 .mode_filter = hpt366_filter,
28409 diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
28410 index 5af7f19..00c4980 100644
28411 --- a/drivers/ata/pata_hpt37x.c
28412 +++ b/drivers/ata/pata_hpt37x.c
28413 @@ -576,7 +576,7 @@ static struct scsi_host_template hpt37x_sht = {
28414 * Configuration for HPT370
28415 */
28416
28417 -static struct ata_port_operations hpt370_port_ops = {
28418 +static const struct ata_port_operations hpt370_port_ops = {
28419 .inherits = &ata_bmdma_port_ops,
28420
28421 .bmdma_stop = hpt370_bmdma_stop,
28422 @@ -591,7 +591,7 @@ static struct ata_port_operations hpt370_port_ops = {
28423 * Configuration for HPT370A. Close to 370 but less filters
28424 */
28425
28426 -static struct ata_port_operations hpt370a_port_ops = {
28427 +static const struct ata_port_operations hpt370a_port_ops = {
28428 .inherits = &hpt370_port_ops,
28429 .mode_filter = hpt370a_filter,
28430 };
28431 @@ -601,7 +601,7 @@ static struct ata_port_operations hpt370a_port_ops = {
28432 * and DMA mode setting functionality.
28433 */
28434
28435 -static struct ata_port_operations hpt372_port_ops = {
28436 +static const struct ata_port_operations hpt372_port_ops = {
28437 .inherits = &ata_bmdma_port_ops,
28438
28439 .bmdma_stop = hpt37x_bmdma_stop,
28440 @@ -616,7 +616,7 @@ static struct ata_port_operations hpt372_port_ops = {
28441 * but we have a different cable detection procedure for function 1.
28442 */
28443
28444 -static struct ata_port_operations hpt374_fn1_port_ops = {
28445 +static const struct ata_port_operations hpt374_fn1_port_ops = {
28446 .inherits = &hpt372_port_ops,
28447 .prereset = hpt374_fn1_pre_reset,
28448 };
28449 diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
28450 index 100f227..2e39382 100644
28451 --- a/drivers/ata/pata_hpt3x2n.c
28452 +++ b/drivers/ata/pata_hpt3x2n.c
28453 @@ -337,7 +337,7 @@ static struct scsi_host_template hpt3x2n_sht = {
28454 * Configuration for HPT3x2n.
28455 */
28456
28457 -static struct ata_port_operations hpt3x2n_port_ops = {
28458 +static const struct ata_port_operations hpt3x2n_port_ops = {
28459 .inherits = &ata_bmdma_port_ops,
28460
28461 .bmdma_stop = hpt3x2n_bmdma_stop,
28462 diff --git a/drivers/ata/pata_hpt3x3.c b/drivers/ata/pata_hpt3x3.c
28463 index 7e31025..6fca8f4 100644
28464 --- a/drivers/ata/pata_hpt3x3.c
28465 +++ b/drivers/ata/pata_hpt3x3.c
28466 @@ -141,7 +141,7 @@ static struct scsi_host_template hpt3x3_sht = {
28467 ATA_BMDMA_SHT(DRV_NAME),
28468 };
28469
28470 -static struct ata_port_operations hpt3x3_port_ops = {
28471 +static const struct ata_port_operations hpt3x3_port_ops = {
28472 .inherits = &ata_bmdma_port_ops,
28473 .cable_detect = ata_cable_40wire,
28474 .set_piomode = hpt3x3_set_piomode,
28475 diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c
28476 index b663b7f..9a26c2a 100644
28477 --- a/drivers/ata/pata_icside.c
28478 +++ b/drivers/ata/pata_icside.c
28479 @@ -319,7 +319,7 @@ static void pata_icside_postreset(struct ata_link *link, unsigned int *classes)
28480 }
28481 }
28482
28483 -static struct ata_port_operations pata_icside_port_ops = {
28484 +static const struct ata_port_operations pata_icside_port_ops = {
28485 .inherits = &ata_sff_port_ops,
28486 /* no need to build any PRD tables for DMA */
28487 .qc_prep = ata_noop_qc_prep,
28488 diff --git a/drivers/ata/pata_isapnp.c b/drivers/ata/pata_isapnp.c
28489 index 4bceb88..457dfb6 100644
28490 --- a/drivers/ata/pata_isapnp.c
28491 +++ b/drivers/ata/pata_isapnp.c
28492 @@ -23,12 +23,12 @@ static struct scsi_host_template isapnp_sht = {
28493 ATA_PIO_SHT(DRV_NAME),
28494 };
28495
28496 -static struct ata_port_operations isapnp_port_ops = {
28497 +static const struct ata_port_operations isapnp_port_ops = {
28498 .inherits = &ata_sff_port_ops,
28499 .cable_detect = ata_cable_40wire,
28500 };
28501
28502 -static struct ata_port_operations isapnp_noalt_port_ops = {
28503 +static const struct ata_port_operations isapnp_noalt_port_ops = {
28504 .inherits = &ata_sff_port_ops,
28505 .cable_detect = ata_cable_40wire,
28506 /* No altstatus so we don't want to use the lost interrupt poll */
28507 diff --git a/drivers/ata/pata_it8213.c b/drivers/ata/pata_it8213.c
28508 index f156da8..24976e2 100644
28509 --- a/drivers/ata/pata_it8213.c
28510 +++ b/drivers/ata/pata_it8213.c
28511 @@ -234,7 +234,7 @@ static struct scsi_host_template it8213_sht = {
28512 };
28513
28514
28515 -static struct ata_port_operations it8213_ops = {
28516 +static const struct ata_port_operations it8213_ops = {
28517 .inherits = &ata_bmdma_port_ops,
28518 .cable_detect = it8213_cable_detect,
28519 .set_piomode = it8213_set_piomode,
28520 diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
28521 index 188bc2f..ca9e785 100644
28522 --- a/drivers/ata/pata_it821x.c
28523 +++ b/drivers/ata/pata_it821x.c
28524 @@ -800,7 +800,7 @@ static struct scsi_host_template it821x_sht = {
28525 ATA_BMDMA_SHT(DRV_NAME),
28526 };
28527
28528 -static struct ata_port_operations it821x_smart_port_ops = {
28529 +static const struct ata_port_operations it821x_smart_port_ops = {
28530 .inherits = &ata_bmdma_port_ops,
28531
28532 .check_atapi_dma= it821x_check_atapi_dma,
28533 @@ -814,7 +814,7 @@ static struct ata_port_operations it821x_smart_port_ops = {
28534 .port_start = it821x_port_start,
28535 };
28536
28537 -static struct ata_port_operations it821x_passthru_port_ops = {
28538 +static const struct ata_port_operations it821x_passthru_port_ops = {
28539 .inherits = &ata_bmdma_port_ops,
28540
28541 .check_atapi_dma= it821x_check_atapi_dma,
28542 @@ -830,7 +830,7 @@ static struct ata_port_operations it821x_passthru_port_ops = {
28543 .port_start = it821x_port_start,
28544 };
28545
28546 -static struct ata_port_operations it821x_rdc_port_ops = {
28547 +static const struct ata_port_operations it821x_rdc_port_ops = {
28548 .inherits = &ata_bmdma_port_ops,
28549
28550 .check_atapi_dma= it821x_check_atapi_dma,
28551 diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
28552 index ba54b08..4b952b7 100644
28553 --- a/drivers/ata/pata_ixp4xx_cf.c
28554 +++ b/drivers/ata/pata_ixp4xx_cf.c
28555 @@ -89,7 +89,7 @@ static struct scsi_host_template ixp4xx_sht = {
28556 ATA_PIO_SHT(DRV_NAME),
28557 };
28558
28559 -static struct ata_port_operations ixp4xx_port_ops = {
28560 +static const struct ata_port_operations ixp4xx_port_ops = {
28561 .inherits = &ata_sff_port_ops,
28562 .sff_data_xfer = ixp4xx_mmio_data_xfer,
28563 .cable_detect = ata_cable_40wire,
28564 diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c
28565 index 3a1474a..434b0ff 100644
28566 --- a/drivers/ata/pata_jmicron.c
28567 +++ b/drivers/ata/pata_jmicron.c
28568 @@ -111,7 +111,7 @@ static struct scsi_host_template jmicron_sht = {
28569 ATA_BMDMA_SHT(DRV_NAME),
28570 };
28571
28572 -static struct ata_port_operations jmicron_ops = {
28573 +static const struct ata_port_operations jmicron_ops = {
28574 .inherits = &ata_bmdma_port_ops,
28575 .prereset = jmicron_pre_reset,
28576 };
28577 diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
28578 index 6932e56..220e71d 100644
28579 --- a/drivers/ata/pata_legacy.c
28580 +++ b/drivers/ata/pata_legacy.c
28581 @@ -106,7 +106,7 @@ struct legacy_probe {
28582
28583 struct legacy_controller {
28584 const char *name;
28585 - struct ata_port_operations *ops;
28586 + const struct ata_port_operations *ops;
28587 unsigned int pio_mask;
28588 unsigned int flags;
28589 unsigned int pflags;
28590 @@ -223,12 +223,12 @@ static const struct ata_port_operations legacy_base_port_ops = {
28591 * pio_mask as well.
28592 */
28593
28594 -static struct ata_port_operations simple_port_ops = {
28595 +static const struct ata_port_operations simple_port_ops = {
28596 .inherits = &legacy_base_port_ops,
28597 .sff_data_xfer = ata_sff_data_xfer_noirq,
28598 };
28599
28600 -static struct ata_port_operations legacy_port_ops = {
28601 +static const struct ata_port_operations legacy_port_ops = {
28602 .inherits = &legacy_base_port_ops,
28603 .sff_data_xfer = ata_sff_data_xfer_noirq,
28604 .set_mode = legacy_set_mode,
28605 @@ -324,7 +324,7 @@ static unsigned int pdc_data_xfer_vlb(struct ata_device *dev,
28606 return buflen;
28607 }
28608
28609 -static struct ata_port_operations pdc20230_port_ops = {
28610 +static const struct ata_port_operations pdc20230_port_ops = {
28611 .inherits = &legacy_base_port_ops,
28612 .set_piomode = pdc20230_set_piomode,
28613 .sff_data_xfer = pdc_data_xfer_vlb,
28614 @@ -357,7 +357,7 @@ static void ht6560a_set_piomode(struct ata_port *ap, struct ata_device *adev)
28615 ioread8(ap->ioaddr.status_addr);
28616 }
28617
28618 -static struct ata_port_operations ht6560a_port_ops = {
28619 +static const struct ata_port_operations ht6560a_port_ops = {
28620 .inherits = &legacy_base_port_ops,
28621 .set_piomode = ht6560a_set_piomode,
28622 };
28623 @@ -400,7 +400,7 @@ static void ht6560b_set_piomode(struct ata_port *ap, struct ata_device *adev)
28624 ioread8(ap->ioaddr.status_addr);
28625 }
28626
28627 -static struct ata_port_operations ht6560b_port_ops = {
28628 +static const struct ata_port_operations ht6560b_port_ops = {
28629 .inherits = &legacy_base_port_ops,
28630 .set_piomode = ht6560b_set_piomode,
28631 };
28632 @@ -499,7 +499,7 @@ static void opti82c611a_set_piomode(struct ata_port *ap,
28633 }
28634
28635
28636 -static struct ata_port_operations opti82c611a_port_ops = {
28637 +static const struct ata_port_operations opti82c611a_port_ops = {
28638 .inherits = &legacy_base_port_ops,
28639 .set_piomode = opti82c611a_set_piomode,
28640 };
28641 @@ -609,7 +609,7 @@ static unsigned int opti82c46x_qc_issue(struct ata_queued_cmd *qc)
28642 return ata_sff_qc_issue(qc);
28643 }
28644
28645 -static struct ata_port_operations opti82c46x_port_ops = {
28646 +static const struct ata_port_operations opti82c46x_port_ops = {
28647 .inherits = &legacy_base_port_ops,
28648 .set_piomode = opti82c46x_set_piomode,
28649 .qc_issue = opti82c46x_qc_issue,
28650 @@ -771,20 +771,20 @@ static int qdi_port(struct platform_device *dev,
28651 return 0;
28652 }
28653
28654 -static struct ata_port_operations qdi6500_port_ops = {
28655 +static const struct ata_port_operations qdi6500_port_ops = {
28656 .inherits = &legacy_base_port_ops,
28657 .set_piomode = qdi6500_set_piomode,
28658 .qc_issue = qdi_qc_issue,
28659 .sff_data_xfer = vlb32_data_xfer,
28660 };
28661
28662 -static struct ata_port_operations qdi6580_port_ops = {
28663 +static const struct ata_port_operations qdi6580_port_ops = {
28664 .inherits = &legacy_base_port_ops,
28665 .set_piomode = qdi6580_set_piomode,
28666 .sff_data_xfer = vlb32_data_xfer,
28667 };
28668
28669 -static struct ata_port_operations qdi6580dp_port_ops = {
28670 +static const struct ata_port_operations qdi6580dp_port_ops = {
28671 .inherits = &legacy_base_port_ops,
28672 .set_piomode = qdi6580dp_set_piomode,
28673 .sff_data_xfer = vlb32_data_xfer,
28674 @@ -855,7 +855,7 @@ static int winbond_port(struct platform_device *dev,
28675 return 0;
28676 }
28677
28678 -static struct ata_port_operations winbond_port_ops = {
28679 +static const struct ata_port_operations winbond_port_ops = {
28680 .inherits = &legacy_base_port_ops,
28681 .set_piomode = winbond_set_piomode,
28682 .sff_data_xfer = vlb32_data_xfer,
28683 @@ -978,7 +978,7 @@ static __init int legacy_init_one(struct legacy_probe *probe)
28684 int pio_modes = controller->pio_mask;
28685 unsigned long io = probe->port;
28686 u32 mask = (1 << probe->slot);
28687 - struct ata_port_operations *ops = controller->ops;
28688 + const struct ata_port_operations *ops = controller->ops;
28689 struct legacy_data *ld = &legacy_data[probe->slot];
28690 struct ata_host *host = NULL;
28691 struct ata_port *ap;
28692 diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
28693 index 2096fb7..4d090fc 100644
28694 --- a/drivers/ata/pata_marvell.c
28695 +++ b/drivers/ata/pata_marvell.c
28696 @@ -100,7 +100,7 @@ static struct scsi_host_template marvell_sht = {
28697 ATA_BMDMA_SHT(DRV_NAME),
28698 };
28699
28700 -static struct ata_port_operations marvell_ops = {
28701 +static const struct ata_port_operations marvell_ops = {
28702 .inherits = &ata_bmdma_port_ops,
28703 .cable_detect = marvell_cable_detect,
28704 .prereset = marvell_pre_reset,
28705 diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
28706 index 99d41be..7d56aa8 100644
28707 --- a/drivers/ata/pata_mpc52xx.c
28708 +++ b/drivers/ata/pata_mpc52xx.c
28709 @@ -609,7 +609,7 @@ static struct scsi_host_template mpc52xx_ata_sht = {
28710 ATA_PIO_SHT(DRV_NAME),
28711 };
28712
28713 -static struct ata_port_operations mpc52xx_ata_port_ops = {
28714 +static const struct ata_port_operations mpc52xx_ata_port_ops = {
28715 .inherits = &ata_bmdma_port_ops,
28716 .sff_dev_select = mpc52xx_ata_dev_select,
28717 .set_piomode = mpc52xx_ata_set_piomode,
28718 diff --git a/drivers/ata/pata_mpiix.c b/drivers/ata/pata_mpiix.c
28719 index b21f002..0a27e7f 100644
28720 --- a/drivers/ata/pata_mpiix.c
28721 +++ b/drivers/ata/pata_mpiix.c
28722 @@ -140,7 +140,7 @@ static struct scsi_host_template mpiix_sht = {
28723 ATA_PIO_SHT(DRV_NAME),
28724 };
28725
28726 -static struct ata_port_operations mpiix_port_ops = {
28727 +static const struct ata_port_operations mpiix_port_ops = {
28728 .inherits = &ata_sff_port_ops,
28729 .qc_issue = mpiix_qc_issue,
28730 .cable_detect = ata_cable_40wire,
28731 diff --git a/drivers/ata/pata_netcell.c b/drivers/ata/pata_netcell.c
28732 index f0d52f7..89c3be3 100644
28733 --- a/drivers/ata/pata_netcell.c
28734 +++ b/drivers/ata/pata_netcell.c
28735 @@ -34,7 +34,7 @@ static struct scsi_host_template netcell_sht = {
28736 ATA_BMDMA_SHT(DRV_NAME),
28737 };
28738
28739 -static struct ata_port_operations netcell_ops = {
28740 +static const struct ata_port_operations netcell_ops = {
28741 .inherits = &ata_bmdma_port_ops,
28742 .cable_detect = ata_cable_80wire,
28743 .read_id = netcell_read_id,
28744 diff --git a/drivers/ata/pata_ninja32.c b/drivers/ata/pata_ninja32.c
28745 index dd53a66..a3f4317 100644
28746 --- a/drivers/ata/pata_ninja32.c
28747 +++ b/drivers/ata/pata_ninja32.c
28748 @@ -81,7 +81,7 @@ static struct scsi_host_template ninja32_sht = {
28749 ATA_BMDMA_SHT(DRV_NAME),
28750 };
28751
28752 -static struct ata_port_operations ninja32_port_ops = {
28753 +static const struct ata_port_operations ninja32_port_ops = {
28754 .inherits = &ata_bmdma_port_ops,
28755 .sff_dev_select = ninja32_dev_select,
28756 .cable_detect = ata_cable_40wire,
28757 diff --git a/drivers/ata/pata_ns87410.c b/drivers/ata/pata_ns87410.c
28758 index ca53fac..9aa93ef 100644
28759 --- a/drivers/ata/pata_ns87410.c
28760 +++ b/drivers/ata/pata_ns87410.c
28761 @@ -132,7 +132,7 @@ static struct scsi_host_template ns87410_sht = {
28762 ATA_PIO_SHT(DRV_NAME),
28763 };
28764
28765 -static struct ata_port_operations ns87410_port_ops = {
28766 +static const struct ata_port_operations ns87410_port_ops = {
28767 .inherits = &ata_sff_port_ops,
28768 .qc_issue = ns87410_qc_issue,
28769 .cable_detect = ata_cable_40wire,
28770 diff --git a/drivers/ata/pata_ns87415.c b/drivers/ata/pata_ns87415.c
28771 index 773b159..55f454e 100644
28772 --- a/drivers/ata/pata_ns87415.c
28773 +++ b/drivers/ata/pata_ns87415.c
28774 @@ -299,7 +299,7 @@ static u8 ns87560_bmdma_status(struct ata_port *ap)
28775 }
28776 #endif /* 87560 SuperIO Support */
28777
28778 -static struct ata_port_operations ns87415_pata_ops = {
28779 +static const struct ata_port_operations ns87415_pata_ops = {
28780 .inherits = &ata_bmdma_port_ops,
28781
28782 .check_atapi_dma = ns87415_check_atapi_dma,
28783 @@ -313,7 +313,7 @@ static struct ata_port_operations ns87415_pata_ops = {
28784 };
28785
28786 #if defined(CONFIG_SUPERIO)
28787 -static struct ata_port_operations ns87560_pata_ops = {
28788 +static const struct ata_port_operations ns87560_pata_ops = {
28789 .inherits = &ns87415_pata_ops,
28790 .sff_tf_read = ns87560_tf_read,
28791 .sff_check_status = ns87560_check_status,
28792 diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
28793 index d6f6956..639295b 100644
28794 --- a/drivers/ata/pata_octeon_cf.c
28795 +++ b/drivers/ata/pata_octeon_cf.c
28796 @@ -801,6 +801,7 @@ static unsigned int octeon_cf_qc_issue(struct ata_queued_cmd *qc)
28797 return 0;
28798 }
28799
28800 +/* cannot be const */
28801 static struct ata_port_operations octeon_cf_ops = {
28802 .inherits = &ata_sff_port_ops,
28803 .check_atapi_dma = octeon_cf_check_atapi_dma,
28804 diff --git a/drivers/ata/pata_oldpiix.c b/drivers/ata/pata_oldpiix.c
28805 index 84ac503..adee1cd 100644
28806 --- a/drivers/ata/pata_oldpiix.c
28807 +++ b/drivers/ata/pata_oldpiix.c
28808 @@ -208,7 +208,7 @@ static struct scsi_host_template oldpiix_sht = {
28809 ATA_BMDMA_SHT(DRV_NAME),
28810 };
28811
28812 -static struct ata_port_operations oldpiix_pata_ops = {
28813 +static const struct ata_port_operations oldpiix_pata_ops = {
28814 .inherits = &ata_bmdma_port_ops,
28815 .qc_issue = oldpiix_qc_issue,
28816 .cable_detect = ata_cable_40wire,
28817 diff --git a/drivers/ata/pata_opti.c b/drivers/ata/pata_opti.c
28818 index 99eddda..3a4c0aa 100644
28819 --- a/drivers/ata/pata_opti.c
28820 +++ b/drivers/ata/pata_opti.c
28821 @@ -152,7 +152,7 @@ static struct scsi_host_template opti_sht = {
28822 ATA_PIO_SHT(DRV_NAME),
28823 };
28824
28825 -static struct ata_port_operations opti_port_ops = {
28826 +static const struct ata_port_operations opti_port_ops = {
28827 .inherits = &ata_sff_port_ops,
28828 .cable_detect = ata_cable_40wire,
28829 .set_piomode = opti_set_piomode,
28830 diff --git a/drivers/ata/pata_optidma.c b/drivers/ata/pata_optidma.c
28831 index 86885a4..8e9968d 100644
28832 --- a/drivers/ata/pata_optidma.c
28833 +++ b/drivers/ata/pata_optidma.c
28834 @@ -337,7 +337,7 @@ static struct scsi_host_template optidma_sht = {
28835 ATA_BMDMA_SHT(DRV_NAME),
28836 };
28837
28838 -static struct ata_port_operations optidma_port_ops = {
28839 +static const struct ata_port_operations optidma_port_ops = {
28840 .inherits = &ata_bmdma_port_ops,
28841 .cable_detect = ata_cable_40wire,
28842 .set_piomode = optidma_set_pio_mode,
28843 @@ -346,7 +346,7 @@ static struct ata_port_operations optidma_port_ops = {
28844 .prereset = optidma_pre_reset,
28845 };
28846
28847 -static struct ata_port_operations optiplus_port_ops = {
28848 +static const struct ata_port_operations optiplus_port_ops = {
28849 .inherits = &optidma_port_ops,
28850 .set_piomode = optiplus_set_pio_mode,
28851 .set_dmamode = optiplus_set_dma_mode,
28852 diff --git a/drivers/ata/pata_palmld.c b/drivers/ata/pata_palmld.c
28853 index 11fb4cc..1a14022 100644
28854 --- a/drivers/ata/pata_palmld.c
28855 +++ b/drivers/ata/pata_palmld.c
28856 @@ -37,7 +37,7 @@ static struct scsi_host_template palmld_sht = {
28857 ATA_PIO_SHT(DRV_NAME),
28858 };
28859
28860 -static struct ata_port_operations palmld_port_ops = {
28861 +static const struct ata_port_operations palmld_port_ops = {
28862 .inherits = &ata_sff_port_ops,
28863 .sff_data_xfer = ata_sff_data_xfer_noirq,
28864 .cable_detect = ata_cable_40wire,
28865 diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
28866 index dc99e26..7f4b1e4 100644
28867 --- a/drivers/ata/pata_pcmcia.c
28868 +++ b/drivers/ata/pata_pcmcia.c
28869 @@ -162,14 +162,14 @@ static struct scsi_host_template pcmcia_sht = {
28870 ATA_PIO_SHT(DRV_NAME),
28871 };
28872
28873 -static struct ata_port_operations pcmcia_port_ops = {
28874 +static const struct ata_port_operations pcmcia_port_ops = {
28875 .inherits = &ata_sff_port_ops,
28876 .sff_data_xfer = ata_sff_data_xfer_noirq,
28877 .cable_detect = ata_cable_40wire,
28878 .set_mode = pcmcia_set_mode,
28879 };
28880
28881 -static struct ata_port_operations pcmcia_8bit_port_ops = {
28882 +static const struct ata_port_operations pcmcia_8bit_port_ops = {
28883 .inherits = &ata_sff_port_ops,
28884 .sff_data_xfer = ata_data_xfer_8bit,
28885 .cable_detect = ata_cable_40wire,
28886 @@ -256,7 +256,7 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
28887 unsigned long io_base, ctl_base;
28888 void __iomem *io_addr, *ctl_addr;
28889 int n_ports = 1;
28890 - struct ata_port_operations *ops = &pcmcia_port_ops;
28891 + const struct ata_port_operations *ops = &pcmcia_port_ops;
28892
28893 info = kzalloc(sizeof(*info), GFP_KERNEL);
28894 if (info == NULL)
28895 diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
28896 index ca5cad0..3a1f125 100644
28897 --- a/drivers/ata/pata_pdc2027x.c
28898 +++ b/drivers/ata/pata_pdc2027x.c
28899 @@ -132,14 +132,14 @@ static struct scsi_host_template pdc2027x_sht = {
28900 ATA_BMDMA_SHT(DRV_NAME),
28901 };
28902
28903 -static struct ata_port_operations pdc2027x_pata100_ops = {
28904 +static const struct ata_port_operations pdc2027x_pata100_ops = {
28905 .inherits = &ata_bmdma_port_ops,
28906 .check_atapi_dma = pdc2027x_check_atapi_dma,
28907 .cable_detect = pdc2027x_cable_detect,
28908 .prereset = pdc2027x_prereset,
28909 };
28910
28911 -static struct ata_port_operations pdc2027x_pata133_ops = {
28912 +static const struct ata_port_operations pdc2027x_pata133_ops = {
28913 .inherits = &pdc2027x_pata100_ops,
28914 .mode_filter = pdc2027x_mode_filter,
28915 .set_piomode = pdc2027x_set_piomode,
28916 diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c
28917 index 2911120..4bf62aa 100644
28918 --- a/drivers/ata/pata_pdc202xx_old.c
28919 +++ b/drivers/ata/pata_pdc202xx_old.c
28920 @@ -274,7 +274,7 @@ static struct scsi_host_template pdc202xx_sht = {
28921 ATA_BMDMA_SHT(DRV_NAME),
28922 };
28923
28924 -static struct ata_port_operations pdc2024x_port_ops = {
28925 +static const struct ata_port_operations pdc2024x_port_ops = {
28926 .inherits = &ata_bmdma_port_ops,
28927
28928 .cable_detect = ata_cable_40wire,
28929 @@ -284,7 +284,7 @@ static struct ata_port_operations pdc2024x_port_ops = {
28930 .sff_exec_command = pdc202xx_exec_command,
28931 };
28932
28933 -static struct ata_port_operations pdc2026x_port_ops = {
28934 +static const struct ata_port_operations pdc2026x_port_ops = {
28935 .inherits = &pdc2024x_port_ops,
28936
28937 .check_atapi_dma = pdc2026x_check_atapi_dma,
28938 diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
28939 index 3f6ebc6..a18c358 100644
28940 --- a/drivers/ata/pata_platform.c
28941 +++ b/drivers/ata/pata_platform.c
28942 @@ -48,7 +48,7 @@ static struct scsi_host_template pata_platform_sht = {
28943 ATA_PIO_SHT(DRV_NAME),
28944 };
28945
28946 -static struct ata_port_operations pata_platform_port_ops = {
28947 +static const struct ata_port_operations pata_platform_port_ops = {
28948 .inherits = &ata_sff_port_ops,
28949 .sff_data_xfer = ata_sff_data_xfer_noirq,
28950 .cable_detect = ata_cable_unknown,
28951 diff --git a/drivers/ata/pata_qdi.c b/drivers/ata/pata_qdi.c
28952 index 45879dc..165a9f9 100644
28953 --- a/drivers/ata/pata_qdi.c
28954 +++ b/drivers/ata/pata_qdi.c
28955 @@ -157,7 +157,7 @@ static struct scsi_host_template qdi_sht = {
28956 ATA_PIO_SHT(DRV_NAME),
28957 };
28958
28959 -static struct ata_port_operations qdi6500_port_ops = {
28960 +static const struct ata_port_operations qdi6500_port_ops = {
28961 .inherits = &ata_sff_port_ops,
28962 .qc_issue = qdi_qc_issue,
28963 .sff_data_xfer = qdi_data_xfer,
28964 @@ -165,7 +165,7 @@ static struct ata_port_operations qdi6500_port_ops = {
28965 .set_piomode = qdi6500_set_piomode,
28966 };
28967
28968 -static struct ata_port_operations qdi6580_port_ops = {
28969 +static const struct ata_port_operations qdi6580_port_ops = {
28970 .inherits = &qdi6500_port_ops,
28971 .set_piomode = qdi6580_set_piomode,
28972 };
28973 diff --git a/drivers/ata/pata_radisys.c b/drivers/ata/pata_radisys.c
28974 index 4401b33..716c5cc 100644
28975 --- a/drivers/ata/pata_radisys.c
28976 +++ b/drivers/ata/pata_radisys.c
28977 @@ -187,7 +187,7 @@ static struct scsi_host_template radisys_sht = {
28978 ATA_BMDMA_SHT(DRV_NAME),
28979 };
28980
28981 -static struct ata_port_operations radisys_pata_ops = {
28982 +static const struct ata_port_operations radisys_pata_ops = {
28983 .inherits = &ata_bmdma_port_ops,
28984 .qc_issue = radisys_qc_issue,
28985 .cable_detect = ata_cable_unknown,
28986 diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
28987 index 45f1e10..fab6bca 100644
28988 --- a/drivers/ata/pata_rb532_cf.c
28989 +++ b/drivers/ata/pata_rb532_cf.c
28990 @@ -68,7 +68,7 @@ static irqreturn_t rb532_pata_irq_handler(int irq, void *dev_instance)
28991 return IRQ_HANDLED;
28992 }
28993
28994 -static struct ata_port_operations rb532_pata_port_ops = {
28995 +static const struct ata_port_operations rb532_pata_port_ops = {
28996 .inherits = &ata_sff_port_ops,
28997 .sff_data_xfer = ata_sff_data_xfer32,
28998 };
28999 diff --git a/drivers/ata/pata_rdc.c b/drivers/ata/pata_rdc.c
29000 index c843a1e..b5853c3 100644
29001 --- a/drivers/ata/pata_rdc.c
29002 +++ b/drivers/ata/pata_rdc.c
29003 @@ -272,7 +272,7 @@ static void rdc_set_dmamode(struct ata_port *ap, struct ata_device *adev)
29004 pci_write_config_byte(dev, 0x48, udma_enable);
29005 }
29006
29007 -static struct ata_port_operations rdc_pata_ops = {
29008 +static const struct ata_port_operations rdc_pata_ops = {
29009 .inherits = &ata_bmdma32_port_ops,
29010 .cable_detect = rdc_pata_cable_detect,
29011 .set_piomode = rdc_set_piomode,
29012 diff --git a/drivers/ata/pata_rz1000.c b/drivers/ata/pata_rz1000.c
29013 index a5e4dfe..080c8c9 100644
29014 --- a/drivers/ata/pata_rz1000.c
29015 +++ b/drivers/ata/pata_rz1000.c
29016 @@ -54,7 +54,7 @@ static struct scsi_host_template rz1000_sht = {
29017 ATA_PIO_SHT(DRV_NAME),
29018 };
29019
29020 -static struct ata_port_operations rz1000_port_ops = {
29021 +static const struct ata_port_operations rz1000_port_ops = {
29022 .inherits = &ata_sff_port_ops,
29023 .cable_detect = ata_cable_40wire,
29024 .set_mode = rz1000_set_mode,
29025 diff --git a/drivers/ata/pata_sc1200.c b/drivers/ata/pata_sc1200.c
29026 index 3bbed83..e309daf 100644
29027 --- a/drivers/ata/pata_sc1200.c
29028 +++ b/drivers/ata/pata_sc1200.c
29029 @@ -207,7 +207,7 @@ static struct scsi_host_template sc1200_sht = {
29030 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
29031 };
29032
29033 -static struct ata_port_operations sc1200_port_ops = {
29034 +static const struct ata_port_operations sc1200_port_ops = {
29035 .inherits = &ata_bmdma_port_ops,
29036 .qc_prep = ata_sff_dumb_qc_prep,
29037 .qc_issue = sc1200_qc_issue,
29038 diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
29039 index 4257d6b..4c1d9d5 100644
29040 --- a/drivers/ata/pata_scc.c
29041 +++ b/drivers/ata/pata_scc.c
29042 @@ -965,7 +965,7 @@ static struct scsi_host_template scc_sht = {
29043 ATA_BMDMA_SHT(DRV_NAME),
29044 };
29045
29046 -static struct ata_port_operations scc_pata_ops = {
29047 +static const struct ata_port_operations scc_pata_ops = {
29048 .inherits = &ata_bmdma_port_ops,
29049
29050 .set_piomode = scc_set_piomode,
29051 diff --git a/drivers/ata/pata_sch.c b/drivers/ata/pata_sch.c
29052 index 99cceb4..e2e0a87 100644
29053 --- a/drivers/ata/pata_sch.c
29054 +++ b/drivers/ata/pata_sch.c
29055 @@ -75,7 +75,7 @@ static struct scsi_host_template sch_sht = {
29056 ATA_BMDMA_SHT(DRV_NAME),
29057 };
29058
29059 -static struct ata_port_operations sch_pata_ops = {
29060 +static const struct ata_port_operations sch_pata_ops = {
29061 .inherits = &ata_bmdma_port_ops,
29062 .cable_detect = ata_cable_unknown,
29063 .set_piomode = sch_set_piomode,
29064 diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
29065 index beaed12..39969f1 100644
29066 --- a/drivers/ata/pata_serverworks.c
29067 +++ b/drivers/ata/pata_serverworks.c
29068 @@ -299,7 +299,7 @@ static struct scsi_host_template serverworks_sht = {
29069 ATA_BMDMA_SHT(DRV_NAME),
29070 };
29071
29072 -static struct ata_port_operations serverworks_osb4_port_ops = {
29073 +static const struct ata_port_operations serverworks_osb4_port_ops = {
29074 .inherits = &ata_bmdma_port_ops,
29075 .cable_detect = serverworks_cable_detect,
29076 .mode_filter = serverworks_osb4_filter,
29077 @@ -307,7 +307,7 @@ static struct ata_port_operations serverworks_osb4_port_ops = {
29078 .set_dmamode = serverworks_set_dmamode,
29079 };
29080
29081 -static struct ata_port_operations serverworks_csb_port_ops = {
29082 +static const struct ata_port_operations serverworks_csb_port_ops = {
29083 .inherits = &serverworks_osb4_port_ops,
29084 .mode_filter = serverworks_csb_filter,
29085 };
29086 diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c
29087 index a2ace48..0463b44 100644
29088 --- a/drivers/ata/pata_sil680.c
29089 +++ b/drivers/ata/pata_sil680.c
29090 @@ -194,7 +194,7 @@ static struct scsi_host_template sil680_sht = {
29091 ATA_BMDMA_SHT(DRV_NAME),
29092 };
29093
29094 -static struct ata_port_operations sil680_port_ops = {
29095 +static const struct ata_port_operations sil680_port_ops = {
29096 .inherits = &ata_bmdma32_port_ops,
29097 .cable_detect = sil680_cable_detect,
29098 .set_piomode = sil680_set_piomode,
29099 diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
29100 index 488e77b..b3724d5 100644
29101 --- a/drivers/ata/pata_sis.c
29102 +++ b/drivers/ata/pata_sis.c
29103 @@ -503,47 +503,47 @@ static struct scsi_host_template sis_sht = {
29104 ATA_BMDMA_SHT(DRV_NAME),
29105 };
29106
29107 -static struct ata_port_operations sis_133_for_sata_ops = {
29108 +static const struct ata_port_operations sis_133_for_sata_ops = {
29109 .inherits = &ata_bmdma_port_ops,
29110 .set_piomode = sis_133_set_piomode,
29111 .set_dmamode = sis_133_set_dmamode,
29112 .cable_detect = sis_133_cable_detect,
29113 };
29114
29115 -static struct ata_port_operations sis_base_ops = {
29116 +static const struct ata_port_operations sis_base_ops = {
29117 .inherits = &ata_bmdma_port_ops,
29118 .prereset = sis_pre_reset,
29119 };
29120
29121 -static struct ata_port_operations sis_133_ops = {
29122 +static const struct ata_port_operations sis_133_ops = {
29123 .inherits = &sis_base_ops,
29124 .set_piomode = sis_133_set_piomode,
29125 .set_dmamode = sis_133_set_dmamode,
29126 .cable_detect = sis_133_cable_detect,
29127 };
29128
29129 -static struct ata_port_operations sis_133_early_ops = {
29130 +static const struct ata_port_operations sis_133_early_ops = {
29131 .inherits = &sis_base_ops,
29132 .set_piomode = sis_100_set_piomode,
29133 .set_dmamode = sis_133_early_set_dmamode,
29134 .cable_detect = sis_66_cable_detect,
29135 };
29136
29137 -static struct ata_port_operations sis_100_ops = {
29138 +static const struct ata_port_operations sis_100_ops = {
29139 .inherits = &sis_base_ops,
29140 .set_piomode = sis_100_set_piomode,
29141 .set_dmamode = sis_100_set_dmamode,
29142 .cable_detect = sis_66_cable_detect,
29143 };
29144
29145 -static struct ata_port_operations sis_66_ops = {
29146 +static const struct ata_port_operations sis_66_ops = {
29147 .inherits = &sis_base_ops,
29148 .set_piomode = sis_old_set_piomode,
29149 .set_dmamode = sis_66_set_dmamode,
29150 .cable_detect = sis_66_cable_detect,
29151 };
29152
29153 -static struct ata_port_operations sis_old_ops = {
29154 +static const struct ata_port_operations sis_old_ops = {
29155 .inherits = &sis_base_ops,
29156 .set_piomode = sis_old_set_piomode,
29157 .set_dmamode = sis_old_set_dmamode,
29158 diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c
29159 index 29f733c..43e9ca0 100644
29160 --- a/drivers/ata/pata_sl82c105.c
29161 +++ b/drivers/ata/pata_sl82c105.c
29162 @@ -231,7 +231,7 @@ static struct scsi_host_template sl82c105_sht = {
29163 ATA_BMDMA_SHT(DRV_NAME),
29164 };
29165
29166 -static struct ata_port_operations sl82c105_port_ops = {
29167 +static const struct ata_port_operations sl82c105_port_ops = {
29168 .inherits = &ata_bmdma_port_ops,
29169 .qc_defer = sl82c105_qc_defer,
29170 .bmdma_start = sl82c105_bmdma_start,
29171 diff --git a/drivers/ata/pata_triflex.c b/drivers/ata/pata_triflex.c
29172 index f1f13ff..df39e99 100644
29173 --- a/drivers/ata/pata_triflex.c
29174 +++ b/drivers/ata/pata_triflex.c
29175 @@ -178,7 +178,7 @@ static struct scsi_host_template triflex_sht = {
29176 ATA_BMDMA_SHT(DRV_NAME),
29177 };
29178
29179 -static struct ata_port_operations triflex_port_ops = {
29180 +static const struct ata_port_operations triflex_port_ops = {
29181 .inherits = &ata_bmdma_port_ops,
29182 .bmdma_start = triflex_bmdma_start,
29183 .bmdma_stop = triflex_bmdma_stop,
29184 diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
29185 index 1d73b8d..98a4b29 100644
29186 --- a/drivers/ata/pata_via.c
29187 +++ b/drivers/ata/pata_via.c
29188 @@ -419,7 +419,7 @@ static struct scsi_host_template via_sht = {
29189 ATA_BMDMA_SHT(DRV_NAME),
29190 };
29191
29192 -static struct ata_port_operations via_port_ops = {
29193 +static const struct ata_port_operations via_port_ops = {
29194 .inherits = &ata_bmdma_port_ops,
29195 .cable_detect = via_cable_detect,
29196 .set_piomode = via_set_piomode,
29197 @@ -429,7 +429,7 @@ static struct ata_port_operations via_port_ops = {
29198 .port_start = via_port_start,
29199 };
29200
29201 -static struct ata_port_operations via_port_ops_noirq = {
29202 +static const struct ata_port_operations via_port_ops_noirq = {
29203 .inherits = &via_port_ops,
29204 .sff_data_xfer = ata_sff_data_xfer_noirq,
29205 };
29206 diff --git a/drivers/ata/pata_winbond.c b/drivers/ata/pata_winbond.c
29207 index 6d8619b..ad511c4 100644
29208 --- a/drivers/ata/pata_winbond.c
29209 +++ b/drivers/ata/pata_winbond.c
29210 @@ -125,7 +125,7 @@ static struct scsi_host_template winbond_sht = {
29211 ATA_PIO_SHT(DRV_NAME),
29212 };
29213
29214 -static struct ata_port_operations winbond_port_ops = {
29215 +static const struct ata_port_operations winbond_port_ops = {
29216 .inherits = &ata_sff_port_ops,
29217 .sff_data_xfer = winbond_data_xfer,
29218 .cable_detect = ata_cable_40wire,
29219 diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
29220 index 6c65b07..f996ec7 100644
29221 --- a/drivers/ata/pdc_adma.c
29222 +++ b/drivers/ata/pdc_adma.c
29223 @@ -145,7 +145,7 @@ static struct scsi_host_template adma_ata_sht = {
29224 .dma_boundary = ADMA_DMA_BOUNDARY,
29225 };
29226
29227 -static struct ata_port_operations adma_ata_ops = {
29228 +static const struct ata_port_operations adma_ata_ops = {
29229 .inherits = &ata_sff_port_ops,
29230
29231 .lost_interrupt = ATA_OP_NULL,
29232 diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
29233 index 172b57e..c49bc1e 100644
29234 --- a/drivers/ata/sata_fsl.c
29235 +++ b/drivers/ata/sata_fsl.c
29236 @@ -1258,7 +1258,7 @@ static struct scsi_host_template sata_fsl_sht = {
29237 .dma_boundary = ATA_DMA_BOUNDARY,
29238 };
29239
29240 -static struct ata_port_operations sata_fsl_ops = {
29241 +static const struct ata_port_operations sata_fsl_ops = {
29242 .inherits = &sata_pmp_port_ops,
29243
29244 .qc_defer = ata_std_qc_defer,
29245 diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
29246 index 4406902..60603ef 100644
29247 --- a/drivers/ata/sata_inic162x.c
29248 +++ b/drivers/ata/sata_inic162x.c
29249 @@ -721,7 +721,7 @@ static int inic_port_start(struct ata_port *ap)
29250 return 0;
29251 }
29252
29253 -static struct ata_port_operations inic_port_ops = {
29254 +static const struct ata_port_operations inic_port_ops = {
29255 .inherits = &sata_port_ops,
29256
29257 .check_atapi_dma = inic_check_atapi_dma,
29258 diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
29259 index cf41126..8107be6 100644
29260 --- a/drivers/ata/sata_mv.c
29261 +++ b/drivers/ata/sata_mv.c
29262 @@ -656,7 +656,7 @@ static struct scsi_host_template mv6_sht = {
29263 .dma_boundary = MV_DMA_BOUNDARY,
29264 };
29265
29266 -static struct ata_port_operations mv5_ops = {
29267 +static const struct ata_port_operations mv5_ops = {
29268 .inherits = &ata_sff_port_ops,
29269
29270 .lost_interrupt = ATA_OP_NULL,
29271 @@ -678,7 +678,7 @@ static struct ata_port_operations mv5_ops = {
29272 .port_stop = mv_port_stop,
29273 };
29274
29275 -static struct ata_port_operations mv6_ops = {
29276 +static const struct ata_port_operations mv6_ops = {
29277 .inherits = &mv5_ops,
29278 .dev_config = mv6_dev_config,
29279 .scr_read = mv_scr_read,
29280 @@ -698,7 +698,7 @@ static struct ata_port_operations mv6_ops = {
29281 .bmdma_status = mv_bmdma_status,
29282 };
29283
29284 -static struct ata_port_operations mv_iie_ops = {
29285 +static const struct ata_port_operations mv_iie_ops = {
29286 .inherits = &mv6_ops,
29287 .dev_config = ATA_OP_NULL,
29288 .qc_prep = mv_qc_prep_iie,
29289 diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
29290 index ae2297c..d5c9c33 100644
29291 --- a/drivers/ata/sata_nv.c
29292 +++ b/drivers/ata/sata_nv.c
29293 @@ -464,7 +464,7 @@ static struct scsi_host_template nv_swncq_sht = {
29294 * cases. Define nv_hardreset() which only kicks in for post-boot
29295 * probing and use it for all variants.
29296 */
29297 -static struct ata_port_operations nv_generic_ops = {
29298 +static const struct ata_port_operations nv_generic_ops = {
29299 .inherits = &ata_bmdma_port_ops,
29300 .lost_interrupt = ATA_OP_NULL,
29301 .scr_read = nv_scr_read,
29302 @@ -472,20 +472,20 @@ static struct ata_port_operations nv_generic_ops = {
29303 .hardreset = nv_hardreset,
29304 };
29305
29306 -static struct ata_port_operations nv_nf2_ops = {
29307 +static const struct ata_port_operations nv_nf2_ops = {
29308 .inherits = &nv_generic_ops,
29309 .freeze = nv_nf2_freeze,
29310 .thaw = nv_nf2_thaw,
29311 };
29312
29313 -static struct ata_port_operations nv_ck804_ops = {
29314 +static const struct ata_port_operations nv_ck804_ops = {
29315 .inherits = &nv_generic_ops,
29316 .freeze = nv_ck804_freeze,
29317 .thaw = nv_ck804_thaw,
29318 .host_stop = nv_ck804_host_stop,
29319 };
29320
29321 -static struct ata_port_operations nv_adma_ops = {
29322 +static const struct ata_port_operations nv_adma_ops = {
29323 .inherits = &nv_ck804_ops,
29324
29325 .check_atapi_dma = nv_adma_check_atapi_dma,
29326 @@ -509,7 +509,7 @@ static struct ata_port_operations nv_adma_ops = {
29327 .host_stop = nv_adma_host_stop,
29328 };
29329
29330 -static struct ata_port_operations nv_swncq_ops = {
29331 +static const struct ata_port_operations nv_swncq_ops = {
29332 .inherits = &nv_generic_ops,
29333
29334 .qc_defer = ata_std_qc_defer,
29335 diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
29336 index 07d8d00..6cc70bb 100644
29337 --- a/drivers/ata/sata_promise.c
29338 +++ b/drivers/ata/sata_promise.c
29339 @@ -195,7 +195,7 @@ static const struct ata_port_operations pdc_common_ops = {
29340 .error_handler = pdc_error_handler,
29341 };
29342
29343 -static struct ata_port_operations pdc_sata_ops = {
29344 +static const struct ata_port_operations pdc_sata_ops = {
29345 .inherits = &pdc_common_ops,
29346 .cable_detect = pdc_sata_cable_detect,
29347 .freeze = pdc_sata_freeze,
29348 @@ -208,14 +208,14 @@ static struct ata_port_operations pdc_sata_ops = {
29349
29350 /* First-generation chips need a more restrictive ->check_atapi_dma op,
29351 and ->freeze/thaw that ignore the hotplug controls. */
29352 -static struct ata_port_operations pdc_old_sata_ops = {
29353 +static const struct ata_port_operations pdc_old_sata_ops = {
29354 .inherits = &pdc_sata_ops,
29355 .freeze = pdc_freeze,
29356 .thaw = pdc_thaw,
29357 .check_atapi_dma = pdc_old_sata_check_atapi_dma,
29358 };
29359
29360 -static struct ata_port_operations pdc_pata_ops = {
29361 +static const struct ata_port_operations pdc_pata_ops = {
29362 .inherits = &pdc_common_ops,
29363 .cable_detect = pdc_pata_cable_detect,
29364 .freeze = pdc_freeze,
29365 diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
29366 index 326c0cf..36ecebe 100644
29367 --- a/drivers/ata/sata_qstor.c
29368 +++ b/drivers/ata/sata_qstor.c
29369 @@ -132,7 +132,7 @@ static struct scsi_host_template qs_ata_sht = {
29370 .dma_boundary = QS_DMA_BOUNDARY,
29371 };
29372
29373 -static struct ata_port_operations qs_ata_ops = {
29374 +static const struct ata_port_operations qs_ata_ops = {
29375 .inherits = &ata_sff_port_ops,
29376
29377 .check_atapi_dma = qs_check_atapi_dma,
29378 diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
29379 index 3cb69d5..0871d3c 100644
29380 --- a/drivers/ata/sata_sil.c
29381 +++ b/drivers/ata/sata_sil.c
29382 @@ -182,7 +182,7 @@ static struct scsi_host_template sil_sht = {
29383 .sg_tablesize = ATA_MAX_PRD
29384 };
29385
29386 -static struct ata_port_operations sil_ops = {
29387 +static const struct ata_port_operations sil_ops = {
29388 .inherits = &ata_bmdma32_port_ops,
29389 .dev_config = sil_dev_config,
29390 .set_mode = sil_set_mode,
29391 diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
29392 index e6946fc..eddb794 100644
29393 --- a/drivers/ata/sata_sil24.c
29394 +++ b/drivers/ata/sata_sil24.c
29395 @@ -388,7 +388,7 @@ static struct scsi_host_template sil24_sht = {
29396 .dma_boundary = ATA_DMA_BOUNDARY,
29397 };
29398
29399 -static struct ata_port_operations sil24_ops = {
29400 +static const struct ata_port_operations sil24_ops = {
29401 .inherits = &sata_pmp_port_ops,
29402
29403 .qc_defer = sil24_qc_defer,
29404 diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
29405 index f8a91bf..9cb06b6 100644
29406 --- a/drivers/ata/sata_sis.c
29407 +++ b/drivers/ata/sata_sis.c
29408 @@ -89,7 +89,7 @@ static struct scsi_host_template sis_sht = {
29409 ATA_BMDMA_SHT(DRV_NAME),
29410 };
29411
29412 -static struct ata_port_operations sis_ops = {
29413 +static const struct ata_port_operations sis_ops = {
29414 .inherits = &ata_bmdma_port_ops,
29415 .scr_read = sis_scr_read,
29416 .scr_write = sis_scr_write,
29417 diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
29418 index 7257f2d..d04c6f5 100644
29419 --- a/drivers/ata/sata_svw.c
29420 +++ b/drivers/ata/sata_svw.c
29421 @@ -344,7 +344,7 @@ static struct scsi_host_template k2_sata_sht = {
29422 };
29423
29424
29425 -static struct ata_port_operations k2_sata_ops = {
29426 +static const struct ata_port_operations k2_sata_ops = {
29427 .inherits = &ata_bmdma_port_ops,
29428 .sff_tf_load = k2_sata_tf_load,
29429 .sff_tf_read = k2_sata_tf_read,
29430 diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
29431 index bbcf970..cd0df0d 100644
29432 --- a/drivers/ata/sata_sx4.c
29433 +++ b/drivers/ata/sata_sx4.c
29434 @@ -248,7 +248,7 @@ static struct scsi_host_template pdc_sata_sht = {
29435 };
29436
29437 /* TODO: inherit from base port_ops after converting to new EH */
29438 -static struct ata_port_operations pdc_20621_ops = {
29439 +static const struct ata_port_operations pdc_20621_ops = {
29440 .inherits = &ata_sff_port_ops,
29441
29442 .check_atapi_dma = pdc_check_atapi_dma,
29443 diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c
29444 index e5bff47..089d859 100644
29445 --- a/drivers/ata/sata_uli.c
29446 +++ b/drivers/ata/sata_uli.c
29447 @@ -79,7 +79,7 @@ static struct scsi_host_template uli_sht = {
29448 ATA_BMDMA_SHT(DRV_NAME),
29449 };
29450
29451 -static struct ata_port_operations uli_ops = {
29452 +static const struct ata_port_operations uli_ops = {
29453 .inherits = &ata_bmdma_port_ops,
29454 .scr_read = uli_scr_read,
29455 .scr_write = uli_scr_write,
29456 diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
29457 index f5dcca7..77b94eb 100644
29458 --- a/drivers/ata/sata_via.c
29459 +++ b/drivers/ata/sata_via.c
29460 @@ -115,32 +115,32 @@ static struct scsi_host_template svia_sht = {
29461 ATA_BMDMA_SHT(DRV_NAME),
29462 };
29463
29464 -static struct ata_port_operations svia_base_ops = {
29465 +static const struct ata_port_operations svia_base_ops = {
29466 .inherits = &ata_bmdma_port_ops,
29467 .sff_tf_load = svia_tf_load,
29468 };
29469
29470 -static struct ata_port_operations vt6420_sata_ops = {
29471 +static const struct ata_port_operations vt6420_sata_ops = {
29472 .inherits = &svia_base_ops,
29473 .freeze = svia_noop_freeze,
29474 .prereset = vt6420_prereset,
29475 .bmdma_start = vt6420_bmdma_start,
29476 };
29477
29478 -static struct ata_port_operations vt6421_pata_ops = {
29479 +static const struct ata_port_operations vt6421_pata_ops = {
29480 .inherits = &svia_base_ops,
29481 .cable_detect = vt6421_pata_cable_detect,
29482 .set_piomode = vt6421_set_pio_mode,
29483 .set_dmamode = vt6421_set_dma_mode,
29484 };
29485
29486 -static struct ata_port_operations vt6421_sata_ops = {
29487 +static const struct ata_port_operations vt6421_sata_ops = {
29488 .inherits = &svia_base_ops,
29489 .scr_read = svia_scr_read,
29490 .scr_write = svia_scr_write,
29491 };
29492
29493 -static struct ata_port_operations vt8251_ops = {
29494 +static const struct ata_port_operations vt8251_ops = {
29495 .inherits = &svia_base_ops,
29496 .hardreset = sata_std_hardreset,
29497 .scr_read = vt8251_scr_read,
29498 diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
29499 index 8b2a278..51e65d3 100644
29500 --- a/drivers/ata/sata_vsc.c
29501 +++ b/drivers/ata/sata_vsc.c
29502 @@ -306,7 +306,7 @@ static struct scsi_host_template vsc_sata_sht = {
29503 };
29504
29505
29506 -static struct ata_port_operations vsc_sata_ops = {
29507 +static const struct ata_port_operations vsc_sata_ops = {
29508 .inherits = &ata_bmdma_port_ops,
29509 /* The IRQ handling is not quite standard SFF behaviour so we
29510 cannot use the default lost interrupt handler */
29511 diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
29512 index 5effec6..7e4019a 100644
29513 --- a/drivers/atm/adummy.c
29514 +++ b/drivers/atm/adummy.c
29515 @@ -77,7 +77,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
29516 vcc->pop(vcc, skb);
29517 else
29518 dev_kfree_skb_any(skb);
29519 - atomic_inc(&vcc->stats->tx);
29520 + atomic_inc_unchecked(&vcc->stats->tx);
29521
29522 return 0;
29523 }
29524 diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
29525 index 66e1813..26a27c6 100644
29526 --- a/drivers/atm/ambassador.c
29527 +++ b/drivers/atm/ambassador.c
29528 @@ -453,7 +453,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
29529 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
29530
29531 // VC layer stats
29532 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
29533 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
29534
29535 // free the descriptor
29536 kfree (tx_descr);
29537 @@ -494,7 +494,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
29538 dump_skb ("<<<", vc, skb);
29539
29540 // VC layer stats
29541 - atomic_inc(&atm_vcc->stats->rx);
29542 + atomic_inc_unchecked(&atm_vcc->stats->rx);
29543 __net_timestamp(skb);
29544 // end of our responsability
29545 atm_vcc->push (atm_vcc, skb);
29546 @@ -509,7 +509,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
29547 } else {
29548 PRINTK (KERN_INFO, "dropped over-size frame");
29549 // should we count this?
29550 - atomic_inc(&atm_vcc->stats->rx_drop);
29551 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
29552 }
29553
29554 } else {
29555 @@ -1341,7 +1341,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
29556 }
29557
29558 if (check_area (skb->data, skb->len)) {
29559 - atomic_inc(&atm_vcc->stats->tx_err);
29560 + atomic_inc_unchecked(&atm_vcc->stats->tx_err);
29561 return -ENOMEM; // ?
29562 }
29563
29564 diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
29565 index 02ad83d..6daffeb 100644
29566 --- a/drivers/atm/atmtcp.c
29567 +++ b/drivers/atm/atmtcp.c
29568 @@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
29569 if (vcc->pop) vcc->pop(vcc,skb);
29570 else dev_kfree_skb(skb);
29571 if (dev_data) return 0;
29572 - atomic_inc(&vcc->stats->tx_err);
29573 + atomic_inc_unchecked(&vcc->stats->tx_err);
29574 return -ENOLINK;
29575 }
29576 size = skb->len+sizeof(struct atmtcp_hdr);
29577 @@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
29578 if (!new_skb) {
29579 if (vcc->pop) vcc->pop(vcc,skb);
29580 else dev_kfree_skb(skb);
29581 - atomic_inc(&vcc->stats->tx_err);
29582 + atomic_inc_unchecked(&vcc->stats->tx_err);
29583 return -ENOBUFS;
29584 }
29585 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
29586 @@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
29587 if (vcc->pop) vcc->pop(vcc,skb);
29588 else dev_kfree_skb(skb);
29589 out_vcc->push(out_vcc,new_skb);
29590 - atomic_inc(&vcc->stats->tx);
29591 - atomic_inc(&out_vcc->stats->rx);
29592 + atomic_inc_unchecked(&vcc->stats->tx);
29593 + atomic_inc_unchecked(&out_vcc->stats->rx);
29594 return 0;
29595 }
29596
29597 @@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
29598 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
29599 read_unlock(&vcc_sklist_lock);
29600 if (!out_vcc) {
29601 - atomic_inc(&vcc->stats->tx_err);
29602 + atomic_inc_unchecked(&vcc->stats->tx_err);
29603 goto done;
29604 }
29605 skb_pull(skb,sizeof(struct atmtcp_hdr));
29606 @@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
29607 __net_timestamp(new_skb);
29608 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
29609 out_vcc->push(out_vcc,new_skb);
29610 - atomic_inc(&vcc->stats->tx);
29611 - atomic_inc(&out_vcc->stats->rx);
29612 + atomic_inc_unchecked(&vcc->stats->tx);
29613 + atomic_inc_unchecked(&out_vcc->stats->rx);
29614 done:
29615 if (vcc->pop) vcc->pop(vcc,skb);
29616 else dev_kfree_skb(skb);
29617 diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
29618 index 0c30261..3da356e 100644
29619 --- a/drivers/atm/eni.c
29620 +++ b/drivers/atm/eni.c
29621 @@ -525,7 +525,7 @@ static int rx_aal0(struct atm_vcc *vcc)
29622 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
29623 vcc->dev->number);
29624 length = 0;
29625 - atomic_inc(&vcc->stats->rx_err);
29626 + atomic_inc_unchecked(&vcc->stats->rx_err);
29627 }
29628 else {
29629 length = ATM_CELL_SIZE-1; /* no HEC */
29630 @@ -580,7 +580,7 @@ static int rx_aal5(struct atm_vcc *vcc)
29631 size);
29632 }
29633 eff = length = 0;
29634 - atomic_inc(&vcc->stats->rx_err);
29635 + atomic_inc_unchecked(&vcc->stats->rx_err);
29636 }
29637 else {
29638 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
29639 @@ -597,7 +597,7 @@ static int rx_aal5(struct atm_vcc *vcc)
29640 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
29641 vcc->dev->number,vcc->vci,length,size << 2,descr);
29642 length = eff = 0;
29643 - atomic_inc(&vcc->stats->rx_err);
29644 + atomic_inc_unchecked(&vcc->stats->rx_err);
29645 }
29646 }
29647 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
29648 @@ -770,7 +770,7 @@ rx_dequeued++;
29649 vcc->push(vcc,skb);
29650 pushed++;
29651 }
29652 - atomic_inc(&vcc->stats->rx);
29653 + atomic_inc_unchecked(&vcc->stats->rx);
29654 }
29655 wake_up(&eni_dev->rx_wait);
29656 }
29657 @@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
29658 PCI_DMA_TODEVICE);
29659 if (vcc->pop) vcc->pop(vcc,skb);
29660 else dev_kfree_skb_irq(skb);
29661 - atomic_inc(&vcc->stats->tx);
29662 + atomic_inc_unchecked(&vcc->stats->tx);
29663 wake_up(&eni_dev->tx_wait);
29664 dma_complete++;
29665 }
29666 @@ -1570,7 +1570,7 @@ tx_complete++;
29667 /*--------------------------------- entries ---------------------------------*/
29668
29669
29670 -static const char *media_name[] __devinitdata = {
29671 +static const char *media_name[] __devinitconst = {
29672 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
29673 "UTP", "05?", "06?", "07?", /* 4- 7 */
29674 "TAXI","09?", "10?", "11?", /* 8-11 */
29675 diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
29676 index cd5049a..a51209f 100644
29677 --- a/drivers/atm/firestream.c
29678 +++ b/drivers/atm/firestream.c
29679 @@ -748,7 +748,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
29680 }
29681 }
29682
29683 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
29684 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
29685
29686 fs_dprintk (FS_DEBUG_TXMEM, "i");
29687 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
29688 @@ -815,7 +815,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
29689 #endif
29690 skb_put (skb, qe->p1 & 0xffff);
29691 ATM_SKB(skb)->vcc = atm_vcc;
29692 - atomic_inc(&atm_vcc->stats->rx);
29693 + atomic_inc_unchecked(&atm_vcc->stats->rx);
29694 __net_timestamp(skb);
29695 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
29696 atm_vcc->push (atm_vcc, skb);
29697 @@ -836,12 +836,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
29698 kfree (pe);
29699 }
29700 if (atm_vcc)
29701 - atomic_inc(&atm_vcc->stats->rx_drop);
29702 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
29703 break;
29704 case 0x1f: /* Reassembly abort: no buffers. */
29705 /* Silently increment error counter. */
29706 if (atm_vcc)
29707 - atomic_inc(&atm_vcc->stats->rx_drop);
29708 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
29709 break;
29710 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
29711 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
29712 diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
29713 index f766cc4..a34002e 100644
29714 --- a/drivers/atm/fore200e.c
29715 +++ b/drivers/atm/fore200e.c
29716 @@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
29717 #endif
29718 /* check error condition */
29719 if (*entry->status & STATUS_ERROR)
29720 - atomic_inc(&vcc->stats->tx_err);
29721 + atomic_inc_unchecked(&vcc->stats->tx_err);
29722 else
29723 - atomic_inc(&vcc->stats->tx);
29724 + atomic_inc_unchecked(&vcc->stats->tx);
29725 }
29726 }
29727
29728 @@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
29729 if (skb == NULL) {
29730 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
29731
29732 - atomic_inc(&vcc->stats->rx_drop);
29733 + atomic_inc_unchecked(&vcc->stats->rx_drop);
29734 return -ENOMEM;
29735 }
29736
29737 @@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
29738
29739 dev_kfree_skb_any(skb);
29740
29741 - atomic_inc(&vcc->stats->rx_drop);
29742 + atomic_inc_unchecked(&vcc->stats->rx_drop);
29743 return -ENOMEM;
29744 }
29745
29746 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
29747
29748 vcc->push(vcc, skb);
29749 - atomic_inc(&vcc->stats->rx);
29750 + atomic_inc_unchecked(&vcc->stats->rx);
29751
29752 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
29753
29754 @@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
29755 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
29756 fore200e->atm_dev->number,
29757 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
29758 - atomic_inc(&vcc->stats->rx_err);
29759 + atomic_inc_unchecked(&vcc->stats->rx_err);
29760 }
29761 }
29762
29763 @@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
29764 goto retry_here;
29765 }
29766
29767 - atomic_inc(&vcc->stats->tx_err);
29768 + atomic_inc_unchecked(&vcc->stats->tx_err);
29769
29770 fore200e->tx_sat++;
29771 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
29772 diff --git a/drivers/atm/he.c b/drivers/atm/he.c
29773 index 7066703..2b130de 100644
29774 --- a/drivers/atm/he.c
29775 +++ b/drivers/atm/he.c
29776 @@ -1769,7 +1769,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
29777
29778 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
29779 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
29780 - atomic_inc(&vcc->stats->rx_drop);
29781 + atomic_inc_unchecked(&vcc->stats->rx_drop);
29782 goto return_host_buffers;
29783 }
29784
29785 @@ -1802,7 +1802,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
29786 RBRQ_LEN_ERR(he_dev->rbrq_head)
29787 ? "LEN_ERR" : "",
29788 vcc->vpi, vcc->vci);
29789 - atomic_inc(&vcc->stats->rx_err);
29790 + atomic_inc_unchecked(&vcc->stats->rx_err);
29791 goto return_host_buffers;
29792 }
29793
29794 @@ -1861,7 +1861,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
29795 vcc->push(vcc, skb);
29796 spin_lock(&he_dev->global_lock);
29797
29798 - atomic_inc(&vcc->stats->rx);
29799 + atomic_inc_unchecked(&vcc->stats->rx);
29800
29801 return_host_buffers:
29802 ++pdus_assembled;
29803 @@ -2206,7 +2206,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
29804 tpd->vcc->pop(tpd->vcc, tpd->skb);
29805 else
29806 dev_kfree_skb_any(tpd->skb);
29807 - atomic_inc(&tpd->vcc->stats->tx_err);
29808 + atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
29809 }
29810 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
29811 return;
29812 @@ -2618,7 +2618,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29813 vcc->pop(vcc, skb);
29814 else
29815 dev_kfree_skb_any(skb);
29816 - atomic_inc(&vcc->stats->tx_err);
29817 + atomic_inc_unchecked(&vcc->stats->tx_err);
29818 return -EINVAL;
29819 }
29820
29821 @@ -2629,7 +2629,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29822 vcc->pop(vcc, skb);
29823 else
29824 dev_kfree_skb_any(skb);
29825 - atomic_inc(&vcc->stats->tx_err);
29826 + atomic_inc_unchecked(&vcc->stats->tx_err);
29827 return -EINVAL;
29828 }
29829 #endif
29830 @@ -2641,7 +2641,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29831 vcc->pop(vcc, skb);
29832 else
29833 dev_kfree_skb_any(skb);
29834 - atomic_inc(&vcc->stats->tx_err);
29835 + atomic_inc_unchecked(&vcc->stats->tx_err);
29836 spin_unlock_irqrestore(&he_dev->global_lock, flags);
29837 return -ENOMEM;
29838 }
29839 @@ -2683,7 +2683,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29840 vcc->pop(vcc, skb);
29841 else
29842 dev_kfree_skb_any(skb);
29843 - atomic_inc(&vcc->stats->tx_err);
29844 + atomic_inc_unchecked(&vcc->stats->tx_err);
29845 spin_unlock_irqrestore(&he_dev->global_lock, flags);
29846 return -ENOMEM;
29847 }
29848 @@ -2714,7 +2714,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29849 __enqueue_tpd(he_dev, tpd, cid);
29850 spin_unlock_irqrestore(&he_dev->global_lock, flags);
29851
29852 - atomic_inc(&vcc->stats->tx);
29853 + atomic_inc_unchecked(&vcc->stats->tx);
29854
29855 return 0;
29856 }
29857 diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
29858 index 4e49021..01b1512 100644
29859 --- a/drivers/atm/horizon.c
29860 +++ b/drivers/atm/horizon.c
29861 @@ -1033,7 +1033,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
29862 {
29863 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
29864 // VC layer stats
29865 - atomic_inc(&vcc->stats->rx);
29866 + atomic_inc_unchecked(&vcc->stats->rx);
29867 __net_timestamp(skb);
29868 // end of our responsability
29869 vcc->push (vcc, skb);
29870 @@ -1185,7 +1185,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
29871 dev->tx_iovec = NULL;
29872
29873 // VC layer stats
29874 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
29875 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
29876
29877 // free the skb
29878 hrz_kfree_skb (skb);
29879 diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
29880 index e33ae00..9deb4ab 100644
29881 --- a/drivers/atm/idt77252.c
29882 +++ b/drivers/atm/idt77252.c
29883 @@ -810,7 +810,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
29884 else
29885 dev_kfree_skb(skb);
29886
29887 - atomic_inc(&vcc->stats->tx);
29888 + atomic_inc_unchecked(&vcc->stats->tx);
29889 }
29890
29891 atomic_dec(&scq->used);
29892 @@ -1073,13 +1073,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29893 if ((sb = dev_alloc_skb(64)) == NULL) {
29894 printk("%s: Can't allocate buffers for aal0.\n",
29895 card->name);
29896 - atomic_add(i, &vcc->stats->rx_drop);
29897 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
29898 break;
29899 }
29900 if (!atm_charge(vcc, sb->truesize)) {
29901 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
29902 card->name);
29903 - atomic_add(i - 1, &vcc->stats->rx_drop);
29904 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
29905 dev_kfree_skb(sb);
29906 break;
29907 }
29908 @@ -1096,7 +1096,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29909 ATM_SKB(sb)->vcc = vcc;
29910 __net_timestamp(sb);
29911 vcc->push(vcc, sb);
29912 - atomic_inc(&vcc->stats->rx);
29913 + atomic_inc_unchecked(&vcc->stats->rx);
29914
29915 cell += ATM_CELL_PAYLOAD;
29916 }
29917 @@ -1133,13 +1133,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29918 "(CDC: %08x)\n",
29919 card->name, len, rpp->len, readl(SAR_REG_CDC));
29920 recycle_rx_pool_skb(card, rpp);
29921 - atomic_inc(&vcc->stats->rx_err);
29922 + atomic_inc_unchecked(&vcc->stats->rx_err);
29923 return;
29924 }
29925 if (stat & SAR_RSQE_CRC) {
29926 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
29927 recycle_rx_pool_skb(card, rpp);
29928 - atomic_inc(&vcc->stats->rx_err);
29929 + atomic_inc_unchecked(&vcc->stats->rx_err);
29930 return;
29931 }
29932 if (skb_queue_len(&rpp->queue) > 1) {
29933 @@ -1150,7 +1150,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29934 RXPRINTK("%s: Can't alloc RX skb.\n",
29935 card->name);
29936 recycle_rx_pool_skb(card, rpp);
29937 - atomic_inc(&vcc->stats->rx_err);
29938 + atomic_inc_unchecked(&vcc->stats->rx_err);
29939 return;
29940 }
29941 if (!atm_charge(vcc, skb->truesize)) {
29942 @@ -1169,7 +1169,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29943 __net_timestamp(skb);
29944
29945 vcc->push(vcc, skb);
29946 - atomic_inc(&vcc->stats->rx);
29947 + atomic_inc_unchecked(&vcc->stats->rx);
29948
29949 return;
29950 }
29951 @@ -1191,7 +1191,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29952 __net_timestamp(skb);
29953
29954 vcc->push(vcc, skb);
29955 - atomic_inc(&vcc->stats->rx);
29956 + atomic_inc_unchecked(&vcc->stats->rx);
29957
29958 if (skb->truesize > SAR_FB_SIZE_3)
29959 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
29960 @@ -1303,14 +1303,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
29961 if (vcc->qos.aal != ATM_AAL0) {
29962 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
29963 card->name, vpi, vci);
29964 - atomic_inc(&vcc->stats->rx_drop);
29965 + atomic_inc_unchecked(&vcc->stats->rx_drop);
29966 goto drop;
29967 }
29968
29969 if ((sb = dev_alloc_skb(64)) == NULL) {
29970 printk("%s: Can't allocate buffers for AAL0.\n",
29971 card->name);
29972 - atomic_inc(&vcc->stats->rx_err);
29973 + atomic_inc_unchecked(&vcc->stats->rx_err);
29974 goto drop;
29975 }
29976
29977 @@ -1329,7 +1329,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
29978 ATM_SKB(sb)->vcc = vcc;
29979 __net_timestamp(sb);
29980 vcc->push(vcc, sb);
29981 - atomic_inc(&vcc->stats->rx);
29982 + atomic_inc_unchecked(&vcc->stats->rx);
29983
29984 drop:
29985 skb_pull(queue, 64);
29986 @@ -1954,13 +1954,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
29987
29988 if (vc == NULL) {
29989 printk("%s: NULL connection in send().\n", card->name);
29990 - atomic_inc(&vcc->stats->tx_err);
29991 + atomic_inc_unchecked(&vcc->stats->tx_err);
29992 dev_kfree_skb(skb);
29993 return -EINVAL;
29994 }
29995 if (!test_bit(VCF_TX, &vc->flags)) {
29996 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
29997 - atomic_inc(&vcc->stats->tx_err);
29998 + atomic_inc_unchecked(&vcc->stats->tx_err);
29999 dev_kfree_skb(skb);
30000 return -EINVAL;
30001 }
30002 @@ -1972,14 +1972,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
30003 break;
30004 default:
30005 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
30006 - atomic_inc(&vcc->stats->tx_err);
30007 + atomic_inc_unchecked(&vcc->stats->tx_err);
30008 dev_kfree_skb(skb);
30009 return -EINVAL;
30010 }
30011
30012 if (skb_shinfo(skb)->nr_frags != 0) {
30013 printk("%s: No scatter-gather yet.\n", card->name);
30014 - atomic_inc(&vcc->stats->tx_err);
30015 + atomic_inc_unchecked(&vcc->stats->tx_err);
30016 dev_kfree_skb(skb);
30017 return -EINVAL;
30018 }
30019 @@ -1987,7 +1987,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
30020
30021 err = queue_skb(card, vc, skb, oam);
30022 if (err) {
30023 - atomic_inc(&vcc->stats->tx_err);
30024 + atomic_inc_unchecked(&vcc->stats->tx_err);
30025 dev_kfree_skb(skb);
30026 return err;
30027 }
30028 @@ -2010,7 +2010,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
30029 skb = dev_alloc_skb(64);
30030 if (!skb) {
30031 printk("%s: Out of memory in send_oam().\n", card->name);
30032 - atomic_inc(&vcc->stats->tx_err);
30033 + atomic_inc_unchecked(&vcc->stats->tx_err);
30034 return -ENOMEM;
30035 }
30036 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
30037 diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
30038 index b2c1b37..faa672b 100644
30039 --- a/drivers/atm/iphase.c
30040 +++ b/drivers/atm/iphase.c
30041 @@ -1123,7 +1123,7 @@ static int rx_pkt(struct atm_dev *dev)
30042 status = (u_short) (buf_desc_ptr->desc_mode);
30043 if (status & (RX_CER | RX_PTE | RX_OFL))
30044 {
30045 - atomic_inc(&vcc->stats->rx_err);
30046 + atomic_inc_unchecked(&vcc->stats->rx_err);
30047 IF_ERR(printk("IA: bad packet, dropping it");)
30048 if (status & RX_CER) {
30049 IF_ERR(printk(" cause: packet CRC error\n");)
30050 @@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
30051 len = dma_addr - buf_addr;
30052 if (len > iadev->rx_buf_sz) {
30053 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
30054 - atomic_inc(&vcc->stats->rx_err);
30055 + atomic_inc_unchecked(&vcc->stats->rx_err);
30056 goto out_free_desc;
30057 }
30058
30059 @@ -1296,7 +1296,7 @@ static void rx_dle_intr(struct atm_dev *dev)
30060 ia_vcc = INPH_IA_VCC(vcc);
30061 if (ia_vcc == NULL)
30062 {
30063 - atomic_inc(&vcc->stats->rx_err);
30064 + atomic_inc_unchecked(&vcc->stats->rx_err);
30065 dev_kfree_skb_any(skb);
30066 atm_return(vcc, atm_guess_pdu2truesize(len));
30067 goto INCR_DLE;
30068 @@ -1308,7 +1308,7 @@ static void rx_dle_intr(struct atm_dev *dev)
30069 if ((length > iadev->rx_buf_sz) || (length >
30070 (skb->len - sizeof(struct cpcs_trailer))))
30071 {
30072 - atomic_inc(&vcc->stats->rx_err);
30073 + atomic_inc_unchecked(&vcc->stats->rx_err);
30074 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
30075 length, skb->len);)
30076 dev_kfree_skb_any(skb);
30077 @@ -1324,7 +1324,7 @@ static void rx_dle_intr(struct atm_dev *dev)
30078
30079 IF_RX(printk("rx_dle_intr: skb push");)
30080 vcc->push(vcc,skb);
30081 - atomic_inc(&vcc->stats->rx);
30082 + atomic_inc_unchecked(&vcc->stats->rx);
30083 iadev->rx_pkt_cnt++;
30084 }
30085 INCR_DLE:
30086 @@ -2806,15 +2806,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
30087 {
30088 struct k_sonet_stats *stats;
30089 stats = &PRIV(_ia_dev[board])->sonet_stats;
30090 - printk("section_bip: %d\n", atomic_read(&stats->section_bip));
30091 - printk("line_bip : %d\n", atomic_read(&stats->line_bip));
30092 - printk("path_bip : %d\n", atomic_read(&stats->path_bip));
30093 - printk("line_febe : %d\n", atomic_read(&stats->line_febe));
30094 - printk("path_febe : %d\n", atomic_read(&stats->path_febe));
30095 - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
30096 - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
30097 - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
30098 - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
30099 + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
30100 + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
30101 + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
30102 + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
30103 + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
30104 + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
30105 + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
30106 + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
30107 + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
30108 }
30109 ia_cmds.status = 0;
30110 break;
30111 @@ -2919,7 +2919,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
30112 if ((desc == 0) || (desc > iadev->num_tx_desc))
30113 {
30114 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
30115 - atomic_inc(&vcc->stats->tx);
30116 + atomic_inc_unchecked(&vcc->stats->tx);
30117 if (vcc->pop)
30118 vcc->pop(vcc, skb);
30119 else
30120 @@ -3024,14 +3024,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
30121 ATM_DESC(skb) = vcc->vci;
30122 skb_queue_tail(&iadev->tx_dma_q, skb);
30123
30124 - atomic_inc(&vcc->stats->tx);
30125 + atomic_inc_unchecked(&vcc->stats->tx);
30126 iadev->tx_pkt_cnt++;
30127 /* Increment transaction counter */
30128 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
30129
30130 #if 0
30131 /* add flow control logic */
30132 - if (atomic_read(&vcc->stats->tx) % 20 == 0) {
30133 + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
30134 if (iavcc->vc_desc_cnt > 10) {
30135 vcc->tx_quota = vcc->tx_quota * 3 / 4;
30136 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
30137 diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
30138 index cf97c34..8d30655 100644
30139 --- a/drivers/atm/lanai.c
30140 +++ b/drivers/atm/lanai.c
30141 @@ -1305,7 +1305,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
30142 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
30143 lanai_endtx(lanai, lvcc);
30144 lanai_free_skb(lvcc->tx.atmvcc, skb);
30145 - atomic_inc(&lvcc->tx.atmvcc->stats->tx);
30146 + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
30147 }
30148
30149 /* Try to fill the buffer - don't call unless there is backlog */
30150 @@ -1428,7 +1428,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
30151 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
30152 __net_timestamp(skb);
30153 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
30154 - atomic_inc(&lvcc->rx.atmvcc->stats->rx);
30155 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
30156 out:
30157 lvcc->rx.buf.ptr = end;
30158 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
30159 @@ -1670,7 +1670,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
30160 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
30161 "vcc %d\n", lanai->number, (unsigned int) s, vci);
30162 lanai->stats.service_rxnotaal5++;
30163 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
30164 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
30165 return 0;
30166 }
30167 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
30168 @@ -1682,7 +1682,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
30169 int bytes;
30170 read_unlock(&vcc_sklist_lock);
30171 DPRINTK("got trashed rx pdu on vci %d\n", vci);
30172 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
30173 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
30174 lvcc->stats.x.aal5.service_trash++;
30175 bytes = (SERVICE_GET_END(s) * 16) -
30176 (((unsigned long) lvcc->rx.buf.ptr) -
30177 @@ -1694,7 +1694,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
30178 }
30179 if (s & SERVICE_STREAM) {
30180 read_unlock(&vcc_sklist_lock);
30181 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
30182 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
30183 lvcc->stats.x.aal5.service_stream++;
30184 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
30185 "PDU on VCI %d!\n", lanai->number, vci);
30186 @@ -1702,7 +1702,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
30187 return 0;
30188 }
30189 DPRINTK("got rx crc error on vci %d\n", vci);
30190 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
30191 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
30192 lvcc->stats.x.aal5.service_rxcrc++;
30193 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
30194 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
30195 diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
30196 index 3da804b..d3b0eed 100644
30197 --- a/drivers/atm/nicstar.c
30198 +++ b/drivers/atm/nicstar.c
30199 @@ -1723,7 +1723,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
30200 if ((vc = (vc_map *) vcc->dev_data) == NULL)
30201 {
30202 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", card->index);
30203 - atomic_inc(&vcc->stats->tx_err);
30204 + atomic_inc_unchecked(&vcc->stats->tx_err);
30205 dev_kfree_skb_any(skb);
30206 return -EINVAL;
30207 }
30208 @@ -1731,7 +1731,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
30209 if (!vc->tx)
30210 {
30211 printk("nicstar%d: Trying to transmit on a non-tx VC.\n", card->index);
30212 - atomic_inc(&vcc->stats->tx_err);
30213 + atomic_inc_unchecked(&vcc->stats->tx_err);
30214 dev_kfree_skb_any(skb);
30215 return -EINVAL;
30216 }
30217 @@ -1739,7 +1739,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
30218 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0)
30219 {
30220 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", card->index);
30221 - atomic_inc(&vcc->stats->tx_err);
30222 + atomic_inc_unchecked(&vcc->stats->tx_err);
30223 dev_kfree_skb_any(skb);
30224 return -EINVAL;
30225 }
30226 @@ -1747,7 +1747,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
30227 if (skb_shinfo(skb)->nr_frags != 0)
30228 {
30229 printk("nicstar%d: No scatter-gather yet.\n", card->index);
30230 - atomic_inc(&vcc->stats->tx_err);
30231 + atomic_inc_unchecked(&vcc->stats->tx_err);
30232 dev_kfree_skb_any(skb);
30233 return -EINVAL;
30234 }
30235 @@ -1792,11 +1792,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
30236
30237 if (push_scqe(card, vc, scq, &scqe, skb) != 0)
30238 {
30239 - atomic_inc(&vcc->stats->tx_err);
30240 + atomic_inc_unchecked(&vcc->stats->tx_err);
30241 dev_kfree_skb_any(skb);
30242 return -EIO;
30243 }
30244 - atomic_inc(&vcc->stats->tx);
30245 + atomic_inc_unchecked(&vcc->stats->tx);
30246
30247 return 0;
30248 }
30249 @@ -2111,14 +2111,14 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30250 {
30251 printk("nicstar%d: Can't allocate buffers for aal0.\n",
30252 card->index);
30253 - atomic_add(i,&vcc->stats->rx_drop);
30254 + atomic_add_unchecked(i,&vcc->stats->rx_drop);
30255 break;
30256 }
30257 if (!atm_charge(vcc, sb->truesize))
30258 {
30259 RXPRINTK("nicstar%d: atm_charge() dropped aal0 packets.\n",
30260 card->index);
30261 - atomic_add(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
30262 + atomic_add_unchecked(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
30263 dev_kfree_skb_any(sb);
30264 break;
30265 }
30266 @@ -2133,7 +2133,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30267 ATM_SKB(sb)->vcc = vcc;
30268 __net_timestamp(sb);
30269 vcc->push(vcc, sb);
30270 - atomic_inc(&vcc->stats->rx);
30271 + atomic_inc_unchecked(&vcc->stats->rx);
30272 cell += ATM_CELL_PAYLOAD;
30273 }
30274
30275 @@ -2152,7 +2152,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30276 if (iovb == NULL)
30277 {
30278 printk("nicstar%d: Out of iovec buffers.\n", card->index);
30279 - atomic_inc(&vcc->stats->rx_drop);
30280 + atomic_inc_unchecked(&vcc->stats->rx_drop);
30281 recycle_rx_buf(card, skb);
30282 return;
30283 }
30284 @@ -2182,7 +2182,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30285 else if (NS_SKB(iovb)->iovcnt >= NS_MAX_IOVECS)
30286 {
30287 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
30288 - atomic_inc(&vcc->stats->rx_err);
30289 + atomic_inc_unchecked(&vcc->stats->rx_err);
30290 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS);
30291 NS_SKB(iovb)->iovcnt = 0;
30292 iovb->len = 0;
30293 @@ -2202,7 +2202,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30294 printk("nicstar%d: Expected a small buffer, and this is not one.\n",
30295 card->index);
30296 which_list(card, skb);
30297 - atomic_inc(&vcc->stats->rx_err);
30298 + atomic_inc_unchecked(&vcc->stats->rx_err);
30299 recycle_rx_buf(card, skb);
30300 vc->rx_iov = NULL;
30301 recycle_iov_buf(card, iovb);
30302 @@ -2216,7 +2216,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30303 printk("nicstar%d: Expected a large buffer, and this is not one.\n",
30304 card->index);
30305 which_list(card, skb);
30306 - atomic_inc(&vcc->stats->rx_err);
30307 + atomic_inc_unchecked(&vcc->stats->rx_err);
30308 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
30309 NS_SKB(iovb)->iovcnt);
30310 vc->rx_iov = NULL;
30311 @@ -2240,7 +2240,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30312 printk(" - PDU size mismatch.\n");
30313 else
30314 printk(".\n");
30315 - atomic_inc(&vcc->stats->rx_err);
30316 + atomic_inc_unchecked(&vcc->stats->rx_err);
30317 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
30318 NS_SKB(iovb)->iovcnt);
30319 vc->rx_iov = NULL;
30320 @@ -2256,7 +2256,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30321 if (!atm_charge(vcc, skb->truesize))
30322 {
30323 push_rxbufs(card, skb);
30324 - atomic_inc(&vcc->stats->rx_drop);
30325 + atomic_inc_unchecked(&vcc->stats->rx_drop);
30326 }
30327 else
30328 {
30329 @@ -2268,7 +2268,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30330 ATM_SKB(skb)->vcc = vcc;
30331 __net_timestamp(skb);
30332 vcc->push(vcc, skb);
30333 - atomic_inc(&vcc->stats->rx);
30334 + atomic_inc_unchecked(&vcc->stats->rx);
30335 }
30336 }
30337 else if (NS_SKB(iovb)->iovcnt == 2) /* One small plus one large buffer */
30338 @@ -2283,7 +2283,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30339 if (!atm_charge(vcc, sb->truesize))
30340 {
30341 push_rxbufs(card, sb);
30342 - atomic_inc(&vcc->stats->rx_drop);
30343 + atomic_inc_unchecked(&vcc->stats->rx_drop);
30344 }
30345 else
30346 {
30347 @@ -2295,7 +2295,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30348 ATM_SKB(sb)->vcc = vcc;
30349 __net_timestamp(sb);
30350 vcc->push(vcc, sb);
30351 - atomic_inc(&vcc->stats->rx);
30352 + atomic_inc_unchecked(&vcc->stats->rx);
30353 }
30354
30355 push_rxbufs(card, skb);
30356 @@ -2306,7 +2306,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30357 if (!atm_charge(vcc, skb->truesize))
30358 {
30359 push_rxbufs(card, skb);
30360 - atomic_inc(&vcc->stats->rx_drop);
30361 + atomic_inc_unchecked(&vcc->stats->rx_drop);
30362 }
30363 else
30364 {
30365 @@ -2320,7 +2320,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30366 ATM_SKB(skb)->vcc = vcc;
30367 __net_timestamp(skb);
30368 vcc->push(vcc, skb);
30369 - atomic_inc(&vcc->stats->rx);
30370 + atomic_inc_unchecked(&vcc->stats->rx);
30371 }
30372
30373 push_rxbufs(card, sb);
30374 @@ -2342,7 +2342,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30375 if (hb == NULL)
30376 {
30377 printk("nicstar%d: Out of huge buffers.\n", card->index);
30378 - atomic_inc(&vcc->stats->rx_drop);
30379 + atomic_inc_unchecked(&vcc->stats->rx_drop);
30380 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
30381 NS_SKB(iovb)->iovcnt);
30382 vc->rx_iov = NULL;
30383 @@ -2393,7 +2393,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30384 }
30385 else
30386 dev_kfree_skb_any(hb);
30387 - atomic_inc(&vcc->stats->rx_drop);
30388 + atomic_inc_unchecked(&vcc->stats->rx_drop);
30389 }
30390 else
30391 {
30392 @@ -2427,7 +2427,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30393 #endif /* NS_USE_DESTRUCTORS */
30394 __net_timestamp(hb);
30395 vcc->push(vcc, hb);
30396 - atomic_inc(&vcc->stats->rx);
30397 + atomic_inc_unchecked(&vcc->stats->rx);
30398 }
30399 }
30400
30401 diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
30402 index 84c93ff..e6ed269 100644
30403 --- a/drivers/atm/solos-pci.c
30404 +++ b/drivers/atm/solos-pci.c
30405 @@ -708,7 +708,7 @@ void solos_bh(unsigned long card_arg)
30406 }
30407 atm_charge(vcc, skb->truesize);
30408 vcc->push(vcc, skb);
30409 - atomic_inc(&vcc->stats->rx);
30410 + atomic_inc_unchecked(&vcc->stats->rx);
30411 break;
30412
30413 case PKT_STATUS:
30414 @@ -914,6 +914,8 @@ static int print_buffer(struct sk_buff *buf)
30415 char msg[500];
30416 char item[10];
30417
30418 + pax_track_stack();
30419 +
30420 len = buf->len;
30421 for (i = 0; i < len; i++){
30422 if(i % 8 == 0)
30423 @@ -1023,7 +1025,7 @@ static uint32_t fpga_tx(struct solos_card *card)
30424 vcc = SKB_CB(oldskb)->vcc;
30425
30426 if (vcc) {
30427 - atomic_inc(&vcc->stats->tx);
30428 + atomic_inc_unchecked(&vcc->stats->tx);
30429 solos_pop(vcc, oldskb);
30430 } else
30431 dev_kfree_skb_irq(oldskb);
30432 diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
30433 index 6dd3f59..ee377f3 100644
30434 --- a/drivers/atm/suni.c
30435 +++ b/drivers/atm/suni.c
30436 @@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
30437
30438
30439 #define ADD_LIMITED(s,v) \
30440 - atomic_add((v),&stats->s); \
30441 - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
30442 + atomic_add_unchecked((v),&stats->s); \
30443 + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
30444
30445
30446 static void suni_hz(unsigned long from_timer)
30447 diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
30448 index fc8cb07..4a80e53 100644
30449 --- a/drivers/atm/uPD98402.c
30450 +++ b/drivers/atm/uPD98402.c
30451 @@ -41,7 +41,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
30452 struct sonet_stats tmp;
30453 int error = 0;
30454
30455 - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
30456 + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
30457 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
30458 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
30459 if (zero && !error) {
30460 @@ -160,9 +160,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
30461
30462
30463 #define ADD_LIMITED(s,v) \
30464 - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
30465 - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
30466 - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
30467 + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
30468 + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
30469 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
30470
30471
30472 static void stat_event(struct atm_dev *dev)
30473 @@ -193,7 +193,7 @@ static void uPD98402_int(struct atm_dev *dev)
30474 if (reason & uPD98402_INT_PFM) stat_event(dev);
30475 if (reason & uPD98402_INT_PCO) {
30476 (void) GET(PCOCR); /* clear interrupt cause */
30477 - atomic_add(GET(HECCT),
30478 + atomic_add_unchecked(GET(HECCT),
30479 &PRIV(dev)->sonet_stats.uncorr_hcs);
30480 }
30481 if ((reason & uPD98402_INT_RFO) &&
30482 @@ -221,9 +221,9 @@ static int uPD98402_start(struct atm_dev *dev)
30483 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
30484 uPD98402_INT_LOS),PIMR); /* enable them */
30485 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
30486 - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
30487 - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
30488 - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
30489 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
30490 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
30491 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
30492 return 0;
30493 }
30494
30495 diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
30496 index 2e9635b..32927b4 100644
30497 --- a/drivers/atm/zatm.c
30498 +++ b/drivers/atm/zatm.c
30499 @@ -458,7 +458,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
30500 }
30501 if (!size) {
30502 dev_kfree_skb_irq(skb);
30503 - if (vcc) atomic_inc(&vcc->stats->rx_err);
30504 + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
30505 continue;
30506 }
30507 if (!atm_charge(vcc,skb->truesize)) {
30508 @@ -468,7 +468,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
30509 skb->len = size;
30510 ATM_SKB(skb)->vcc = vcc;
30511 vcc->push(vcc,skb);
30512 - atomic_inc(&vcc->stats->rx);
30513 + atomic_inc_unchecked(&vcc->stats->rx);
30514 }
30515 zout(pos & 0xffff,MTA(mbx));
30516 #if 0 /* probably a stupid idea */
30517 @@ -732,7 +732,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
30518 skb_queue_head(&zatm_vcc->backlog,skb);
30519 break;
30520 }
30521 - atomic_inc(&vcc->stats->tx);
30522 + atomic_inc_unchecked(&vcc->stats->tx);
30523 wake_up(&zatm_vcc->tx_wait);
30524 }
30525
30526 diff --git a/drivers/base/bus.c b/drivers/base/bus.c
30527 index 63c143e..fece183 100644
30528 --- a/drivers/base/bus.c
30529 +++ b/drivers/base/bus.c
30530 @@ -70,7 +70,7 @@ static ssize_t drv_attr_store(struct kobject *kobj, struct attribute *attr,
30531 return ret;
30532 }
30533
30534 -static struct sysfs_ops driver_sysfs_ops = {
30535 +static const struct sysfs_ops driver_sysfs_ops = {
30536 .show = drv_attr_show,
30537 .store = drv_attr_store,
30538 };
30539 @@ -115,7 +115,7 @@ static ssize_t bus_attr_store(struct kobject *kobj, struct attribute *attr,
30540 return ret;
30541 }
30542
30543 -static struct sysfs_ops bus_sysfs_ops = {
30544 +static const struct sysfs_ops bus_sysfs_ops = {
30545 .show = bus_attr_show,
30546 .store = bus_attr_store,
30547 };
30548 @@ -154,7 +154,7 @@ static int bus_uevent_filter(struct kset *kset, struct kobject *kobj)
30549 return 0;
30550 }
30551
30552 -static struct kset_uevent_ops bus_uevent_ops = {
30553 +static const struct kset_uevent_ops bus_uevent_ops = {
30554 .filter = bus_uevent_filter,
30555 };
30556
30557 diff --git a/drivers/base/class.c b/drivers/base/class.c
30558 index 6e2c3b0..cb61871 100644
30559 --- a/drivers/base/class.c
30560 +++ b/drivers/base/class.c
30561 @@ -63,7 +63,7 @@ static void class_release(struct kobject *kobj)
30562 kfree(cp);
30563 }
30564
30565 -static struct sysfs_ops class_sysfs_ops = {
30566 +static const struct sysfs_ops class_sysfs_ops = {
30567 .show = class_attr_show,
30568 .store = class_attr_store,
30569 };
30570 diff --git a/drivers/base/core.c b/drivers/base/core.c
30571 index f33d768..a9358d0 100644
30572 --- a/drivers/base/core.c
30573 +++ b/drivers/base/core.c
30574 @@ -100,7 +100,7 @@ static ssize_t dev_attr_store(struct kobject *kobj, struct attribute *attr,
30575 return ret;
30576 }
30577
30578 -static struct sysfs_ops dev_sysfs_ops = {
30579 +static const struct sysfs_ops dev_sysfs_ops = {
30580 .show = dev_attr_show,
30581 .store = dev_attr_store,
30582 };
30583 @@ -252,7 +252,7 @@ static int dev_uevent(struct kset *kset, struct kobject *kobj,
30584 return retval;
30585 }
30586
30587 -static struct kset_uevent_ops device_uevent_ops = {
30588 +static const struct kset_uevent_ops device_uevent_ops = {
30589 .filter = dev_uevent_filter,
30590 .name = dev_uevent_name,
30591 .uevent = dev_uevent,
30592 diff --git a/drivers/base/memory.c b/drivers/base/memory.c
30593 index 989429c..2272b00 100644
30594 --- a/drivers/base/memory.c
30595 +++ b/drivers/base/memory.c
30596 @@ -44,7 +44,7 @@ static int memory_uevent(struct kset *kset, struct kobject *obj, struct kobj_uev
30597 return retval;
30598 }
30599
30600 -static struct kset_uevent_ops memory_uevent_ops = {
30601 +static const struct kset_uevent_ops memory_uevent_ops = {
30602 .name = memory_uevent_name,
30603 .uevent = memory_uevent,
30604 };
30605 diff --git a/drivers/base/sys.c b/drivers/base/sys.c
30606 index 3f202f7..61c4a6f 100644
30607 --- a/drivers/base/sys.c
30608 +++ b/drivers/base/sys.c
30609 @@ -54,7 +54,7 @@ sysdev_store(struct kobject *kobj, struct attribute *attr,
30610 return -EIO;
30611 }
30612
30613 -static struct sysfs_ops sysfs_ops = {
30614 +static const struct sysfs_ops sysfs_ops = {
30615 .show = sysdev_show,
30616 .store = sysdev_store,
30617 };
30618 @@ -104,7 +104,7 @@ static ssize_t sysdev_class_store(struct kobject *kobj, struct attribute *attr,
30619 return -EIO;
30620 }
30621
30622 -static struct sysfs_ops sysfs_class_ops = {
30623 +static const struct sysfs_ops sysfs_class_ops = {
30624 .show = sysdev_class_show,
30625 .store = sysdev_class_store,
30626 };
30627 diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
30628 index eb4fa19..1954777 100644
30629 --- a/drivers/block/DAC960.c
30630 +++ b/drivers/block/DAC960.c
30631 @@ -1973,6 +1973,8 @@ static bool DAC960_V1_ReadDeviceConfiguration(DAC960_Controller_T
30632 unsigned long flags;
30633 int Channel, TargetID;
30634
30635 + pax_track_stack();
30636 +
30637 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
30638 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
30639 sizeof(DAC960_SCSI_Inquiry_T) +
30640 diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
30641 index 68b90d9..7e2e3f3 100644
30642 --- a/drivers/block/cciss.c
30643 +++ b/drivers/block/cciss.c
30644 @@ -1011,6 +1011,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
30645 int err;
30646 u32 cp;
30647
30648 + memset(&arg64, 0, sizeof(arg64));
30649 +
30650 err = 0;
30651 err |=
30652 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
30653 @@ -2852,7 +2854,7 @@ static unsigned long pollcomplete(int ctlr)
30654 /* Wait (up to 20 seconds) for a command to complete */
30655
30656 for (i = 20 * HZ; i > 0; i--) {
30657 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
30658 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
30659 if (done == FIFO_EMPTY)
30660 schedule_timeout_uninterruptible(1);
30661 else
30662 @@ -2876,7 +2878,7 @@ static int sendcmd_core(ctlr_info_t *h, CommandList_struct *c)
30663 resend_cmd1:
30664
30665 /* Disable interrupt on the board. */
30666 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
30667 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
30668
30669 /* Make sure there is room in the command FIFO */
30670 /* Actually it should be completely empty at this time */
30671 @@ -2884,13 +2886,13 @@ resend_cmd1:
30672 /* tape side of the driver. */
30673 for (i = 200000; i > 0; i--) {
30674 /* if fifo isn't full go */
30675 - if (!(h->access.fifo_full(h)))
30676 + if (!(h->access->fifo_full(h)))
30677 break;
30678 udelay(10);
30679 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
30680 " waiting!\n", h->ctlr);
30681 }
30682 - h->access.submit_command(h, c); /* Send the cmd */
30683 + h->access->submit_command(h, c); /* Send the cmd */
30684 do {
30685 complete = pollcomplete(h->ctlr);
30686
30687 @@ -3023,7 +3025,7 @@ static void start_io(ctlr_info_t *h)
30688 while (!hlist_empty(&h->reqQ)) {
30689 c = hlist_entry(h->reqQ.first, CommandList_struct, list);
30690 /* can't do anything if fifo is full */
30691 - if ((h->access.fifo_full(h))) {
30692 + if ((h->access->fifo_full(h))) {
30693 printk(KERN_WARNING "cciss: fifo full\n");
30694 break;
30695 }
30696 @@ -3033,7 +3035,7 @@ static void start_io(ctlr_info_t *h)
30697 h->Qdepth--;
30698
30699 /* Tell the controller execute command */
30700 - h->access.submit_command(h, c);
30701 + h->access->submit_command(h, c);
30702
30703 /* Put job onto the completed Q */
30704 addQ(&h->cmpQ, c);
30705 @@ -3393,17 +3395,17 @@ startio:
30706
30707 static inline unsigned long get_next_completion(ctlr_info_t *h)
30708 {
30709 - return h->access.command_completed(h);
30710 + return h->access->command_completed(h);
30711 }
30712
30713 static inline int interrupt_pending(ctlr_info_t *h)
30714 {
30715 - return h->access.intr_pending(h);
30716 + return h->access->intr_pending(h);
30717 }
30718
30719 static inline long interrupt_not_for_us(ctlr_info_t *h)
30720 {
30721 - return (((h->access.intr_pending(h) == 0) ||
30722 + return (((h->access->intr_pending(h) == 0) ||
30723 (h->interrupts_enabled == 0)));
30724 }
30725
30726 @@ -3892,7 +3894,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
30727 */
30728 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
30729 c->product_name = products[prod_index].product_name;
30730 - c->access = *(products[prod_index].access);
30731 + c->access = products[prod_index].access;
30732 c->nr_cmds = c->max_commands - 4;
30733 if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
30734 (readb(&c->cfgtable->Signature[1]) != 'I') ||
30735 @@ -4291,7 +4293,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
30736 }
30737
30738 /* make sure the board interrupts are off */
30739 - hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
30740 + hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_OFF);
30741 if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
30742 IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) {
30743 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
30744 @@ -4341,7 +4343,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
30745 cciss_scsi_setup(i);
30746
30747 /* Turn the interrupts on so we can service requests */
30748 - hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
30749 + hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_ON);
30750
30751 /* Get the firmware version */
30752 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
30753 diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
30754 index 04d6bf8..36e712d 100644
30755 --- a/drivers/block/cciss.h
30756 +++ b/drivers/block/cciss.h
30757 @@ -90,7 +90,7 @@ struct ctlr_info
30758 // information about each logical volume
30759 drive_info_struct *drv[CISS_MAX_LUN];
30760
30761 - struct access_method access;
30762 + struct access_method *access;
30763
30764 /* queue and queue Info */
30765 struct hlist_head reqQ;
30766 diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
30767 index 6422651..bb1bdef 100644
30768 --- a/drivers/block/cpqarray.c
30769 +++ b/drivers/block/cpqarray.c
30770 @@ -402,7 +402,7 @@ static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
30771 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
30772 goto Enomem4;
30773 }
30774 - hba[i]->access.set_intr_mask(hba[i], 0);
30775 + hba[i]->access->set_intr_mask(hba[i], 0);
30776 if (request_irq(hba[i]->intr, do_ida_intr,
30777 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
30778 {
30779 @@ -460,7 +460,7 @@ static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
30780 add_timer(&hba[i]->timer);
30781
30782 /* Enable IRQ now that spinlock and rate limit timer are set up */
30783 - hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
30784 + hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
30785
30786 for(j=0; j<NWD; j++) {
30787 struct gendisk *disk = ida_gendisk[i][j];
30788 @@ -695,7 +695,7 @@ DBGINFO(
30789 for(i=0; i<NR_PRODUCTS; i++) {
30790 if (board_id == products[i].board_id) {
30791 c->product_name = products[i].product_name;
30792 - c->access = *(products[i].access);
30793 + c->access = products[i].access;
30794 break;
30795 }
30796 }
30797 @@ -793,7 +793,7 @@ static int __init cpqarray_eisa_detect(void)
30798 hba[ctlr]->intr = intr;
30799 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
30800 hba[ctlr]->product_name = products[j].product_name;
30801 - hba[ctlr]->access = *(products[j].access);
30802 + hba[ctlr]->access = products[j].access;
30803 hba[ctlr]->ctlr = ctlr;
30804 hba[ctlr]->board_id = board_id;
30805 hba[ctlr]->pci_dev = NULL; /* not PCI */
30806 @@ -896,6 +896,8 @@ static void do_ida_request(struct request_queue *q)
30807 struct scatterlist tmp_sg[SG_MAX];
30808 int i, dir, seg;
30809
30810 + pax_track_stack();
30811 +
30812 if (blk_queue_plugged(q))
30813 goto startio;
30814
30815 @@ -968,7 +970,7 @@ static void start_io(ctlr_info_t *h)
30816
30817 while((c = h->reqQ) != NULL) {
30818 /* Can't do anything if we're busy */
30819 - if (h->access.fifo_full(h) == 0)
30820 + if (h->access->fifo_full(h) == 0)
30821 return;
30822
30823 /* Get the first entry from the request Q */
30824 @@ -976,7 +978,7 @@ static void start_io(ctlr_info_t *h)
30825 h->Qdepth--;
30826
30827 /* Tell the controller to do our bidding */
30828 - h->access.submit_command(h, c);
30829 + h->access->submit_command(h, c);
30830
30831 /* Get onto the completion Q */
30832 addQ(&h->cmpQ, c);
30833 @@ -1038,7 +1040,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
30834 unsigned long flags;
30835 __u32 a,a1;
30836
30837 - istat = h->access.intr_pending(h);
30838 + istat = h->access->intr_pending(h);
30839 /* Is this interrupt for us? */
30840 if (istat == 0)
30841 return IRQ_NONE;
30842 @@ -1049,7 +1051,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
30843 */
30844 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
30845 if (istat & FIFO_NOT_EMPTY) {
30846 - while((a = h->access.command_completed(h))) {
30847 + while((a = h->access->command_completed(h))) {
30848 a1 = a; a &= ~3;
30849 if ((c = h->cmpQ) == NULL)
30850 {
30851 @@ -1434,11 +1436,11 @@ static int sendcmd(
30852 /*
30853 * Disable interrupt
30854 */
30855 - info_p->access.set_intr_mask(info_p, 0);
30856 + info_p->access->set_intr_mask(info_p, 0);
30857 /* Make sure there is room in the command FIFO */
30858 /* Actually it should be completely empty at this time. */
30859 for (i = 200000; i > 0; i--) {
30860 - temp = info_p->access.fifo_full(info_p);
30861 + temp = info_p->access->fifo_full(info_p);
30862 if (temp != 0) {
30863 break;
30864 }
30865 @@ -1451,7 +1453,7 @@ DBG(
30866 /*
30867 * Send the cmd
30868 */
30869 - info_p->access.submit_command(info_p, c);
30870 + info_p->access->submit_command(info_p, c);
30871 complete = pollcomplete(ctlr);
30872
30873 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
30874 @@ -1534,9 +1536,9 @@ static int revalidate_allvol(ctlr_info_t *host)
30875 * we check the new geometry. Then turn interrupts back on when
30876 * we're done.
30877 */
30878 - host->access.set_intr_mask(host, 0);
30879 + host->access->set_intr_mask(host, 0);
30880 getgeometry(ctlr);
30881 - host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
30882 + host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
30883
30884 for(i=0; i<NWD; i++) {
30885 struct gendisk *disk = ida_gendisk[ctlr][i];
30886 @@ -1576,7 +1578,7 @@ static int pollcomplete(int ctlr)
30887 /* Wait (up to 2 seconds) for a command to complete */
30888
30889 for (i = 200000; i > 0; i--) {
30890 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
30891 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
30892 if (done == 0) {
30893 udelay(10); /* a short fixed delay */
30894 } else
30895 diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
30896 index be73e9d..7fbf140 100644
30897 --- a/drivers/block/cpqarray.h
30898 +++ b/drivers/block/cpqarray.h
30899 @@ -99,7 +99,7 @@ struct ctlr_info {
30900 drv_info_t drv[NWD];
30901 struct proc_dir_entry *proc;
30902
30903 - struct access_method access;
30904 + struct access_method *access;
30905
30906 cmdlist_t *reqQ;
30907 cmdlist_t *cmpQ;
30908 diff --git a/drivers/block/loop.c b/drivers/block/loop.c
30909 index 8ec2d70..2804b30 100644
30910 --- a/drivers/block/loop.c
30911 +++ b/drivers/block/loop.c
30912 @@ -282,7 +282,7 @@ static int __do_lo_send_write(struct file *file,
30913 mm_segment_t old_fs = get_fs();
30914
30915 set_fs(get_ds());
30916 - bw = file->f_op->write(file, buf, len, &pos);
30917 + bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
30918 set_fs(old_fs);
30919 if (likely(bw == len))
30920 return 0;
30921 diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
30922 index 26ada47..083c480 100644
30923 --- a/drivers/block/nbd.c
30924 +++ b/drivers/block/nbd.c
30925 @@ -155,6 +155,8 @@ static int sock_xmit(struct nbd_device *lo, int send, void *buf, int size,
30926 struct kvec iov;
30927 sigset_t blocked, oldset;
30928
30929 + pax_track_stack();
30930 +
30931 if (unlikely(!sock)) {
30932 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
30933 lo->disk->disk_name, (send ? "send" : "recv"));
30934 @@ -569,6 +571,8 @@ static void do_nbd_request(struct request_queue *q)
30935 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
30936 unsigned int cmd, unsigned long arg)
30937 {
30938 + pax_track_stack();
30939 +
30940 switch (cmd) {
30941 case NBD_DISCONNECT: {
30942 struct request sreq;
30943 diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
30944 index a5d585d..d087be3 100644
30945 --- a/drivers/block/pktcdvd.c
30946 +++ b/drivers/block/pktcdvd.c
30947 @@ -284,7 +284,7 @@ static ssize_t kobj_pkt_store(struct kobject *kobj,
30948 return len;
30949 }
30950
30951 -static struct sysfs_ops kobj_pkt_ops = {
30952 +static const struct sysfs_ops kobj_pkt_ops = {
30953 .show = kobj_pkt_show,
30954 .store = kobj_pkt_store
30955 };
30956 diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
30957 index 59cccc9..a4592ec 100644
30958 --- a/drivers/cdrom/cdrom.c
30959 +++ b/drivers/cdrom/cdrom.c
30960 @@ -2057,11 +2057,6 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
30961 if (!nr)
30962 return -ENOMEM;
30963
30964 - if (!access_ok(VERIFY_WRITE, ubuf, nframes * CD_FRAMESIZE_RAW)) {
30965 - ret = -EFAULT;
30966 - goto out;
30967 - }
30968 -
30969 cgc.data_direction = CGC_DATA_READ;
30970 while (nframes > 0) {
30971 if (nr > nframes)
30972 @@ -2070,7 +2065,7 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
30973 ret = cdrom_read_block(cdi, &cgc, lba, nr, 1, CD_FRAMESIZE_RAW);
30974 if (ret)
30975 break;
30976 - if (__copy_to_user(ubuf, cgc.buffer, CD_FRAMESIZE_RAW * nr)) {
30977 + if (copy_to_user(ubuf, cgc.buffer, CD_FRAMESIZE_RAW * nr)) {
30978 ret = -EFAULT;
30979 break;
30980 }
30981 @@ -2078,7 +2073,6 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
30982 nframes -= nr;
30983 lba += nr;
30984 }
30985 -out:
30986 kfree(cgc.buffer);
30987 return ret;
30988 }
30989 diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
30990 index 6aad99e..89cd142 100644
30991 --- a/drivers/char/Kconfig
30992 +++ b/drivers/char/Kconfig
30993 @@ -90,7 +90,8 @@ config VT_HW_CONSOLE_BINDING
30994
30995 config DEVKMEM
30996 bool "/dev/kmem virtual device support"
30997 - default y
30998 + default n
30999 + depends on !GRKERNSEC_KMEM
31000 help
31001 Say Y here if you want to support the /dev/kmem device. The
31002 /dev/kmem device is rarely used, but can be used for certain
31003 @@ -1114,6 +1115,7 @@ config DEVPORT
31004 bool
31005 depends on !M68K
31006 depends on ISA || PCI
31007 + depends on !GRKERNSEC_KMEM
31008 default y
31009
31010 source "drivers/s390/char/Kconfig"
31011 diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
31012 index a96f319..a778a5b 100644
31013 --- a/drivers/char/agp/frontend.c
31014 +++ b/drivers/char/agp/frontend.c
31015 @@ -824,7 +824,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
31016 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
31017 return -EFAULT;
31018
31019 - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
31020 + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
31021 return -EFAULT;
31022
31023 client = agp_find_client_by_pid(reserve.pid);
31024 diff --git a/drivers/char/briq_panel.c b/drivers/char/briq_panel.c
31025 index d8cff90..9628e70 100644
31026 --- a/drivers/char/briq_panel.c
31027 +++ b/drivers/char/briq_panel.c
31028 @@ -10,6 +10,7 @@
31029 #include <linux/types.h>
31030 #include <linux/errno.h>
31031 #include <linux/tty.h>
31032 +#include <linux/mutex.h>
31033 #include <linux/timer.h>
31034 #include <linux/kernel.h>
31035 #include <linux/wait.h>
31036 @@ -36,6 +37,7 @@ static int vfd_is_open;
31037 static unsigned char vfd[40];
31038 static int vfd_cursor;
31039 static unsigned char ledpb, led;
31040 +static DEFINE_MUTEX(vfd_mutex);
31041
31042 static void update_vfd(void)
31043 {
31044 @@ -142,12 +144,15 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
31045 if (!vfd_is_open)
31046 return -EBUSY;
31047
31048 + mutex_lock(&vfd_mutex);
31049 for (;;) {
31050 char c;
31051 if (!indx)
31052 break;
31053 - if (get_user(c, buf))
31054 + if (get_user(c, buf)) {
31055 + mutex_unlock(&vfd_mutex);
31056 return -EFAULT;
31057 + }
31058 if (esc) {
31059 set_led(c);
31060 esc = 0;
31061 @@ -177,6 +182,7 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
31062 buf++;
31063 }
31064 update_vfd();
31065 + mutex_unlock(&vfd_mutex);
31066
31067 return len;
31068 }
31069 diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
31070 index 31e7c91..161afc0 100644
31071 --- a/drivers/char/genrtc.c
31072 +++ b/drivers/char/genrtc.c
31073 @@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct inode *inode, struct file *file,
31074 switch (cmd) {
31075
31076 case RTC_PLL_GET:
31077 + memset(&pll, 0, sizeof(pll));
31078 if (get_rtc_pll(&pll))
31079 return -EINVAL;
31080 else
31081 diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
31082 index 006466d..a2bb21c 100644
31083 --- a/drivers/char/hpet.c
31084 +++ b/drivers/char/hpet.c
31085 @@ -430,7 +430,7 @@ static int hpet_release(struct inode *inode, struct file *file)
31086 return 0;
31087 }
31088
31089 -static int hpet_ioctl_common(struct hpet_dev *, int, unsigned long, int);
31090 +static int hpet_ioctl_common(struct hpet_dev *, unsigned int, unsigned long, int);
31091
31092 static int
31093 hpet_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
31094 @@ -565,7 +565,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
31095 }
31096
31097 static int
31098 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
31099 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg, int kernel)
31100 {
31101 struct hpet_timer __iomem *timer;
31102 struct hpet __iomem *hpet;
31103 @@ -608,11 +608,11 @@ hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
31104 {
31105 struct hpet_info info;
31106
31107 + memset(&info, 0, sizeof(info));
31108 +
31109 if (devp->hd_ireqfreq)
31110 info.hi_ireqfreq =
31111 hpet_time_div(hpetp, devp->hd_ireqfreq);
31112 - else
31113 - info.hi_ireqfreq = 0;
31114 info.hi_flags =
31115 readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK;
31116 info.hi_hpet = hpetp->hp_which;
31117 diff --git a/drivers/char/hvc_beat.c b/drivers/char/hvc_beat.c
31118 index 0afc8b8..6913fc3 100644
31119 --- a/drivers/char/hvc_beat.c
31120 +++ b/drivers/char/hvc_beat.c
31121 @@ -84,7 +84,7 @@ static int hvc_beat_put_chars(uint32_t vtermno, const char *buf, int cnt)
31122 return cnt;
31123 }
31124
31125 -static struct hv_ops hvc_beat_get_put_ops = {
31126 +static const struct hv_ops hvc_beat_get_put_ops = {
31127 .get_chars = hvc_beat_get_chars,
31128 .put_chars = hvc_beat_put_chars,
31129 };
31130 diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
31131 index 98097f2..407dddc 100644
31132 --- a/drivers/char/hvc_console.c
31133 +++ b/drivers/char/hvc_console.c
31134 @@ -125,7 +125,7 @@ static struct hvc_struct *hvc_get_by_index(int index)
31135 * console interfaces but can still be used as a tty device. This has to be
31136 * static because kmalloc will not work during early console init.
31137 */
31138 -static struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
31139 +static const struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
31140 static uint32_t vtermnos[MAX_NR_HVC_CONSOLES] =
31141 {[0 ... MAX_NR_HVC_CONSOLES - 1] = -1};
31142
31143 @@ -249,7 +249,7 @@ static void destroy_hvc_struct(struct kref *kref)
31144 * vty adapters do NOT get an hvc_instantiate() callback since they
31145 * appear after early console init.
31146 */
31147 -int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops)
31148 +int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops)
31149 {
31150 struct hvc_struct *hp;
31151
31152 @@ -758,7 +758,7 @@ static const struct tty_operations hvc_ops = {
31153 };
31154
31155 struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int data,
31156 - struct hv_ops *ops, int outbuf_size)
31157 + const struct hv_ops *ops, int outbuf_size)
31158 {
31159 struct hvc_struct *hp;
31160 int i;
31161 diff --git a/drivers/char/hvc_console.h b/drivers/char/hvc_console.h
31162 index 10950ca..ed176c3 100644
31163 --- a/drivers/char/hvc_console.h
31164 +++ b/drivers/char/hvc_console.h
31165 @@ -55,7 +55,7 @@ struct hvc_struct {
31166 int outbuf_size;
31167 int n_outbuf;
31168 uint32_t vtermno;
31169 - struct hv_ops *ops;
31170 + const struct hv_ops *ops;
31171 int irq_requested;
31172 int data;
31173 struct winsize ws;
31174 @@ -76,11 +76,11 @@ struct hv_ops {
31175 };
31176
31177 /* Register a vterm and a slot index for use as a console (console_init) */
31178 -extern int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops);
31179 +extern int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops);
31180
31181 /* register a vterm for hvc tty operation (module_init or hotplug add) */
31182 extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int data,
31183 - struct hv_ops *ops, int outbuf_size);
31184 + const struct hv_ops *ops, int outbuf_size);
31185 /* remove a vterm from hvc tty operation (module_exit or hotplug remove) */
31186 extern int hvc_remove(struct hvc_struct *hp);
31187
31188 diff --git a/drivers/char/hvc_iseries.c b/drivers/char/hvc_iseries.c
31189 index 936d05b..fd02426 100644
31190 --- a/drivers/char/hvc_iseries.c
31191 +++ b/drivers/char/hvc_iseries.c
31192 @@ -197,7 +197,7 @@ done:
31193 return sent;
31194 }
31195
31196 -static struct hv_ops hvc_get_put_ops = {
31197 +static const struct hv_ops hvc_get_put_ops = {
31198 .get_chars = get_chars,
31199 .put_chars = put_chars,
31200 .notifier_add = notifier_add_irq,
31201 diff --git a/drivers/char/hvc_iucv.c b/drivers/char/hvc_iucv.c
31202 index b0e168f..69cda2a 100644
31203 --- a/drivers/char/hvc_iucv.c
31204 +++ b/drivers/char/hvc_iucv.c
31205 @@ -924,7 +924,7 @@ static int hvc_iucv_pm_restore_thaw(struct device *dev)
31206
31207
31208 /* HVC operations */
31209 -static struct hv_ops hvc_iucv_ops = {
31210 +static const struct hv_ops hvc_iucv_ops = {
31211 .get_chars = hvc_iucv_get_chars,
31212 .put_chars = hvc_iucv_put_chars,
31213 .notifier_add = hvc_iucv_notifier_add,
31214 diff --git a/drivers/char/hvc_rtas.c b/drivers/char/hvc_rtas.c
31215 index 88590d0..61c4a61 100644
31216 --- a/drivers/char/hvc_rtas.c
31217 +++ b/drivers/char/hvc_rtas.c
31218 @@ -71,7 +71,7 @@ static int hvc_rtas_read_console(uint32_t vtermno, char *buf, int count)
31219 return i;
31220 }
31221
31222 -static struct hv_ops hvc_rtas_get_put_ops = {
31223 +static const struct hv_ops hvc_rtas_get_put_ops = {
31224 .get_chars = hvc_rtas_read_console,
31225 .put_chars = hvc_rtas_write_console,
31226 };
31227 diff --git a/drivers/char/hvc_udbg.c b/drivers/char/hvc_udbg.c
31228 index bd63ba8..b0957e6 100644
31229 --- a/drivers/char/hvc_udbg.c
31230 +++ b/drivers/char/hvc_udbg.c
31231 @@ -58,7 +58,7 @@ static int hvc_udbg_get(uint32_t vtermno, char *buf, int count)
31232 return i;
31233 }
31234
31235 -static struct hv_ops hvc_udbg_ops = {
31236 +static const struct hv_ops hvc_udbg_ops = {
31237 .get_chars = hvc_udbg_get,
31238 .put_chars = hvc_udbg_put,
31239 };
31240 diff --git a/drivers/char/hvc_vio.c b/drivers/char/hvc_vio.c
31241 index 10be343..27370e9 100644
31242 --- a/drivers/char/hvc_vio.c
31243 +++ b/drivers/char/hvc_vio.c
31244 @@ -77,7 +77,7 @@ static int filtered_get_chars(uint32_t vtermno, char *buf, int count)
31245 return got;
31246 }
31247
31248 -static struct hv_ops hvc_get_put_ops = {
31249 +static const struct hv_ops hvc_get_put_ops = {
31250 .get_chars = filtered_get_chars,
31251 .put_chars = hvc_put_chars,
31252 .notifier_add = notifier_add_irq,
31253 diff --git a/drivers/char/hvc_xen.c b/drivers/char/hvc_xen.c
31254 index a6ee32b..94f8c26 100644
31255 --- a/drivers/char/hvc_xen.c
31256 +++ b/drivers/char/hvc_xen.c
31257 @@ -120,7 +120,7 @@ static int read_console(uint32_t vtermno, char *buf, int len)
31258 return recv;
31259 }
31260
31261 -static struct hv_ops hvc_ops = {
31262 +static const struct hv_ops hvc_ops = {
31263 .get_chars = read_console,
31264 .put_chars = write_console,
31265 .notifier_add = notifier_add_irq,
31266 diff --git a/drivers/char/hvcs.c b/drivers/char/hvcs.c
31267 index 266b858..f3ee0bb 100644
31268 --- a/drivers/char/hvcs.c
31269 +++ b/drivers/char/hvcs.c
31270 @@ -82,6 +82,7 @@
31271 #include <asm/hvcserver.h>
31272 #include <asm/uaccess.h>
31273 #include <asm/vio.h>
31274 +#include <asm/local.h>
31275
31276 /*
31277 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
31278 @@ -269,7 +270,7 @@ struct hvcs_struct {
31279 unsigned int index;
31280
31281 struct tty_struct *tty;
31282 - int open_count;
31283 + local_t open_count;
31284
31285 /*
31286 * Used to tell the driver kernel_thread what operations need to take
31287 @@ -419,7 +420,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
31288
31289 spin_lock_irqsave(&hvcsd->lock, flags);
31290
31291 - if (hvcsd->open_count > 0) {
31292 + if (local_read(&hvcsd->open_count) > 0) {
31293 spin_unlock_irqrestore(&hvcsd->lock, flags);
31294 printk(KERN_INFO "HVCS: vterm state unchanged. "
31295 "The hvcs device node is still in use.\n");
31296 @@ -1135,7 +1136,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
31297 if ((retval = hvcs_partner_connect(hvcsd)))
31298 goto error_release;
31299
31300 - hvcsd->open_count = 1;
31301 + local_set(&hvcsd->open_count, 1);
31302 hvcsd->tty = tty;
31303 tty->driver_data = hvcsd;
31304
31305 @@ -1169,7 +1170,7 @@ fast_open:
31306
31307 spin_lock_irqsave(&hvcsd->lock, flags);
31308 kref_get(&hvcsd->kref);
31309 - hvcsd->open_count++;
31310 + local_inc(&hvcsd->open_count);
31311 hvcsd->todo_mask |= HVCS_SCHED_READ;
31312 spin_unlock_irqrestore(&hvcsd->lock, flags);
31313
31314 @@ -1213,7 +1214,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
31315 hvcsd = tty->driver_data;
31316
31317 spin_lock_irqsave(&hvcsd->lock, flags);
31318 - if (--hvcsd->open_count == 0) {
31319 + if (local_dec_and_test(&hvcsd->open_count)) {
31320
31321 vio_disable_interrupts(hvcsd->vdev);
31322
31323 @@ -1239,10 +1240,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
31324 free_irq(irq, hvcsd);
31325 kref_put(&hvcsd->kref, destroy_hvcs_struct);
31326 return;
31327 - } else if (hvcsd->open_count < 0) {
31328 + } else if (local_read(&hvcsd->open_count) < 0) {
31329 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
31330 " is missmanaged.\n",
31331 - hvcsd->vdev->unit_address, hvcsd->open_count);
31332 + hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
31333 }
31334
31335 spin_unlock_irqrestore(&hvcsd->lock, flags);
31336 @@ -1258,7 +1259,7 @@ static void hvcs_hangup(struct tty_struct * tty)
31337
31338 spin_lock_irqsave(&hvcsd->lock, flags);
31339 /* Preserve this so that we know how many kref refs to put */
31340 - temp_open_count = hvcsd->open_count;
31341 + temp_open_count = local_read(&hvcsd->open_count);
31342
31343 /*
31344 * Don't kref put inside the spinlock because the destruction
31345 @@ -1273,7 +1274,7 @@ static void hvcs_hangup(struct tty_struct * tty)
31346 hvcsd->tty->driver_data = NULL;
31347 hvcsd->tty = NULL;
31348
31349 - hvcsd->open_count = 0;
31350 + local_set(&hvcsd->open_count, 0);
31351
31352 /* This will drop any buffered data on the floor which is OK in a hangup
31353 * scenario. */
31354 @@ -1344,7 +1345,7 @@ static int hvcs_write(struct tty_struct *tty,
31355 * the middle of a write operation? This is a crummy place to do this
31356 * but we want to keep it all in the spinlock.
31357 */
31358 - if (hvcsd->open_count <= 0) {
31359 + if (local_read(&hvcsd->open_count) <= 0) {
31360 spin_unlock_irqrestore(&hvcsd->lock, flags);
31361 return -ENODEV;
31362 }
31363 @@ -1418,7 +1419,7 @@ static int hvcs_write_room(struct tty_struct *tty)
31364 {
31365 struct hvcs_struct *hvcsd = tty->driver_data;
31366
31367 - if (!hvcsd || hvcsd->open_count <= 0)
31368 + if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
31369 return 0;
31370
31371 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
31372 diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
31373 index ec5e3f8..02455ba 100644
31374 --- a/drivers/char/ipmi/ipmi_msghandler.c
31375 +++ b/drivers/char/ipmi/ipmi_msghandler.c
31376 @@ -414,7 +414,7 @@ struct ipmi_smi {
31377 struct proc_dir_entry *proc_dir;
31378 char proc_dir_name[10];
31379
31380 - atomic_t stats[IPMI_NUM_STATS];
31381 + atomic_unchecked_t stats[IPMI_NUM_STATS];
31382
31383 /*
31384 * run_to_completion duplicate of smb_info, smi_info
31385 @@ -447,9 +447,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
31386
31387
31388 #define ipmi_inc_stat(intf, stat) \
31389 - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
31390 + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
31391 #define ipmi_get_stat(intf, stat) \
31392 - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
31393 + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
31394
31395 static int is_lan_addr(struct ipmi_addr *addr)
31396 {
31397 @@ -2808,7 +2808,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
31398 INIT_LIST_HEAD(&intf->cmd_rcvrs);
31399 init_waitqueue_head(&intf->waitq);
31400 for (i = 0; i < IPMI_NUM_STATS; i++)
31401 - atomic_set(&intf->stats[i], 0);
31402 + atomic_set_unchecked(&intf->stats[i], 0);
31403
31404 intf->proc_dir = NULL;
31405
31406 @@ -4160,6 +4160,8 @@ static void send_panic_events(char *str)
31407 struct ipmi_smi_msg smi_msg;
31408 struct ipmi_recv_msg recv_msg;
31409
31410 + pax_track_stack();
31411 +
31412 si = (struct ipmi_system_interface_addr *) &addr;
31413 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
31414 si->channel = IPMI_BMC_CHANNEL;
31415 diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
31416 index abae8c9..8021979 100644
31417 --- a/drivers/char/ipmi/ipmi_si_intf.c
31418 +++ b/drivers/char/ipmi/ipmi_si_intf.c
31419 @@ -277,7 +277,7 @@ struct smi_info {
31420 unsigned char slave_addr;
31421
31422 /* Counters and things for the proc filesystem. */
31423 - atomic_t stats[SI_NUM_STATS];
31424 + atomic_unchecked_t stats[SI_NUM_STATS];
31425
31426 struct task_struct *thread;
31427
31428 @@ -285,9 +285,9 @@ struct smi_info {
31429 };
31430
31431 #define smi_inc_stat(smi, stat) \
31432 - atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
31433 + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
31434 #define smi_get_stat(smi, stat) \
31435 - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
31436 + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
31437
31438 #define SI_MAX_PARMS 4
31439
31440 @@ -2931,7 +2931,7 @@ static int try_smi_init(struct smi_info *new_smi)
31441 atomic_set(&new_smi->req_events, 0);
31442 new_smi->run_to_completion = 0;
31443 for (i = 0; i < SI_NUM_STATS; i++)
31444 - atomic_set(&new_smi->stats[i], 0);
31445 + atomic_set_unchecked(&new_smi->stats[i], 0);
31446
31447 new_smi->interrupt_disabled = 0;
31448 atomic_set(&new_smi->stop_operation, 0);
31449 diff --git a/drivers/char/istallion.c b/drivers/char/istallion.c
31450 index 402838f..55e2200 100644
31451 --- a/drivers/char/istallion.c
31452 +++ b/drivers/char/istallion.c
31453 @@ -187,7 +187,6 @@ static struct ktermios stli_deftermios = {
31454 * re-used for each stats call.
31455 */
31456 static comstats_t stli_comstats;
31457 -static combrd_t stli_brdstats;
31458 static struct asystats stli_cdkstats;
31459
31460 /*****************************************************************************/
31461 @@ -4058,6 +4057,7 @@ static int stli_getbrdstats(combrd_t __user *bp)
31462 {
31463 struct stlibrd *brdp;
31464 unsigned int i;
31465 + combrd_t stli_brdstats;
31466
31467 if (copy_from_user(&stli_brdstats, bp, sizeof(combrd_t)))
31468 return -EFAULT;
31469 @@ -4269,6 +4269,8 @@ static int stli_getportstruct(struct stliport __user *arg)
31470 struct stliport stli_dummyport;
31471 struct stliport *portp;
31472
31473 + pax_track_stack();
31474 +
31475 if (copy_from_user(&stli_dummyport, arg, sizeof(struct stliport)))
31476 return -EFAULT;
31477 portp = stli_getport(stli_dummyport.brdnr, stli_dummyport.panelnr,
31478 @@ -4291,6 +4293,8 @@ static int stli_getbrdstruct(struct stlibrd __user *arg)
31479 struct stlibrd stli_dummybrd;
31480 struct stlibrd *brdp;
31481
31482 + pax_track_stack();
31483 +
31484 if (copy_from_user(&stli_dummybrd, arg, sizeof(struct stlibrd)))
31485 return -EFAULT;
31486 if (stli_dummybrd.brdnr >= STL_MAXBRDS)
31487 diff --git a/drivers/char/keyboard.c b/drivers/char/keyboard.c
31488 index 950837c..e55a288 100644
31489 --- a/drivers/char/keyboard.c
31490 +++ b/drivers/char/keyboard.c
31491 @@ -635,6 +635,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
31492 kbd->kbdmode == VC_MEDIUMRAW) &&
31493 value != KVAL(K_SAK))
31494 return; /* SAK is allowed even in raw mode */
31495 +
31496 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
31497 + {
31498 + void *func = fn_handler[value];
31499 + if (func == fn_show_state || func == fn_show_ptregs ||
31500 + func == fn_show_mem)
31501 + return;
31502 + }
31503 +#endif
31504 +
31505 fn_handler[value](vc);
31506 }
31507
31508 @@ -1386,7 +1396,7 @@ static const struct input_device_id kbd_ids[] = {
31509 .evbit = { BIT_MASK(EV_SND) },
31510 },
31511
31512 - { }, /* Terminating entry */
31513 + { 0 }, /* Terminating entry */
31514 };
31515
31516 MODULE_DEVICE_TABLE(input, kbd_ids);
31517 diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
31518 index 87c67b4..230527a 100644
31519 --- a/drivers/char/mbcs.c
31520 +++ b/drivers/char/mbcs.c
31521 @@ -799,7 +799,7 @@ static int mbcs_remove(struct cx_dev *dev)
31522 return 0;
31523 }
31524
31525 -static const struct cx_device_id __devinitdata mbcs_id_table[] = {
31526 +static const struct cx_device_id __devinitconst mbcs_id_table[] = {
31527 {
31528 .part_num = MBCS_PART_NUM,
31529 .mfg_num = MBCS_MFG_NUM,
31530 diff --git a/drivers/char/mem.c b/drivers/char/mem.c
31531 index 1270f64..8495f49 100644
31532 --- a/drivers/char/mem.c
31533 +++ b/drivers/char/mem.c
31534 @@ -18,6 +18,7 @@
31535 #include <linux/raw.h>
31536 #include <linux/tty.h>
31537 #include <linux/capability.h>
31538 +#include <linux/security.h>
31539 #include <linux/ptrace.h>
31540 #include <linux/device.h>
31541 #include <linux/highmem.h>
31542 @@ -35,6 +36,10 @@
31543 # include <linux/efi.h>
31544 #endif
31545
31546 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
31547 +extern struct file_operations grsec_fops;
31548 +#endif
31549 +
31550 static inline unsigned long size_inside_page(unsigned long start,
31551 unsigned long size)
31552 {
31553 @@ -102,9 +107,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31554
31555 while (cursor < to) {
31556 if (!devmem_is_allowed(pfn)) {
31557 +#ifdef CONFIG_GRKERNSEC_KMEM
31558 + gr_handle_mem_readwrite(from, to);
31559 +#else
31560 printk(KERN_INFO
31561 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
31562 current->comm, from, to);
31563 +#endif
31564 return 0;
31565 }
31566 cursor += PAGE_SIZE;
31567 @@ -112,6 +121,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31568 }
31569 return 1;
31570 }
31571 +#elif defined(CONFIG_GRKERNSEC_KMEM)
31572 +static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31573 +{
31574 + return 0;
31575 +}
31576 #else
31577 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31578 {
31579 @@ -155,6 +169,8 @@ static ssize_t read_mem(struct file * file, char __user * buf,
31580 #endif
31581
31582 while (count > 0) {
31583 + char *temp;
31584 +
31585 /*
31586 * Handle first page in case it's not aligned
31587 */
31588 @@ -177,11 +193,31 @@ static ssize_t read_mem(struct file * file, char __user * buf,
31589 if (!ptr)
31590 return -EFAULT;
31591
31592 - if (copy_to_user(buf, ptr, sz)) {
31593 +#ifdef CONFIG_PAX_USERCOPY
31594 + temp = kmalloc(sz, GFP_KERNEL);
31595 + if (!temp) {
31596 + unxlate_dev_mem_ptr(p, ptr);
31597 + return -ENOMEM;
31598 + }
31599 + memcpy(temp, ptr, sz);
31600 +#else
31601 + temp = ptr;
31602 +#endif
31603 +
31604 + if (copy_to_user(buf, temp, sz)) {
31605 +
31606 +#ifdef CONFIG_PAX_USERCOPY
31607 + kfree(temp);
31608 +#endif
31609 +
31610 unxlate_dev_mem_ptr(p, ptr);
31611 return -EFAULT;
31612 }
31613
31614 +#ifdef CONFIG_PAX_USERCOPY
31615 + kfree(temp);
31616 +#endif
31617 +
31618 unxlate_dev_mem_ptr(p, ptr);
31619
31620 buf += sz;
31621 @@ -419,9 +455,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
31622 size_t count, loff_t *ppos)
31623 {
31624 unsigned long p = *ppos;
31625 - ssize_t low_count, read, sz;
31626 + ssize_t low_count, read, sz, err = 0;
31627 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
31628 - int err = 0;
31629
31630 read = 0;
31631 if (p < (unsigned long) high_memory) {
31632 @@ -444,6 +479,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
31633 }
31634 #endif
31635 while (low_count > 0) {
31636 + char *temp;
31637 +
31638 sz = size_inside_page(p, low_count);
31639
31640 /*
31641 @@ -453,7 +490,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
31642 */
31643 kbuf = xlate_dev_kmem_ptr((char *)p);
31644
31645 - if (copy_to_user(buf, kbuf, sz))
31646 +#ifdef CONFIG_PAX_USERCOPY
31647 + temp = kmalloc(sz, GFP_KERNEL);
31648 + if (!temp)
31649 + return -ENOMEM;
31650 + memcpy(temp, kbuf, sz);
31651 +#else
31652 + temp = kbuf;
31653 +#endif
31654 +
31655 + err = copy_to_user(buf, temp, sz);
31656 +
31657 +#ifdef CONFIG_PAX_USERCOPY
31658 + kfree(temp);
31659 +#endif
31660 +
31661 + if (err)
31662 return -EFAULT;
31663 buf += sz;
31664 p += sz;
31665 @@ -889,6 +941,9 @@ static const struct memdev {
31666 #ifdef CONFIG_CRASH_DUMP
31667 [12] = { "oldmem", 0, &oldmem_fops, NULL },
31668 #endif
31669 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
31670 + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
31671 +#endif
31672 };
31673
31674 static int memory_open(struct inode *inode, struct file *filp)
31675 diff --git a/drivers/char/mmtimer.c b/drivers/char/mmtimer.c
31676 index 918711a..4ffaf5e 100644
31677 --- a/drivers/char/mmtimer.c
31678 +++ b/drivers/char/mmtimer.c
31679 @@ -756,7 +756,7 @@ static int sgi_timer_set(struct k_itimer *timr, int flags,
31680 return err;
31681 }
31682
31683 -static struct k_clock sgi_clock = {
31684 +static k_clock_no_const sgi_clock = {
31685 .res = 0,
31686 .clock_set = sgi_clock_set,
31687 .clock_get = sgi_clock_get,
31688 diff --git a/drivers/char/pcmcia/ipwireless/tty.c b/drivers/char/pcmcia/ipwireless/tty.c
31689 index 674b3ab..a8d1970 100644
31690 --- a/drivers/char/pcmcia/ipwireless/tty.c
31691 +++ b/drivers/char/pcmcia/ipwireless/tty.c
31692 @@ -29,6 +29,7 @@
31693 #include <linux/tty_driver.h>
31694 #include <linux/tty_flip.h>
31695 #include <linux/uaccess.h>
31696 +#include <asm/local.h>
31697
31698 #include "tty.h"
31699 #include "network.h"
31700 @@ -51,7 +52,7 @@ struct ipw_tty {
31701 int tty_type;
31702 struct ipw_network *network;
31703 struct tty_struct *linux_tty;
31704 - int open_count;
31705 + local_t open_count;
31706 unsigned int control_lines;
31707 struct mutex ipw_tty_mutex;
31708 int tx_bytes_queued;
31709 @@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
31710 mutex_unlock(&tty->ipw_tty_mutex);
31711 return -ENODEV;
31712 }
31713 - if (tty->open_count == 0)
31714 + if (local_read(&tty->open_count) == 0)
31715 tty->tx_bytes_queued = 0;
31716
31717 - tty->open_count++;
31718 + local_inc(&tty->open_count);
31719
31720 tty->linux_tty = linux_tty;
31721 linux_tty->driver_data = tty;
31722 @@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
31723
31724 static void do_ipw_close(struct ipw_tty *tty)
31725 {
31726 - tty->open_count--;
31727 -
31728 - if (tty->open_count == 0) {
31729 + if (local_dec_return(&tty->open_count) == 0) {
31730 struct tty_struct *linux_tty = tty->linux_tty;
31731
31732 if (linux_tty != NULL) {
31733 @@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
31734 return;
31735
31736 mutex_lock(&tty->ipw_tty_mutex);
31737 - if (tty->open_count == 0) {
31738 + if (local_read(&tty->open_count) == 0) {
31739 mutex_unlock(&tty->ipw_tty_mutex);
31740 return;
31741 }
31742 @@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
31743 return;
31744 }
31745
31746 - if (!tty->open_count) {
31747 + if (!local_read(&tty->open_count)) {
31748 mutex_unlock(&tty->ipw_tty_mutex);
31749 return;
31750 }
31751 @@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *linux_tty,
31752 return -ENODEV;
31753
31754 mutex_lock(&tty->ipw_tty_mutex);
31755 - if (!tty->open_count) {
31756 + if (!local_read(&tty->open_count)) {
31757 mutex_unlock(&tty->ipw_tty_mutex);
31758 return -EINVAL;
31759 }
31760 @@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
31761 if (!tty)
31762 return -ENODEV;
31763
31764 - if (!tty->open_count)
31765 + if (!local_read(&tty->open_count))
31766 return -EINVAL;
31767
31768 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
31769 @@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
31770 if (!tty)
31771 return 0;
31772
31773 - if (!tty->open_count)
31774 + if (!local_read(&tty->open_count))
31775 return 0;
31776
31777 return tty->tx_bytes_queued;
31778 @@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty, struct file *file)
31779 if (!tty)
31780 return -ENODEV;
31781
31782 - if (!tty->open_count)
31783 + if (!local_read(&tty->open_count))
31784 return -EINVAL;
31785
31786 return get_control_lines(tty);
31787 @@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tty, struct file *file,
31788 if (!tty)
31789 return -ENODEV;
31790
31791 - if (!tty->open_count)
31792 + if (!local_read(&tty->open_count))
31793 return -EINVAL;
31794
31795 return set_control_lines(tty, set, clear);
31796 @@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty, struct file *file,
31797 if (!tty)
31798 return -ENODEV;
31799
31800 - if (!tty->open_count)
31801 + if (!local_read(&tty->open_count))
31802 return -EINVAL;
31803
31804 /* FIXME: Exactly how is the tty object locked here .. */
31805 @@ -591,7 +590,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
31806 against a parallel ioctl etc */
31807 mutex_lock(&ttyj->ipw_tty_mutex);
31808 }
31809 - while (ttyj->open_count)
31810 + while (local_read(&ttyj->open_count))
31811 do_ipw_close(ttyj);
31812 ipwireless_disassociate_network_ttys(network,
31813 ttyj->channel_idx);
31814 diff --git a/drivers/char/pty.c b/drivers/char/pty.c
31815 index 62f282e..e45c45c 100644
31816 --- a/drivers/char/pty.c
31817 +++ b/drivers/char/pty.c
31818 @@ -736,8 +736,10 @@ static void __init unix98_pty_init(void)
31819 register_sysctl_table(pty_root_table);
31820
31821 /* Now create the /dev/ptmx special device */
31822 + pax_open_kernel();
31823 tty_default_fops(&ptmx_fops);
31824 - ptmx_fops.open = ptmx_open;
31825 + *(void **)&ptmx_fops.open = ptmx_open;
31826 + pax_close_kernel();
31827
31828 cdev_init(&ptmx_cdev, &ptmx_fops);
31829 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
31830 diff --git a/drivers/char/random.c b/drivers/char/random.c
31831 index 3a19e2d..6ed09d3 100644
31832 --- a/drivers/char/random.c
31833 +++ b/drivers/char/random.c
31834 @@ -254,8 +254,13 @@
31835 /*
31836 * Configuration information
31837 */
31838 +#ifdef CONFIG_GRKERNSEC_RANDNET
31839 +#define INPUT_POOL_WORDS 512
31840 +#define OUTPUT_POOL_WORDS 128
31841 +#else
31842 #define INPUT_POOL_WORDS 128
31843 #define OUTPUT_POOL_WORDS 32
31844 +#endif
31845 #define SEC_XFER_SIZE 512
31846
31847 /*
31848 @@ -292,10 +297,17 @@ static struct poolinfo {
31849 int poolwords;
31850 int tap1, tap2, tap3, tap4, tap5;
31851 } poolinfo_table[] = {
31852 +#ifdef CONFIG_GRKERNSEC_RANDNET
31853 + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
31854 + { 512, 411, 308, 208, 104, 1 },
31855 + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
31856 + { 128, 103, 76, 51, 25, 1 },
31857 +#else
31858 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
31859 { 128, 103, 76, 51, 25, 1 },
31860 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
31861 { 32, 26, 20, 14, 7, 1 },
31862 +#endif
31863 #if 0
31864 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
31865 { 2048, 1638, 1231, 819, 411, 1 },
31866 @@ -1209,7 +1221,7 @@ EXPORT_SYMBOL(generate_random_uuid);
31867 #include <linux/sysctl.h>
31868
31869 static int min_read_thresh = 8, min_write_thresh;
31870 -static int max_read_thresh = INPUT_POOL_WORDS * 32;
31871 +static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
31872 static int max_write_thresh = INPUT_POOL_WORDS * 32;
31873 static char sysctl_bootid[16];
31874
31875 diff --git a/drivers/char/rocket.c b/drivers/char/rocket.c
31876 index 0e29a23..0efc2c2 100644
31877 --- a/drivers/char/rocket.c
31878 +++ b/drivers/char/rocket.c
31879 @@ -1266,6 +1266,8 @@ static int get_ports(struct r_port *info, struct rocket_ports __user *retports)
31880 struct rocket_ports tmp;
31881 int board;
31882
31883 + pax_track_stack();
31884 +
31885 if (!retports)
31886 return -EFAULT;
31887 memset(&tmp, 0, sizeof (tmp));
31888 diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
31889 index 8c262aa..4d3b058 100644
31890 --- a/drivers/char/sonypi.c
31891 +++ b/drivers/char/sonypi.c
31892 @@ -55,6 +55,7 @@
31893 #include <asm/uaccess.h>
31894 #include <asm/io.h>
31895 #include <asm/system.h>
31896 +#include <asm/local.h>
31897
31898 #include <linux/sonypi.h>
31899
31900 @@ -491,7 +492,7 @@ static struct sonypi_device {
31901 spinlock_t fifo_lock;
31902 wait_queue_head_t fifo_proc_list;
31903 struct fasync_struct *fifo_async;
31904 - int open_count;
31905 + local_t open_count;
31906 int model;
31907 struct input_dev *input_jog_dev;
31908 struct input_dev *input_key_dev;
31909 @@ -895,7 +896,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
31910 static int sonypi_misc_release(struct inode *inode, struct file *file)
31911 {
31912 mutex_lock(&sonypi_device.lock);
31913 - sonypi_device.open_count--;
31914 + local_dec(&sonypi_device.open_count);
31915 mutex_unlock(&sonypi_device.lock);
31916 return 0;
31917 }
31918 @@ -905,9 +906,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
31919 lock_kernel();
31920 mutex_lock(&sonypi_device.lock);
31921 /* Flush input queue on first open */
31922 - if (!sonypi_device.open_count)
31923 + if (!local_read(&sonypi_device.open_count))
31924 kfifo_reset(sonypi_device.fifo);
31925 - sonypi_device.open_count++;
31926 + local_inc(&sonypi_device.open_count);
31927 mutex_unlock(&sonypi_device.lock);
31928 unlock_kernel();
31929 return 0;
31930 diff --git a/drivers/char/stallion.c b/drivers/char/stallion.c
31931 index db6dcfa..13834cb 100644
31932 --- a/drivers/char/stallion.c
31933 +++ b/drivers/char/stallion.c
31934 @@ -2448,6 +2448,8 @@ static int stl_getportstruct(struct stlport __user *arg)
31935 struct stlport stl_dummyport;
31936 struct stlport *portp;
31937
31938 + pax_track_stack();
31939 +
31940 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
31941 return -EFAULT;
31942 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
31943 diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
31944 index a0789f6..cea3902 100644
31945 --- a/drivers/char/tpm/tpm.c
31946 +++ b/drivers/char/tpm/tpm.c
31947 @@ -405,7 +405,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
31948 chip->vendor.req_complete_val)
31949 goto out_recv;
31950
31951 - if ((status == chip->vendor.req_canceled)) {
31952 + if (status == chip->vendor.req_canceled) {
31953 dev_err(chip->dev, "Operation Canceled\n");
31954 rc = -ECANCELED;
31955 goto out;
31956 @@ -824,6 +824,8 @@ ssize_t tpm_show_pubek(struct device *dev, struct device_attribute *attr,
31957
31958 struct tpm_chip *chip = dev_get_drvdata(dev);
31959
31960 + pax_track_stack();
31961 +
31962 tpm_cmd.header.in = tpm_readpubek_header;
31963 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
31964 "attempting to read the PUBEK");
31965 diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
31966 index bf2170f..ce8cab9 100644
31967 --- a/drivers/char/tpm/tpm_bios.c
31968 +++ b/drivers/char/tpm/tpm_bios.c
31969 @@ -172,7 +172,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
31970 event = addr;
31971
31972 if ((event->event_type == 0 && event->event_size == 0) ||
31973 - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
31974 + (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
31975 return NULL;
31976
31977 return addr;
31978 @@ -197,7 +197,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
31979 return NULL;
31980
31981 if ((event->event_type == 0 && event->event_size == 0) ||
31982 - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
31983 + (event->event_size >= limit - v - sizeof(struct tcpa_event)))
31984 return NULL;
31985
31986 (*pos)++;
31987 @@ -290,7 +290,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
31988 int i;
31989
31990 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
31991 - seq_putc(m, data[i]);
31992 + if (!seq_putc(m, data[i]))
31993 + return -EFAULT;
31994
31995 return 0;
31996 }
31997 @@ -409,8 +410,13 @@ static int read_log(struct tpm_bios_log *log)
31998 log->bios_event_log_end = log->bios_event_log + len;
31999
32000 virt = acpi_os_map_memory(start, len);
32001 + if (!virt) {
32002 + kfree(log->bios_event_log);
32003 + log->bios_event_log = NULL;
32004 + return -EFAULT;
32005 + }
32006
32007 - memcpy(log->bios_event_log, virt, len);
32008 + memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
32009
32010 acpi_os_unmap_memory(virt, len);
32011 return 0;
32012 diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
32013 index 123cedf..6664cb4 100644
32014 --- a/drivers/char/tty_io.c
32015 +++ b/drivers/char/tty_io.c
32016 @@ -146,7 +146,7 @@ static int tty_open(struct inode *, struct file *);
32017 static int tty_release(struct inode *, struct file *);
32018 long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
32019 #ifdef CONFIG_COMPAT
32020 -static long tty_compat_ioctl(struct file *file, unsigned int cmd,
32021 +long tty_compat_ioctl(struct file *file, unsigned int cmd,
32022 unsigned long arg);
32023 #else
32024 #define tty_compat_ioctl NULL
32025 @@ -1774,6 +1774,7 @@ got_driver:
32026
32027 if (IS_ERR(tty)) {
32028 mutex_unlock(&tty_mutex);
32029 + tty_driver_kref_put(driver);
32030 return PTR_ERR(tty);
32031 }
32032 }
32033 @@ -2603,8 +2604,10 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
32034 return retval;
32035 }
32036
32037 +EXPORT_SYMBOL(tty_ioctl);
32038 +
32039 #ifdef CONFIG_COMPAT
32040 -static long tty_compat_ioctl(struct file *file, unsigned int cmd,
32041 +long tty_compat_ioctl(struct file *file, unsigned int cmd,
32042 unsigned long arg)
32043 {
32044 struct inode *inode = file->f_dentry->d_inode;
32045 @@ -2628,6 +2631,8 @@ static long tty_compat_ioctl(struct file *file, unsigned int cmd,
32046
32047 return retval;
32048 }
32049 +
32050 +EXPORT_SYMBOL(tty_compat_ioctl);
32051 #endif
32052
32053 /*
32054 @@ -3073,7 +3078,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
32055
32056 void tty_default_fops(struct file_operations *fops)
32057 {
32058 - *fops = tty_fops;
32059 + memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
32060 }
32061
32062 /*
32063 diff --git a/drivers/char/tty_ldisc.c b/drivers/char/tty_ldisc.c
32064 index d814a3d..b55b9c9 100644
32065 --- a/drivers/char/tty_ldisc.c
32066 +++ b/drivers/char/tty_ldisc.c
32067 @@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *ld)
32068 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
32069 struct tty_ldisc_ops *ldo = ld->ops;
32070
32071 - ldo->refcount--;
32072 + atomic_dec(&ldo->refcount);
32073 module_put(ldo->owner);
32074 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
32075
32076 @@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
32077 spin_lock_irqsave(&tty_ldisc_lock, flags);
32078 tty_ldiscs[disc] = new_ldisc;
32079 new_ldisc->num = disc;
32080 - new_ldisc->refcount = 0;
32081 + atomic_set(&new_ldisc->refcount, 0);
32082 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
32083
32084 return ret;
32085 @@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
32086 return -EINVAL;
32087
32088 spin_lock_irqsave(&tty_ldisc_lock, flags);
32089 - if (tty_ldiscs[disc]->refcount)
32090 + if (atomic_read(&tty_ldiscs[disc]->refcount))
32091 ret = -EBUSY;
32092 else
32093 tty_ldiscs[disc] = NULL;
32094 @@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
32095 if (ldops) {
32096 ret = ERR_PTR(-EAGAIN);
32097 if (try_module_get(ldops->owner)) {
32098 - ldops->refcount++;
32099 + atomic_inc(&ldops->refcount);
32100 ret = ldops;
32101 }
32102 }
32103 @@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
32104 unsigned long flags;
32105
32106 spin_lock_irqsave(&tty_ldisc_lock, flags);
32107 - ldops->refcount--;
32108 + atomic_dec(&ldops->refcount);
32109 module_put(ldops->owner);
32110 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
32111 }
32112 diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
32113 index a035ae3..c27fe2c 100644
32114 --- a/drivers/char/virtio_console.c
32115 +++ b/drivers/char/virtio_console.c
32116 @@ -133,7 +133,9 @@ static int get_chars(u32 vtermno, char *buf, int count)
32117 * virtqueue, so we let the drivers do some boutique early-output thing. */
32118 int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int))
32119 {
32120 - virtio_cons.put_chars = put_chars;
32121 + pax_open_kernel();
32122 + *(void **)&virtio_cons.put_chars = put_chars;
32123 + pax_close_kernel();
32124 return hvc_instantiate(0, 0, &virtio_cons);
32125 }
32126
32127 @@ -213,11 +215,13 @@ static int __devinit virtcons_probe(struct virtio_device *dev)
32128 out_vq = vqs[1];
32129
32130 /* Start using the new console output. */
32131 - virtio_cons.get_chars = get_chars;
32132 - virtio_cons.put_chars = put_chars;
32133 - virtio_cons.notifier_add = notifier_add_vio;
32134 - virtio_cons.notifier_del = notifier_del_vio;
32135 - virtio_cons.notifier_hangup = notifier_del_vio;
32136 + pax_open_kernel();
32137 + *(void **)&virtio_cons.get_chars = get_chars;
32138 + *(void **)&virtio_cons.put_chars = put_chars;
32139 + *(void **)&virtio_cons.notifier_add = notifier_add_vio;
32140 + *(void **)&virtio_cons.notifier_del = notifier_del_vio;
32141 + *(void **)&virtio_cons.notifier_hangup = notifier_del_vio;
32142 + pax_close_kernel();
32143
32144 /* The first argument of hvc_alloc() is the virtual console number, so
32145 * we use zero. The second argument is the parameter for the
32146 diff --git a/drivers/char/vt.c b/drivers/char/vt.c
32147 index 0c80c68..53d59c1 100644
32148 --- a/drivers/char/vt.c
32149 +++ b/drivers/char/vt.c
32150 @@ -243,7 +243,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier);
32151
32152 static void notify_write(struct vc_data *vc, unsigned int unicode)
32153 {
32154 - struct vt_notifier_param param = { .vc = vc, unicode = unicode };
32155 + struct vt_notifier_param param = { .vc = vc, .c = unicode };
32156 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
32157 }
32158
32159 diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
32160 index 6351a26..999af95 100644
32161 --- a/drivers/char/vt_ioctl.c
32162 +++ b/drivers/char/vt_ioctl.c
32163 @@ -210,9 +210,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
32164 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
32165 return -EFAULT;
32166
32167 - if (!capable(CAP_SYS_TTY_CONFIG))
32168 - perm = 0;
32169 -
32170 switch (cmd) {
32171 case KDGKBENT:
32172 key_map = key_maps[s];
32173 @@ -224,8 +221,12 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
32174 val = (i ? K_HOLE : K_NOSUCHMAP);
32175 return put_user(val, &user_kbe->kb_value);
32176 case KDSKBENT:
32177 + if (!capable(CAP_SYS_TTY_CONFIG))
32178 + perm = 0;
32179 +
32180 if (!perm)
32181 return -EPERM;
32182 +
32183 if (!i && v == K_NOSUCHMAP) {
32184 /* deallocate map */
32185 key_map = key_maps[s];
32186 @@ -325,9 +326,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
32187 int i, j, k;
32188 int ret;
32189
32190 - if (!capable(CAP_SYS_TTY_CONFIG))
32191 - perm = 0;
32192 -
32193 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
32194 if (!kbs) {
32195 ret = -ENOMEM;
32196 @@ -361,6 +359,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
32197 kfree(kbs);
32198 return ((p && *p) ? -EOVERFLOW : 0);
32199 case KDSKBSENT:
32200 + if (!capable(CAP_SYS_TTY_CONFIG))
32201 + perm = 0;
32202 +
32203 if (!perm) {
32204 ret = -EPERM;
32205 goto reterr;
32206 diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
32207 index c7ae026..1769c1d 100644
32208 --- a/drivers/cpufreq/cpufreq.c
32209 +++ b/drivers/cpufreq/cpufreq.c
32210 @@ -750,7 +750,7 @@ static void cpufreq_sysfs_release(struct kobject *kobj)
32211 complete(&policy->kobj_unregister);
32212 }
32213
32214 -static struct sysfs_ops sysfs_ops = {
32215 +static const struct sysfs_ops sysfs_ops = {
32216 .show = show,
32217 .store = store,
32218 };
32219 diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
32220 index 97b0038..2056670 100644
32221 --- a/drivers/cpuidle/sysfs.c
32222 +++ b/drivers/cpuidle/sysfs.c
32223 @@ -191,7 +191,7 @@ static ssize_t cpuidle_store(struct kobject * kobj, struct attribute * attr,
32224 return ret;
32225 }
32226
32227 -static struct sysfs_ops cpuidle_sysfs_ops = {
32228 +static const struct sysfs_ops cpuidle_sysfs_ops = {
32229 .show = cpuidle_show,
32230 .store = cpuidle_store,
32231 };
32232 @@ -277,7 +277,7 @@ static ssize_t cpuidle_state_show(struct kobject * kobj,
32233 return ret;
32234 }
32235
32236 -static struct sysfs_ops cpuidle_state_sysfs_ops = {
32237 +static const struct sysfs_ops cpuidle_state_sysfs_ops = {
32238 .show = cpuidle_state_show,
32239 };
32240
32241 @@ -294,7 +294,7 @@ static struct kobj_type ktype_state_cpuidle = {
32242 .release = cpuidle_state_sysfs_release,
32243 };
32244
32245 -static void inline cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
32246 +static inline void cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
32247 {
32248 kobject_put(&device->kobjs[i]->kobj);
32249 wait_for_completion(&device->kobjs[i]->kobj_unregister);
32250 diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
32251 index 5f753fc..0377ae9 100644
32252 --- a/drivers/crypto/hifn_795x.c
32253 +++ b/drivers/crypto/hifn_795x.c
32254 @@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device *dev, int encdec, u8 snum)
32255 0xCA, 0x34, 0x2B, 0x2E};
32256 struct scatterlist sg;
32257
32258 + pax_track_stack();
32259 +
32260 memset(src, 0, sizeof(src));
32261 memset(ctx.key, 0, sizeof(ctx.key));
32262
32263 diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
32264 index 71e6482..de8d96c 100644
32265 --- a/drivers/crypto/padlock-aes.c
32266 +++ b/drivers/crypto/padlock-aes.c
32267 @@ -108,6 +108,8 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
32268 struct crypto_aes_ctx gen_aes;
32269 int cpu;
32270
32271 + pax_track_stack();
32272 +
32273 if (key_len % 8) {
32274 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
32275 return -EINVAL;
32276 diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
32277 index dcc4ab7..cc834bb 100644
32278 --- a/drivers/dma/ioat/dma.c
32279 +++ b/drivers/dma/ioat/dma.c
32280 @@ -1146,7 +1146,7 @@ ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
32281 return entry->show(&chan->common, page);
32282 }
32283
32284 -struct sysfs_ops ioat_sysfs_ops = {
32285 +const struct sysfs_ops ioat_sysfs_ops = {
32286 .show = ioat_attr_show,
32287 };
32288
32289 diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
32290 index bbc3e78..f2db62c 100644
32291 --- a/drivers/dma/ioat/dma.h
32292 +++ b/drivers/dma/ioat/dma.h
32293 @@ -347,7 +347,7 @@ bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
32294 unsigned long *phys_complete);
32295 void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
32296 void ioat_kobject_del(struct ioatdma_device *device);
32297 -extern struct sysfs_ops ioat_sysfs_ops;
32298 +extern const struct sysfs_ops ioat_sysfs_ops;
32299 extern struct ioat_sysfs_entry ioat_version_attr;
32300 extern struct ioat_sysfs_entry ioat_cap_attr;
32301 #endif /* IOATDMA_H */
32302 diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
32303 index 9908c9e..3ceb0e5 100644
32304 --- a/drivers/dma/ioat/dma_v3.c
32305 +++ b/drivers/dma/ioat/dma_v3.c
32306 @@ -71,10 +71,10 @@
32307 /* provide a lookup table for setting the source address in the base or
32308 * extended descriptor of an xor or pq descriptor
32309 */
32310 -static const u8 xor_idx_to_desc __read_mostly = 0xd0;
32311 -static const u8 xor_idx_to_field[] __read_mostly = { 1, 4, 5, 6, 7, 0, 1, 2 };
32312 -static const u8 pq_idx_to_desc __read_mostly = 0xf8;
32313 -static const u8 pq_idx_to_field[] __read_mostly = { 1, 4, 5, 0, 1, 2, 4, 5 };
32314 +static const u8 xor_idx_to_desc = 0xd0;
32315 +static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
32316 +static const u8 pq_idx_to_desc = 0xf8;
32317 +static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
32318
32319 static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx)
32320 {
32321 diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
32322 index 85c464a..afd1e73 100644
32323 --- a/drivers/edac/amd64_edac.c
32324 +++ b/drivers/edac/amd64_edac.c
32325 @@ -3099,7 +3099,7 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
32326 * PCI core identifies what devices are on a system during boot, and then
32327 * inquiry this table to see if this driver is for a given device found.
32328 */
32329 -static const struct pci_device_id amd64_pci_table[] __devinitdata = {
32330 +static const struct pci_device_id amd64_pci_table[] __devinitconst = {
32331 {
32332 .vendor = PCI_VENDOR_ID_AMD,
32333 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
32334 diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
32335 index 2b95f1a..4f52793 100644
32336 --- a/drivers/edac/amd76x_edac.c
32337 +++ b/drivers/edac/amd76x_edac.c
32338 @@ -322,7 +322,7 @@ static void __devexit amd76x_remove_one(struct pci_dev *pdev)
32339 edac_mc_free(mci);
32340 }
32341
32342 -static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
32343 +static const struct pci_device_id amd76x_pci_tbl[] __devinitconst = {
32344 {
32345 PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32346 AMD762},
32347 diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
32348 index d205d49..74c9672 100644
32349 --- a/drivers/edac/e752x_edac.c
32350 +++ b/drivers/edac/e752x_edac.c
32351 @@ -1282,7 +1282,7 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev)
32352 edac_mc_free(mci);
32353 }
32354
32355 -static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
32356 +static const struct pci_device_id e752x_pci_tbl[] __devinitconst = {
32357 {
32358 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32359 E7520},
32360 diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
32361 index c7d11cc..c59c1ca 100644
32362 --- a/drivers/edac/e7xxx_edac.c
32363 +++ b/drivers/edac/e7xxx_edac.c
32364 @@ -526,7 +526,7 @@ static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
32365 edac_mc_free(mci);
32366 }
32367
32368 -static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
32369 +static const struct pci_device_id e7xxx_pci_tbl[] __devinitconst = {
32370 {
32371 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32372 E7205},
32373 diff --git a/drivers/edac/edac_device_sysfs.c b/drivers/edac/edac_device_sysfs.c
32374 index 5376457..5fdedbc 100644
32375 --- a/drivers/edac/edac_device_sysfs.c
32376 +++ b/drivers/edac/edac_device_sysfs.c
32377 @@ -137,7 +137,7 @@ static ssize_t edac_dev_ctl_info_store(struct kobject *kobj,
32378 }
32379
32380 /* edac_dev file operations for an 'ctl_info' */
32381 -static struct sysfs_ops device_ctl_info_ops = {
32382 +static const struct sysfs_ops device_ctl_info_ops = {
32383 .show = edac_dev_ctl_info_show,
32384 .store = edac_dev_ctl_info_store
32385 };
32386 @@ -373,7 +373,7 @@ static ssize_t edac_dev_instance_store(struct kobject *kobj,
32387 }
32388
32389 /* edac_dev file operations for an 'instance' */
32390 -static struct sysfs_ops device_instance_ops = {
32391 +static const struct sysfs_ops device_instance_ops = {
32392 .show = edac_dev_instance_show,
32393 .store = edac_dev_instance_store
32394 };
32395 @@ -476,7 +476,7 @@ static ssize_t edac_dev_block_store(struct kobject *kobj,
32396 }
32397
32398 /* edac_dev file operations for a 'block' */
32399 -static struct sysfs_ops device_block_ops = {
32400 +static const struct sysfs_ops device_block_ops = {
32401 .show = edac_dev_block_show,
32402 .store = edac_dev_block_store
32403 };
32404 diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
32405 index e1d4ce0..88840e9 100644
32406 --- a/drivers/edac/edac_mc_sysfs.c
32407 +++ b/drivers/edac/edac_mc_sysfs.c
32408 @@ -245,7 +245,7 @@ static ssize_t csrowdev_store(struct kobject *kobj, struct attribute *attr,
32409 return -EIO;
32410 }
32411
32412 -static struct sysfs_ops csrowfs_ops = {
32413 +static const struct sysfs_ops csrowfs_ops = {
32414 .show = csrowdev_show,
32415 .store = csrowdev_store
32416 };
32417 @@ -575,7 +575,7 @@ static ssize_t mcidev_store(struct kobject *kobj, struct attribute *attr,
32418 }
32419
32420 /* Intermediate show/store table */
32421 -static struct sysfs_ops mci_ops = {
32422 +static const struct sysfs_ops mci_ops = {
32423 .show = mcidev_show,
32424 .store = mcidev_store
32425 };
32426 diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
32427 index 422728c..d8d9c88 100644
32428 --- a/drivers/edac/edac_pci_sysfs.c
32429 +++ b/drivers/edac/edac_pci_sysfs.c
32430 @@ -25,8 +25,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
32431 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
32432 static int edac_pci_poll_msec = 1000; /* one second workq period */
32433
32434 -static atomic_t pci_parity_count = ATOMIC_INIT(0);
32435 -static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
32436 +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
32437 +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
32438
32439 static struct kobject *edac_pci_top_main_kobj;
32440 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
32441 @@ -121,7 +121,7 @@ static ssize_t edac_pci_instance_store(struct kobject *kobj,
32442 }
32443
32444 /* fs_ops table */
32445 -static struct sysfs_ops pci_instance_ops = {
32446 +static const struct sysfs_ops pci_instance_ops = {
32447 .show = edac_pci_instance_show,
32448 .store = edac_pci_instance_store
32449 };
32450 @@ -261,7 +261,7 @@ static ssize_t edac_pci_dev_store(struct kobject *kobj,
32451 return -EIO;
32452 }
32453
32454 -static struct sysfs_ops edac_pci_sysfs_ops = {
32455 +static const struct sysfs_ops edac_pci_sysfs_ops = {
32456 .show = edac_pci_dev_show,
32457 .store = edac_pci_dev_store
32458 };
32459 @@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32460 edac_printk(KERN_CRIT, EDAC_PCI,
32461 "Signaled System Error on %s\n",
32462 pci_name(dev));
32463 - atomic_inc(&pci_nonparity_count);
32464 + atomic_inc_unchecked(&pci_nonparity_count);
32465 }
32466
32467 if (status & (PCI_STATUS_PARITY)) {
32468 @@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32469 "Master Data Parity Error on %s\n",
32470 pci_name(dev));
32471
32472 - atomic_inc(&pci_parity_count);
32473 + atomic_inc_unchecked(&pci_parity_count);
32474 }
32475
32476 if (status & (PCI_STATUS_DETECTED_PARITY)) {
32477 @@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32478 "Detected Parity Error on %s\n",
32479 pci_name(dev));
32480
32481 - atomic_inc(&pci_parity_count);
32482 + atomic_inc_unchecked(&pci_parity_count);
32483 }
32484 }
32485
32486 @@ -616,7 +616,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32487 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
32488 "Signaled System Error on %s\n",
32489 pci_name(dev));
32490 - atomic_inc(&pci_nonparity_count);
32491 + atomic_inc_unchecked(&pci_nonparity_count);
32492 }
32493
32494 if (status & (PCI_STATUS_PARITY)) {
32495 @@ -624,7 +624,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32496 "Master Data Parity Error on "
32497 "%s\n", pci_name(dev));
32498
32499 - atomic_inc(&pci_parity_count);
32500 + atomic_inc_unchecked(&pci_parity_count);
32501 }
32502
32503 if (status & (PCI_STATUS_DETECTED_PARITY)) {
32504 @@ -632,7 +632,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32505 "Detected Parity Error on %s\n",
32506 pci_name(dev));
32507
32508 - atomic_inc(&pci_parity_count);
32509 + atomic_inc_unchecked(&pci_parity_count);
32510 }
32511 }
32512 }
32513 @@ -674,7 +674,7 @@ void edac_pci_do_parity_check(void)
32514 if (!check_pci_errors)
32515 return;
32516
32517 - before_count = atomic_read(&pci_parity_count);
32518 + before_count = atomic_read_unchecked(&pci_parity_count);
32519
32520 /* scan all PCI devices looking for a Parity Error on devices and
32521 * bridges.
32522 @@ -686,7 +686,7 @@ void edac_pci_do_parity_check(void)
32523 /* Only if operator has selected panic on PCI Error */
32524 if (edac_pci_get_panic_on_pe()) {
32525 /* If the count is different 'after' from 'before' */
32526 - if (before_count != atomic_read(&pci_parity_count))
32527 + if (before_count != atomic_read_unchecked(&pci_parity_count))
32528 panic("EDAC: PCI Parity Error");
32529 }
32530 }
32531 diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
32532 index 6c9a0f2..9c1cf7e 100644
32533 --- a/drivers/edac/i3000_edac.c
32534 +++ b/drivers/edac/i3000_edac.c
32535 @@ -471,7 +471,7 @@ static void __devexit i3000_remove_one(struct pci_dev *pdev)
32536 edac_mc_free(mci);
32537 }
32538
32539 -static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
32540 +static const struct pci_device_id i3000_pci_tbl[] __devinitconst = {
32541 {
32542 PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32543 I3000},
32544 diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
32545 index fde4db9..fe108f9 100644
32546 --- a/drivers/edac/i3200_edac.c
32547 +++ b/drivers/edac/i3200_edac.c
32548 @@ -444,7 +444,7 @@ static void __devexit i3200_remove_one(struct pci_dev *pdev)
32549 edac_mc_free(mci);
32550 }
32551
32552 -static const struct pci_device_id i3200_pci_tbl[] __devinitdata = {
32553 +static const struct pci_device_id i3200_pci_tbl[] __devinitconst = {
32554 {
32555 PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32556 I3200},
32557 diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
32558 index adc10a2..57d4ccf 100644
32559 --- a/drivers/edac/i5000_edac.c
32560 +++ b/drivers/edac/i5000_edac.c
32561 @@ -1516,7 +1516,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev)
32562 *
32563 * The "E500P" device is the first device supported.
32564 */
32565 -static const struct pci_device_id i5000_pci_tbl[] __devinitdata = {
32566 +static const struct pci_device_id i5000_pci_tbl[] __devinitconst = {
32567 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
32568 .driver_data = I5000P},
32569
32570 diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
32571 index 22db05a..b2b5503 100644
32572 --- a/drivers/edac/i5100_edac.c
32573 +++ b/drivers/edac/i5100_edac.c
32574 @@ -944,7 +944,7 @@ static void __devexit i5100_remove_one(struct pci_dev *pdev)
32575 edac_mc_free(mci);
32576 }
32577
32578 -static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
32579 +static const struct pci_device_id i5100_pci_tbl[] __devinitconst = {
32580 /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
32581 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
32582 { 0, }
32583 diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
32584 index f99d106..f050710 100644
32585 --- a/drivers/edac/i5400_edac.c
32586 +++ b/drivers/edac/i5400_edac.c
32587 @@ -1383,7 +1383,7 @@ static void __devexit i5400_remove_one(struct pci_dev *pdev)
32588 *
32589 * The "E500P" device is the first device supported.
32590 */
32591 -static const struct pci_device_id i5400_pci_tbl[] __devinitdata = {
32592 +static const struct pci_device_id i5400_pci_tbl[] __devinitconst = {
32593 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
32594 {0,} /* 0 terminated list. */
32595 };
32596 diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
32597 index 577760a..9ce16ce 100644
32598 --- a/drivers/edac/i82443bxgx_edac.c
32599 +++ b/drivers/edac/i82443bxgx_edac.c
32600 @@ -381,7 +381,7 @@ static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
32601
32602 EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
32603
32604 -static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = {
32605 +static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitconst = {
32606 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
32607 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
32608 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
32609 diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
32610 index c0088ba..64a7b98 100644
32611 --- a/drivers/edac/i82860_edac.c
32612 +++ b/drivers/edac/i82860_edac.c
32613 @@ -271,7 +271,7 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev)
32614 edac_mc_free(mci);
32615 }
32616
32617 -static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
32618 +static const struct pci_device_id i82860_pci_tbl[] __devinitconst = {
32619 {
32620 PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32621 I82860},
32622 diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
32623 index b2d83b9..a34357b 100644
32624 --- a/drivers/edac/i82875p_edac.c
32625 +++ b/drivers/edac/i82875p_edac.c
32626 @@ -512,7 +512,7 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev)
32627 edac_mc_free(mci);
32628 }
32629
32630 -static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
32631 +static const struct pci_device_id i82875p_pci_tbl[] __devinitconst = {
32632 {
32633 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32634 I82875P},
32635 diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
32636 index 2eed3ea..87bbbd1 100644
32637 --- a/drivers/edac/i82975x_edac.c
32638 +++ b/drivers/edac/i82975x_edac.c
32639 @@ -586,7 +586,7 @@ static void __devexit i82975x_remove_one(struct pci_dev *pdev)
32640 edac_mc_free(mci);
32641 }
32642
32643 -static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = {
32644 +static const struct pci_device_id i82975x_pci_tbl[] __devinitconst = {
32645 {
32646 PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32647 I82975X
32648 diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
32649 index 9900675..78ac2b6 100644
32650 --- a/drivers/edac/r82600_edac.c
32651 +++ b/drivers/edac/r82600_edac.c
32652 @@ -374,7 +374,7 @@ static void __devexit r82600_remove_one(struct pci_dev *pdev)
32653 edac_mc_free(mci);
32654 }
32655
32656 -static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
32657 +static const struct pci_device_id r82600_pci_tbl[] __devinitconst = {
32658 {
32659 PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
32660 },
32661 diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
32662 index d4ec605..4cfec4e 100644
32663 --- a/drivers/edac/x38_edac.c
32664 +++ b/drivers/edac/x38_edac.c
32665 @@ -441,7 +441,7 @@ static void __devexit x38_remove_one(struct pci_dev *pdev)
32666 edac_mc_free(mci);
32667 }
32668
32669 -static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
32670 +static const struct pci_device_id x38_pci_tbl[] __devinitconst = {
32671 {
32672 PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32673 X38},
32674 diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
32675 index 3fc2ceb..daf098f 100644
32676 --- a/drivers/firewire/core-card.c
32677 +++ b/drivers/firewire/core-card.c
32678 @@ -558,7 +558,7 @@ void fw_card_release(struct kref *kref)
32679
32680 void fw_core_remove_card(struct fw_card *card)
32681 {
32682 - struct fw_card_driver dummy_driver = dummy_driver_template;
32683 + fw_card_driver_no_const dummy_driver = dummy_driver_template;
32684
32685 card->driver->update_phy_reg(card, 4,
32686 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
32687 diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
32688 index 4560d8f..36db24a 100644
32689 --- a/drivers/firewire/core-cdev.c
32690 +++ b/drivers/firewire/core-cdev.c
32691 @@ -1141,8 +1141,7 @@ static int init_iso_resource(struct client *client,
32692 int ret;
32693
32694 if ((request->channels == 0 && request->bandwidth == 0) ||
32695 - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
32696 - request->bandwidth < 0)
32697 + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
32698 return -EINVAL;
32699
32700 r = kmalloc(sizeof(*r), GFP_KERNEL);
32701 diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
32702 index da628c7..cf54a2c 100644
32703 --- a/drivers/firewire/core-transaction.c
32704 +++ b/drivers/firewire/core-transaction.c
32705 @@ -36,6 +36,7 @@
32706 #include <linux/string.h>
32707 #include <linux/timer.h>
32708 #include <linux/types.h>
32709 +#include <linux/sched.h>
32710
32711 #include <asm/byteorder.h>
32712
32713 @@ -344,6 +345,8 @@ int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
32714 struct transaction_callback_data d;
32715 struct fw_transaction t;
32716
32717 + pax_track_stack();
32718 +
32719 init_completion(&d.done);
32720 d.payload = payload;
32721 fw_send_request(card, &t, tcode, destination_id, generation, speed,
32722 diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
32723 index 7ff6e75..a2965d9 100644
32724 --- a/drivers/firewire/core.h
32725 +++ b/drivers/firewire/core.h
32726 @@ -86,6 +86,7 @@ struct fw_card_driver {
32727
32728 int (*stop_iso)(struct fw_iso_context *ctx);
32729 };
32730 +typedef struct fw_card_driver __no_const fw_card_driver_no_const;
32731
32732 void fw_card_initialize(struct fw_card *card,
32733 const struct fw_card_driver *driver, struct device *device);
32734 diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
32735 index 3a2ccb0..82fd7c4 100644
32736 --- a/drivers/firmware/dmi_scan.c
32737 +++ b/drivers/firmware/dmi_scan.c
32738 @@ -391,11 +391,6 @@ void __init dmi_scan_machine(void)
32739 }
32740 }
32741 else {
32742 - /*
32743 - * no iounmap() for that ioremap(); it would be a no-op, but
32744 - * it's so early in setup that sucker gets confused into doing
32745 - * what it shouldn't if we actually call it.
32746 - */
32747 p = dmi_ioremap(0xF0000, 0x10000);
32748 if (p == NULL)
32749 goto error;
32750 @@ -667,7 +662,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
32751 if (buf == NULL)
32752 return -1;
32753
32754 - dmi_table(buf, dmi_len, dmi_num, decode, private_data);
32755 + dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
32756
32757 iounmap(buf);
32758 return 0;
32759 diff --git a/drivers/firmware/edd.c b/drivers/firmware/edd.c
32760 index 9e4f59d..110e24e 100644
32761 --- a/drivers/firmware/edd.c
32762 +++ b/drivers/firmware/edd.c
32763 @@ -122,7 +122,7 @@ edd_attr_show(struct kobject * kobj, struct attribute *attr, char *buf)
32764 return ret;
32765 }
32766
32767 -static struct sysfs_ops edd_attr_ops = {
32768 +static const struct sysfs_ops edd_attr_ops = {
32769 .show = edd_attr_show,
32770 };
32771
32772 diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
32773 index f4f709d..082f06e 100644
32774 --- a/drivers/firmware/efivars.c
32775 +++ b/drivers/firmware/efivars.c
32776 @@ -362,7 +362,7 @@ static ssize_t efivar_attr_store(struct kobject *kobj, struct attribute *attr,
32777 return ret;
32778 }
32779
32780 -static struct sysfs_ops efivar_attr_ops = {
32781 +static const struct sysfs_ops efivar_attr_ops = {
32782 .show = efivar_attr_show,
32783 .store = efivar_attr_store,
32784 };
32785 diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
32786 index 051d1eb..0a5d4e7 100644
32787 --- a/drivers/firmware/iscsi_ibft.c
32788 +++ b/drivers/firmware/iscsi_ibft.c
32789 @@ -525,7 +525,7 @@ static ssize_t ibft_show_attribute(struct kobject *kobj,
32790 return ret;
32791 }
32792
32793 -static struct sysfs_ops ibft_attr_ops = {
32794 +static const struct sysfs_ops ibft_attr_ops = {
32795 .show = ibft_show_attribute,
32796 };
32797
32798 diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
32799 index 56f9234..8c58c7b 100644
32800 --- a/drivers/firmware/memmap.c
32801 +++ b/drivers/firmware/memmap.c
32802 @@ -74,7 +74,7 @@ static struct attribute *def_attrs[] = {
32803 NULL
32804 };
32805
32806 -static struct sysfs_ops memmap_attr_ops = {
32807 +static const struct sysfs_ops memmap_attr_ops = {
32808 .show = memmap_attr_show,
32809 };
32810
32811 diff --git a/drivers/gpio/vr41xx_giu.c b/drivers/gpio/vr41xx_giu.c
32812 index b16c9a8..2af7d3f 100644
32813 --- a/drivers/gpio/vr41xx_giu.c
32814 +++ b/drivers/gpio/vr41xx_giu.c
32815 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
32816 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
32817 maskl, pendl, maskh, pendh);
32818
32819 - atomic_inc(&irq_err_count);
32820 + atomic_inc_unchecked(&irq_err_count);
32821
32822 return -EINVAL;
32823 }
32824 diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
32825 index bea6efc..3dc0f42 100644
32826 --- a/drivers/gpu/drm/drm_crtc.c
32827 +++ b/drivers/gpu/drm/drm_crtc.c
32828 @@ -1323,7 +1323,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
32829 */
32830 if ((out_resp->count_modes >= mode_count) && mode_count) {
32831 copied = 0;
32832 - mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
32833 + mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
32834 list_for_each_entry(mode, &connector->modes, head) {
32835 drm_crtc_convert_to_umode(&u_mode, mode);
32836 if (copy_to_user(mode_ptr + copied,
32837 @@ -1338,8 +1338,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
32838
32839 if ((out_resp->count_props >= props_count) && props_count) {
32840 copied = 0;
32841 - prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
32842 - prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
32843 + prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
32844 + prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
32845 for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
32846 if (connector->property_ids[i] != 0) {
32847 if (put_user(connector->property_ids[i],
32848 @@ -1361,7 +1361,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
32849
32850 if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
32851 copied = 0;
32852 - encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
32853 + encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
32854 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
32855 if (connector->encoder_ids[i] != 0) {
32856 if (put_user(connector->encoder_ids[i],
32857 @@ -1513,7 +1513,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
32858 }
32859
32860 for (i = 0; i < crtc_req->count_connectors; i++) {
32861 - set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
32862 + set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
32863 if (get_user(out_id, &set_connectors_ptr[i])) {
32864 ret = -EFAULT;
32865 goto out;
32866 @@ -2118,7 +2118,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
32867 out_resp->flags = property->flags;
32868
32869 if ((out_resp->count_values >= value_count) && value_count) {
32870 - values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
32871 + values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
32872 for (i = 0; i < value_count; i++) {
32873 if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
32874 ret = -EFAULT;
32875 @@ -2131,7 +2131,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
32876 if (property->flags & DRM_MODE_PROP_ENUM) {
32877 if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
32878 copied = 0;
32879 - enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
32880 + enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
32881 list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
32882
32883 if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
32884 @@ -2154,7 +2154,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
32885 if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
32886 copied = 0;
32887 blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
32888 - blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
32889 + blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
32890
32891 list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
32892 if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
32893 @@ -2226,7 +2226,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
32894 blob = obj_to_blob(obj);
32895
32896 if (out_resp->length == blob->length) {
32897 - blob_ptr = (void *)(unsigned long)out_resp->data;
32898 + blob_ptr = (void __user *)(unsigned long)out_resp->data;
32899 if (copy_to_user(blob_ptr, blob->data, blob->length)){
32900 ret = -EFAULT;
32901 goto done;
32902 diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
32903 index 1b8745d..92fdbf6 100644
32904 --- a/drivers/gpu/drm/drm_crtc_helper.c
32905 +++ b/drivers/gpu/drm/drm_crtc_helper.c
32906 @@ -573,7 +573,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
32907 struct drm_crtc *tmp;
32908 int crtc_mask = 1;
32909
32910 - WARN(!crtc, "checking null crtc?");
32911 + BUG_ON(!crtc);
32912
32913 dev = crtc->dev;
32914
32915 @@ -642,6 +642,8 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
32916
32917 adjusted_mode = drm_mode_duplicate(dev, mode);
32918
32919 + pax_track_stack();
32920 +
32921 crtc->enabled = drm_helper_crtc_in_use(crtc);
32922
32923 if (!crtc->enabled)
32924 diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
32925 index 0e27d98..dec8768 100644
32926 --- a/drivers/gpu/drm/drm_drv.c
32927 +++ b/drivers/gpu/drm/drm_drv.c
32928 @@ -417,7 +417,7 @@ int drm_ioctl(struct inode *inode, struct file *filp,
32929 char *kdata = NULL;
32930
32931 atomic_inc(&dev->ioctl_count);
32932 - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
32933 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
32934 ++file_priv->ioctl_count;
32935
32936 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
32937 diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
32938 index 519161e..98c840c 100644
32939 --- a/drivers/gpu/drm/drm_fops.c
32940 +++ b/drivers/gpu/drm/drm_fops.c
32941 @@ -66,7 +66,7 @@ static int drm_setup(struct drm_device * dev)
32942 }
32943
32944 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
32945 - atomic_set(&dev->counts[i], 0);
32946 + atomic_set_unchecked(&dev->counts[i], 0);
32947
32948 dev->sigdata.lock = NULL;
32949
32950 @@ -130,9 +130,9 @@ int drm_open(struct inode *inode, struct file *filp)
32951
32952 retcode = drm_open_helper(inode, filp, dev);
32953 if (!retcode) {
32954 - atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
32955 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
32956 spin_lock(&dev->count_lock);
32957 - if (!dev->open_count++) {
32958 + if (local_inc_return(&dev->open_count) == 1) {
32959 spin_unlock(&dev->count_lock);
32960 retcode = drm_setup(dev);
32961 goto out;
32962 @@ -435,7 +435,7 @@ int drm_release(struct inode *inode, struct file *filp)
32963
32964 lock_kernel();
32965
32966 - DRM_DEBUG("open_count = %d\n", dev->open_count);
32967 + DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
32968
32969 if (dev->driver->preclose)
32970 dev->driver->preclose(dev, file_priv);
32971 @@ -447,7 +447,7 @@ int drm_release(struct inode *inode, struct file *filp)
32972 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
32973 task_pid_nr(current),
32974 (long)old_encode_dev(file_priv->minor->device),
32975 - dev->open_count);
32976 + local_read(&dev->open_count));
32977
32978 /* Release any auth tokens that might point to this file_priv,
32979 (do that under the drm_global_mutex) */
32980 @@ -529,9 +529,9 @@ int drm_release(struct inode *inode, struct file *filp)
32981 * End inline drm_release
32982 */
32983
32984 - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
32985 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
32986 spin_lock(&dev->count_lock);
32987 - if (!--dev->open_count) {
32988 + if (local_dec_and_test(&dev->open_count)) {
32989 if (atomic_read(&dev->ioctl_count)) {
32990 DRM_ERROR("Device busy: %d\n",
32991 atomic_read(&dev->ioctl_count));
32992 diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
32993 index 8bf3770..79422805 100644
32994 --- a/drivers/gpu/drm/drm_gem.c
32995 +++ b/drivers/gpu/drm/drm_gem.c
32996 @@ -83,11 +83,11 @@ drm_gem_init(struct drm_device *dev)
32997 spin_lock_init(&dev->object_name_lock);
32998 idr_init(&dev->object_name_idr);
32999 atomic_set(&dev->object_count, 0);
33000 - atomic_set(&dev->object_memory, 0);
33001 + atomic_set_unchecked(&dev->object_memory, 0);
33002 atomic_set(&dev->pin_count, 0);
33003 - atomic_set(&dev->pin_memory, 0);
33004 + atomic_set_unchecked(&dev->pin_memory, 0);
33005 atomic_set(&dev->gtt_count, 0);
33006 - atomic_set(&dev->gtt_memory, 0);
33007 + atomic_set_unchecked(&dev->gtt_memory, 0);
33008
33009 mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
33010 if (!mm) {
33011 @@ -150,7 +150,7 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
33012 goto fput;
33013 }
33014 atomic_inc(&dev->object_count);
33015 - atomic_add(obj->size, &dev->object_memory);
33016 + atomic_add_unchecked(obj->size, &dev->object_memory);
33017 return obj;
33018 fput:
33019 fput(obj->filp);
33020 @@ -429,7 +429,7 @@ drm_gem_object_free(struct kref *kref)
33021
33022 fput(obj->filp);
33023 atomic_dec(&dev->object_count);
33024 - atomic_sub(obj->size, &dev->object_memory);
33025 + atomic_sub_unchecked(obj->size, &dev->object_memory);
33026 kfree(obj);
33027 }
33028 EXPORT_SYMBOL(drm_gem_object_free);
33029 diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
33030 index f0f6c6b..34af322 100644
33031 --- a/drivers/gpu/drm/drm_info.c
33032 +++ b/drivers/gpu/drm/drm_info.c
33033 @@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
33034 struct drm_local_map *map;
33035 struct drm_map_list *r_list;
33036
33037 - /* Hardcoded from _DRM_FRAME_BUFFER,
33038 - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
33039 - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
33040 - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
33041 + static const char * const types[] = {
33042 + [_DRM_FRAME_BUFFER] = "FB",
33043 + [_DRM_REGISTERS] = "REG",
33044 + [_DRM_SHM] = "SHM",
33045 + [_DRM_AGP] = "AGP",
33046 + [_DRM_SCATTER_GATHER] = "SG",
33047 + [_DRM_CONSISTENT] = "PCI",
33048 + [_DRM_GEM] = "GEM" };
33049 const char *type;
33050 int i;
33051
33052 @@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
33053 map = r_list->map;
33054 if (!map)
33055 continue;
33056 - if (map->type < 0 || map->type > 5)
33057 + if (map->type >= ARRAY_SIZE(types))
33058 type = "??";
33059 else
33060 type = types[map->type];
33061 @@ -265,10 +269,10 @@ int drm_gem_object_info(struct seq_file *m, void* data)
33062 struct drm_device *dev = node->minor->dev;
33063
33064 seq_printf(m, "%d objects\n", atomic_read(&dev->object_count));
33065 - seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory));
33066 + seq_printf(m, "%d object bytes\n", atomic_read_unchecked(&dev->object_memory));
33067 seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count));
33068 - seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory));
33069 - seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory));
33070 + seq_printf(m, "%d pin bytes\n", atomic_read_unchecked(&dev->pin_memory));
33071 + seq_printf(m, "%d gtt bytes\n", atomic_read_unchecked(&dev->gtt_memory));
33072 seq_printf(m, "%d gtt total\n", dev->gtt_total);
33073 return 0;
33074 }
33075 @@ -288,7 +292,11 @@ int drm_vma_info(struct seq_file *m, void *data)
33076 mutex_lock(&dev->struct_mutex);
33077 seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n",
33078 atomic_read(&dev->vma_count),
33079 +#ifdef CONFIG_GRKERNSEC_HIDESYM
33080 + NULL, 0);
33081 +#else
33082 high_memory, (u64)virt_to_phys(high_memory));
33083 +#endif
33084
33085 list_for_each_entry(pt, &dev->vmalist, head) {
33086 vma = pt->vma;
33087 @@ -296,14 +304,23 @@ int drm_vma_info(struct seq_file *m, void *data)
33088 continue;
33089 seq_printf(m,
33090 "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
33091 - pt->pid, vma->vm_start, vma->vm_end,
33092 + pt->pid,
33093 +#ifdef CONFIG_GRKERNSEC_HIDESYM
33094 + 0, 0,
33095 +#else
33096 + vma->vm_start, vma->vm_end,
33097 +#endif
33098 vma->vm_flags & VM_READ ? 'r' : '-',
33099 vma->vm_flags & VM_WRITE ? 'w' : '-',
33100 vma->vm_flags & VM_EXEC ? 'x' : '-',
33101 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
33102 vma->vm_flags & VM_LOCKED ? 'l' : '-',
33103 vma->vm_flags & VM_IO ? 'i' : '-',
33104 +#ifdef CONFIG_GRKERNSEC_HIDESYM
33105 + 0);
33106 +#else
33107 vma->vm_pgoff);
33108 +#endif
33109
33110 #if defined(__i386__)
33111 pgprot = pgprot_val(vma->vm_page_prot);
33112 diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
33113 index 282d9fd..71e5f11 100644
33114 --- a/drivers/gpu/drm/drm_ioc32.c
33115 +++ b/drivers/gpu/drm/drm_ioc32.c
33116 @@ -463,7 +463,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
33117 request = compat_alloc_user_space(nbytes);
33118 if (!access_ok(VERIFY_WRITE, request, nbytes))
33119 return -EFAULT;
33120 - list = (struct drm_buf_desc *) (request + 1);
33121 + list = (struct drm_buf_desc __user *) (request + 1);
33122
33123 if (__put_user(count, &request->count)
33124 || __put_user(list, &request->list))
33125 @@ -525,7 +525,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
33126 request = compat_alloc_user_space(nbytes);
33127 if (!access_ok(VERIFY_WRITE, request, nbytes))
33128 return -EFAULT;
33129 - list = (struct drm_buf_pub *) (request + 1);
33130 + list = (struct drm_buf_pub __user *) (request + 1);
33131
33132 if (__put_user(count, &request->count)
33133 || __put_user(list, &request->list))
33134 diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
33135 index 9b9ff46..4ea724c 100644
33136 --- a/drivers/gpu/drm/drm_ioctl.c
33137 +++ b/drivers/gpu/drm/drm_ioctl.c
33138 @@ -283,7 +283,7 @@ int drm_getstats(struct drm_device *dev, void *data,
33139 stats->data[i].value =
33140 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
33141 else
33142 - stats->data[i].value = atomic_read(&dev->counts[i]);
33143 + stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
33144 stats->data[i].type = dev->types[i];
33145 }
33146
33147 diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
33148 index e2f70a5..c703e86 100644
33149 --- a/drivers/gpu/drm/drm_lock.c
33150 +++ b/drivers/gpu/drm/drm_lock.c
33151 @@ -87,7 +87,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
33152 if (drm_lock_take(&master->lock, lock->context)) {
33153 master->lock.file_priv = file_priv;
33154 master->lock.lock_time = jiffies;
33155 - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
33156 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
33157 break; /* Got lock */
33158 }
33159
33160 @@ -165,7 +165,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
33161 return -EINVAL;
33162 }
33163
33164 - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
33165 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
33166
33167 /* kernel_context_switch isn't used by any of the x86 drm
33168 * modules but is required by the Sparc driver.
33169 diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
33170 index 7d1d88c..b9131b2 100644
33171 --- a/drivers/gpu/drm/i810/i810_dma.c
33172 +++ b/drivers/gpu/drm/i810/i810_dma.c
33173 @@ -952,8 +952,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
33174 dma->buflist[vertex->idx],
33175 vertex->discard, vertex->used);
33176
33177 - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
33178 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
33179 + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
33180 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
33181 sarea_priv->last_enqueue = dev_priv->counter - 1;
33182 sarea_priv->last_dispatch = (int)hw_status[5];
33183
33184 @@ -1115,8 +1115,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
33185 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
33186 mc->last_render);
33187
33188 - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
33189 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
33190 + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
33191 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
33192 sarea_priv->last_enqueue = dev_priv->counter - 1;
33193 sarea_priv->last_dispatch = (int)hw_status[5];
33194
33195 diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
33196 index 21e2691..7321edd 100644
33197 --- a/drivers/gpu/drm/i810/i810_drv.h
33198 +++ b/drivers/gpu/drm/i810/i810_drv.h
33199 @@ -108,8 +108,8 @@ typedef struct drm_i810_private {
33200 int page_flipping;
33201
33202 wait_queue_head_t irq_queue;
33203 - atomic_t irq_received;
33204 - atomic_t irq_emitted;
33205 + atomic_unchecked_t irq_received;
33206 + atomic_unchecked_t irq_emitted;
33207
33208 int front_offset;
33209 } drm_i810_private_t;
33210 diff --git a/drivers/gpu/drm/i830/i830_drv.h b/drivers/gpu/drm/i830/i830_drv.h
33211 index da82afe..48a45de 100644
33212 --- a/drivers/gpu/drm/i830/i830_drv.h
33213 +++ b/drivers/gpu/drm/i830/i830_drv.h
33214 @@ -115,8 +115,8 @@ typedef struct drm_i830_private {
33215 int page_flipping;
33216
33217 wait_queue_head_t irq_queue;
33218 - atomic_t irq_received;
33219 - atomic_t irq_emitted;
33220 + atomic_unchecked_t irq_received;
33221 + atomic_unchecked_t irq_emitted;
33222
33223 int use_mi_batchbuffer_start;
33224
33225 diff --git a/drivers/gpu/drm/i830/i830_irq.c b/drivers/gpu/drm/i830/i830_irq.c
33226 index 91ec2bb..6f21fab 100644
33227 --- a/drivers/gpu/drm/i830/i830_irq.c
33228 +++ b/drivers/gpu/drm/i830/i830_irq.c
33229 @@ -47,7 +47,7 @@ irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS)
33230
33231 I830_WRITE16(I830REG_INT_IDENTITY_R, temp);
33232
33233 - atomic_inc(&dev_priv->irq_received);
33234 + atomic_inc_unchecked(&dev_priv->irq_received);
33235 wake_up_interruptible(&dev_priv->irq_queue);
33236
33237 return IRQ_HANDLED;
33238 @@ -60,14 +60,14 @@ static int i830_emit_irq(struct drm_device * dev)
33239
33240 DRM_DEBUG("%s\n", __func__);
33241
33242 - atomic_inc(&dev_priv->irq_emitted);
33243 + atomic_inc_unchecked(&dev_priv->irq_emitted);
33244
33245 BEGIN_LP_RING(2);
33246 OUT_RING(0);
33247 OUT_RING(GFX_OP_USER_INTERRUPT);
33248 ADVANCE_LP_RING();
33249
33250 - return atomic_read(&dev_priv->irq_emitted);
33251 + return atomic_read_unchecked(&dev_priv->irq_emitted);
33252 }
33253
33254 static int i830_wait_irq(struct drm_device * dev, int irq_nr)
33255 @@ -79,7 +79,7 @@ static int i830_wait_irq(struct drm_device * dev, int irq_nr)
33256
33257 DRM_DEBUG("%s\n", __func__);
33258
33259 - if (atomic_read(&dev_priv->irq_received) >= irq_nr)
33260 + if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
33261 return 0;
33262
33263 dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
33264 @@ -88,7 +88,7 @@ static int i830_wait_irq(struct drm_device * dev, int irq_nr)
33265
33266 for (;;) {
33267 __set_current_state(TASK_INTERRUPTIBLE);
33268 - if (atomic_read(&dev_priv->irq_received) >= irq_nr)
33269 + if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
33270 break;
33271 if ((signed)(end - jiffies) <= 0) {
33272 DRM_ERROR("timeout iir %x imr %x ier %x hwstam %x\n",
33273 @@ -163,8 +163,8 @@ void i830_driver_irq_preinstall(struct drm_device * dev)
33274 I830_WRITE16(I830REG_HWSTAM, 0xffff);
33275 I830_WRITE16(I830REG_INT_MASK_R, 0x0);
33276 I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
33277 - atomic_set(&dev_priv->irq_received, 0);
33278 - atomic_set(&dev_priv->irq_emitted, 0);
33279 + atomic_set_unchecked(&dev_priv->irq_received, 0);
33280 + atomic_set_unchecked(&dev_priv->irq_emitted, 0);
33281 init_waitqueue_head(&dev_priv->irq_queue);
33282 }
33283
33284 diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
33285 index 288fc50..c6092055 100644
33286 --- a/drivers/gpu/drm/i915/dvo.h
33287 +++ b/drivers/gpu/drm/i915/dvo.h
33288 @@ -135,23 +135,23 @@ struct intel_dvo_dev_ops {
33289 *
33290 * \return singly-linked list of modes or NULL if no modes found.
33291 */
33292 - struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo);
33293 + struct drm_display_mode *(* const get_modes)(struct intel_dvo_device *dvo);
33294
33295 /**
33296 * Clean up driver-specific bits of the output
33297 */
33298 - void (*destroy) (struct intel_dvo_device *dvo);
33299 + void (* const destroy) (struct intel_dvo_device *dvo);
33300
33301 /**
33302 * Debugging hook to dump device registers to log file
33303 */
33304 - void (*dump_regs)(struct intel_dvo_device *dvo);
33305 + void (* const dump_regs)(struct intel_dvo_device *dvo);
33306 };
33307
33308 -extern struct intel_dvo_dev_ops sil164_ops;
33309 -extern struct intel_dvo_dev_ops ch7xxx_ops;
33310 -extern struct intel_dvo_dev_ops ivch_ops;
33311 -extern struct intel_dvo_dev_ops tfp410_ops;
33312 -extern struct intel_dvo_dev_ops ch7017_ops;
33313 +extern const struct intel_dvo_dev_ops sil164_ops;
33314 +extern const struct intel_dvo_dev_ops ch7xxx_ops;
33315 +extern const struct intel_dvo_dev_ops ivch_ops;
33316 +extern const struct intel_dvo_dev_ops tfp410_ops;
33317 +extern const struct intel_dvo_dev_ops ch7017_ops;
33318
33319 #endif /* _INTEL_DVO_H */
33320 diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
33321 index 621815b..499d82e 100644
33322 --- a/drivers/gpu/drm/i915/dvo_ch7017.c
33323 +++ b/drivers/gpu/drm/i915/dvo_ch7017.c
33324 @@ -443,7 +443,7 @@ static void ch7017_destroy(struct intel_dvo_device *dvo)
33325 }
33326 }
33327
33328 -struct intel_dvo_dev_ops ch7017_ops = {
33329 +const struct intel_dvo_dev_ops ch7017_ops = {
33330 .init = ch7017_init,
33331 .detect = ch7017_detect,
33332 .mode_valid = ch7017_mode_valid,
33333 diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
33334 index a9b8962..ac769ba 100644
33335 --- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
33336 +++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
33337 @@ -356,7 +356,7 @@ static void ch7xxx_destroy(struct intel_dvo_device *dvo)
33338 }
33339 }
33340
33341 -struct intel_dvo_dev_ops ch7xxx_ops = {
33342 +const struct intel_dvo_dev_ops ch7xxx_ops = {
33343 .init = ch7xxx_init,
33344 .detect = ch7xxx_detect,
33345 .mode_valid = ch7xxx_mode_valid,
33346 diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
33347 index aa176f9..ed2930c 100644
33348 --- a/drivers/gpu/drm/i915/dvo_ivch.c
33349 +++ b/drivers/gpu/drm/i915/dvo_ivch.c
33350 @@ -430,7 +430,7 @@ static void ivch_destroy(struct intel_dvo_device *dvo)
33351 }
33352 }
33353
33354 -struct intel_dvo_dev_ops ivch_ops= {
33355 +const struct intel_dvo_dev_ops ivch_ops= {
33356 .init = ivch_init,
33357 .dpms = ivch_dpms,
33358 .save = ivch_save,
33359 diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
33360 index e1c1f73..7dbebcf 100644
33361 --- a/drivers/gpu/drm/i915/dvo_sil164.c
33362 +++ b/drivers/gpu/drm/i915/dvo_sil164.c
33363 @@ -290,7 +290,7 @@ static void sil164_destroy(struct intel_dvo_device *dvo)
33364 }
33365 }
33366
33367 -struct intel_dvo_dev_ops sil164_ops = {
33368 +const struct intel_dvo_dev_ops sil164_ops = {
33369 .init = sil164_init,
33370 .detect = sil164_detect,
33371 .mode_valid = sil164_mode_valid,
33372 diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
33373 index 16dce84..7e1b6f8 100644
33374 --- a/drivers/gpu/drm/i915/dvo_tfp410.c
33375 +++ b/drivers/gpu/drm/i915/dvo_tfp410.c
33376 @@ -323,7 +323,7 @@ static void tfp410_destroy(struct intel_dvo_device *dvo)
33377 }
33378 }
33379
33380 -struct intel_dvo_dev_ops tfp410_ops = {
33381 +const struct intel_dvo_dev_ops tfp410_ops = {
33382 .init = tfp410_init,
33383 .detect = tfp410_detect,
33384 .mode_valid = tfp410_mode_valid,
33385 diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
33386 index 7e859d6..7d1cf2b 100644
33387 --- a/drivers/gpu/drm/i915/i915_debugfs.c
33388 +++ b/drivers/gpu/drm/i915/i915_debugfs.c
33389 @@ -192,7 +192,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
33390 I915_READ(GTIMR));
33391 }
33392 seq_printf(m, "Interrupts received: %d\n",
33393 - atomic_read(&dev_priv->irq_received));
33394 + atomic_read_unchecked(&dev_priv->irq_received));
33395 if (dev_priv->hw_status_page != NULL) {
33396 seq_printf(m, "Current sequence: %d\n",
33397 i915_get_gem_seqno(dev));
33398 diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
33399 index 5449239..7e4f68d 100644
33400 --- a/drivers/gpu/drm/i915/i915_drv.c
33401 +++ b/drivers/gpu/drm/i915/i915_drv.c
33402 @@ -285,7 +285,7 @@ i915_pci_resume(struct pci_dev *pdev)
33403 return i915_resume(dev);
33404 }
33405
33406 -static struct vm_operations_struct i915_gem_vm_ops = {
33407 +static const struct vm_operations_struct i915_gem_vm_ops = {
33408 .fault = i915_gem_fault,
33409 .open = drm_gem_vm_open,
33410 .close = drm_gem_vm_close,
33411 diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
33412 index 97163f7..c24c7c7 100644
33413 --- a/drivers/gpu/drm/i915/i915_drv.h
33414 +++ b/drivers/gpu/drm/i915/i915_drv.h
33415 @@ -168,7 +168,7 @@ struct drm_i915_display_funcs {
33416 /* display clock increase/decrease */
33417 /* pll clock increase/decrease */
33418 /* clock gating init */
33419 -};
33420 +} __no_const;
33421
33422 typedef struct drm_i915_private {
33423 struct drm_device *dev;
33424 @@ -197,7 +197,7 @@ typedef struct drm_i915_private {
33425 int page_flipping;
33426
33427 wait_queue_head_t irq_queue;
33428 - atomic_t irq_received;
33429 + atomic_unchecked_t irq_received;
33430 /** Protects user_irq_refcount and irq_mask_reg */
33431 spinlock_t user_irq_lock;
33432 /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
33433 diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
33434 index 27a3074..eb3f959 100644
33435 --- a/drivers/gpu/drm/i915/i915_gem.c
33436 +++ b/drivers/gpu/drm/i915/i915_gem.c
33437 @@ -102,7 +102,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
33438
33439 args->aper_size = dev->gtt_total;
33440 args->aper_available_size = (args->aper_size -
33441 - atomic_read(&dev->pin_memory));
33442 + atomic_read_unchecked(&dev->pin_memory));
33443
33444 return 0;
33445 }
33446 @@ -2058,7 +2058,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
33447
33448 if (obj_priv->gtt_space) {
33449 atomic_dec(&dev->gtt_count);
33450 - atomic_sub(obj->size, &dev->gtt_memory);
33451 + atomic_sub_unchecked(obj->size, &dev->gtt_memory);
33452
33453 drm_mm_put_block(obj_priv->gtt_space);
33454 obj_priv->gtt_space = NULL;
33455 @@ -2701,7 +2701,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
33456 goto search_free;
33457 }
33458 atomic_inc(&dev->gtt_count);
33459 - atomic_add(obj->size, &dev->gtt_memory);
33460 + atomic_add_unchecked(obj->size, &dev->gtt_memory);
33461
33462 /* Assert that the object is not currently in any GPU domain. As it
33463 * wasn't in the GTT, there shouldn't be any way it could have been in
33464 @@ -3755,9 +3755,9 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
33465 "%d/%d gtt bytes\n",
33466 atomic_read(&dev->object_count),
33467 atomic_read(&dev->pin_count),
33468 - atomic_read(&dev->object_memory),
33469 - atomic_read(&dev->pin_memory),
33470 - atomic_read(&dev->gtt_memory),
33471 + atomic_read_unchecked(&dev->object_memory),
33472 + atomic_read_unchecked(&dev->pin_memory),
33473 + atomic_read_unchecked(&dev->gtt_memory),
33474 dev->gtt_total);
33475 }
33476 goto err;
33477 @@ -3989,7 +3989,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
33478 */
33479 if (obj_priv->pin_count == 1) {
33480 atomic_inc(&dev->pin_count);
33481 - atomic_add(obj->size, &dev->pin_memory);
33482 + atomic_add_unchecked(obj->size, &dev->pin_memory);
33483 if (!obj_priv->active &&
33484 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
33485 !list_empty(&obj_priv->list))
33486 @@ -4022,7 +4022,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
33487 list_move_tail(&obj_priv->list,
33488 &dev_priv->mm.inactive_list);
33489 atomic_dec(&dev->pin_count);
33490 - atomic_sub(obj->size, &dev->pin_memory);
33491 + atomic_sub_unchecked(obj->size, &dev->pin_memory);
33492 }
33493 i915_verify_inactive(dev, __FILE__, __LINE__);
33494 }
33495 diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
33496 index 63f28ad..f5469da 100644
33497 --- a/drivers/gpu/drm/i915/i915_irq.c
33498 +++ b/drivers/gpu/drm/i915/i915_irq.c
33499 @@ -528,7 +528,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
33500 int irq_received;
33501 int ret = IRQ_NONE;
33502
33503 - atomic_inc(&dev_priv->irq_received);
33504 + atomic_inc_unchecked(&dev_priv->irq_received);
33505
33506 if (IS_IGDNG(dev))
33507 return igdng_irq_handler(dev);
33508 @@ -1021,7 +1021,7 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
33509 {
33510 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
33511
33512 - atomic_set(&dev_priv->irq_received, 0);
33513 + atomic_set_unchecked(&dev_priv->irq_received, 0);
33514
33515 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
33516 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
33517 diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
33518 index 5d9c6a7..d1b0e29 100644
33519 --- a/drivers/gpu/drm/i915/intel_sdvo.c
33520 +++ b/drivers/gpu/drm/i915/intel_sdvo.c
33521 @@ -2795,7 +2795,9 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
33522 sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, output_device);
33523
33524 /* Save the bit-banging i2c functionality for use by the DDC wrapper */
33525 - intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
33526 + pax_open_kernel();
33527 + *(void **)&intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
33528 + pax_close_kernel();
33529
33530 /* Read the regs to test if we can talk to the device */
33531 for (i = 0; i < 0x40; i++) {
33532 diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
33533 index be6c6b9..8615d9c 100644
33534 --- a/drivers/gpu/drm/mga/mga_drv.h
33535 +++ b/drivers/gpu/drm/mga/mga_drv.h
33536 @@ -120,9 +120,9 @@ typedef struct drm_mga_private {
33537 u32 clear_cmd;
33538 u32 maccess;
33539
33540 - atomic_t vbl_received; /**< Number of vblanks received. */
33541 + atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
33542 wait_queue_head_t fence_queue;
33543 - atomic_t last_fence_retired;
33544 + atomic_unchecked_t last_fence_retired;
33545 u32 next_fence_to_post;
33546
33547 unsigned int fb_cpp;
33548 diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
33549 index daa6041..a28a5da 100644
33550 --- a/drivers/gpu/drm/mga/mga_irq.c
33551 +++ b/drivers/gpu/drm/mga/mga_irq.c
33552 @@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
33553 if (crtc != 0)
33554 return 0;
33555
33556 - return atomic_read(&dev_priv->vbl_received);
33557 + return atomic_read_unchecked(&dev_priv->vbl_received);
33558 }
33559
33560
33561 @@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
33562 /* VBLANK interrupt */
33563 if (status & MGA_VLINEPEN) {
33564 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
33565 - atomic_inc(&dev_priv->vbl_received);
33566 + atomic_inc_unchecked(&dev_priv->vbl_received);
33567 drm_handle_vblank(dev, 0);
33568 handled = 1;
33569 }
33570 @@ -80,7 +80,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
33571 MGA_WRITE(MGA_PRIMEND, prim_end);
33572 }
33573
33574 - atomic_inc(&dev_priv->last_fence_retired);
33575 + atomic_inc_unchecked(&dev_priv->last_fence_retired);
33576 DRM_WAKEUP(&dev_priv->fence_queue);
33577 handled = 1;
33578 }
33579 @@ -131,7 +131,7 @@ int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence)
33580 * using fences.
33581 */
33582 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
33583 - (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
33584 + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
33585 - *sequence) <= (1 << 23)));
33586
33587 *sequence = cur_fence;
33588 diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
33589 index 4c39a40..b22a9ea 100644
33590 --- a/drivers/gpu/drm/r128/r128_cce.c
33591 +++ b/drivers/gpu/drm/r128/r128_cce.c
33592 @@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
33593
33594 /* GH: Simple idle check.
33595 */
33596 - atomic_set(&dev_priv->idle_count, 0);
33597 + atomic_set_unchecked(&dev_priv->idle_count, 0);
33598
33599 /* We don't support anything other than bus-mastering ring mode,
33600 * but the ring can be in either AGP or PCI space for the ring
33601 diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
33602 index 3c60829..4faf484 100644
33603 --- a/drivers/gpu/drm/r128/r128_drv.h
33604 +++ b/drivers/gpu/drm/r128/r128_drv.h
33605 @@ -90,14 +90,14 @@ typedef struct drm_r128_private {
33606 int is_pci;
33607 unsigned long cce_buffers_offset;
33608
33609 - atomic_t idle_count;
33610 + atomic_unchecked_t idle_count;
33611
33612 int page_flipping;
33613 int current_page;
33614 u32 crtc_offset;
33615 u32 crtc_offset_cntl;
33616
33617 - atomic_t vbl_received;
33618 + atomic_unchecked_t vbl_received;
33619
33620 u32 color_fmt;
33621 unsigned int front_offset;
33622 diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
33623 index 69810fb..97bf17a 100644
33624 --- a/drivers/gpu/drm/r128/r128_irq.c
33625 +++ b/drivers/gpu/drm/r128/r128_irq.c
33626 @@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
33627 if (crtc != 0)
33628 return 0;
33629
33630 - return atomic_read(&dev_priv->vbl_received);
33631 + return atomic_read_unchecked(&dev_priv->vbl_received);
33632 }
33633
33634 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
33635 @@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
33636 /* VBLANK interrupt */
33637 if (status & R128_CRTC_VBLANK_INT) {
33638 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
33639 - atomic_inc(&dev_priv->vbl_received);
33640 + atomic_inc_unchecked(&dev_priv->vbl_received);
33641 drm_handle_vblank(dev, 0);
33642 return IRQ_HANDLED;
33643 }
33644 diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
33645 index af2665c..51922d2 100644
33646 --- a/drivers/gpu/drm/r128/r128_state.c
33647 +++ b/drivers/gpu/drm/r128/r128_state.c
33648 @@ -323,10 +323,10 @@ static void r128_clear_box(drm_r128_private_t * dev_priv,
33649
33650 static void r128_cce_performance_boxes(drm_r128_private_t * dev_priv)
33651 {
33652 - if (atomic_read(&dev_priv->idle_count) == 0) {
33653 + if (atomic_read_unchecked(&dev_priv->idle_count) == 0) {
33654 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
33655 } else {
33656 - atomic_set(&dev_priv->idle_count, 0);
33657 + atomic_set_unchecked(&dev_priv->idle_count, 0);
33658 }
33659 }
33660
33661 diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
33662 index dd72b91..8644b3c 100644
33663 --- a/drivers/gpu/drm/radeon/atom.c
33664 +++ b/drivers/gpu/drm/radeon/atom.c
33665 @@ -1115,6 +1115,8 @@ struct atom_context *atom_parse(struct card_info *card, void *bios)
33666 char name[512];
33667 int i;
33668
33669 + pax_track_stack();
33670 +
33671 ctx->card = card;
33672 ctx->bios = bios;
33673
33674 diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
33675 index 0d79577..efaa7a5 100644
33676 --- a/drivers/gpu/drm/radeon/mkregtable.c
33677 +++ b/drivers/gpu/drm/radeon/mkregtable.c
33678 @@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
33679 regex_t mask_rex;
33680 regmatch_t match[4];
33681 char buf[1024];
33682 - size_t end;
33683 + long end;
33684 int len;
33685 int done = 0;
33686 int r;
33687 unsigned o;
33688 struct offset *offset;
33689 char last_reg_s[10];
33690 - int last_reg;
33691 + unsigned long last_reg;
33692
33693 if (regcomp
33694 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
33695 diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
33696 index 6735213..38c2c67 100644
33697 --- a/drivers/gpu/drm/radeon/radeon.h
33698 +++ b/drivers/gpu/drm/radeon/radeon.h
33699 @@ -149,7 +149,7 @@ int radeon_pm_init(struct radeon_device *rdev);
33700 */
33701 struct radeon_fence_driver {
33702 uint32_t scratch_reg;
33703 - atomic_t seq;
33704 + atomic_unchecked_t seq;
33705 uint32_t last_seq;
33706 unsigned long count_timeout;
33707 wait_queue_head_t queue;
33708 @@ -640,7 +640,7 @@ struct radeon_asic {
33709 uint32_t offset, uint32_t obj_size);
33710 int (*clear_surface_reg)(struct radeon_device *rdev, int reg);
33711 void (*bandwidth_update)(struct radeon_device *rdev);
33712 -};
33713 +} __no_const;
33714
33715 /*
33716 * Asic structures
33717 diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
33718 index 4e928b9..d8b6008 100644
33719 --- a/drivers/gpu/drm/radeon/radeon_atombios.c
33720 +++ b/drivers/gpu/drm/radeon/radeon_atombios.c
33721 @@ -275,6 +275,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
33722 bool linkb;
33723 struct radeon_i2c_bus_rec ddc_bus;
33724
33725 + pax_track_stack();
33726 +
33727 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
33728
33729 if (data_offset == 0)
33730 @@ -520,13 +522,13 @@ static uint16_t atombios_get_connector_object_id(struct drm_device *dev,
33731 }
33732 }
33733
33734 -struct bios_connector {
33735 +static struct bios_connector {
33736 bool valid;
33737 uint16_t line_mux;
33738 uint16_t devices;
33739 int connector_type;
33740 struct radeon_i2c_bus_rec ddc_bus;
33741 -};
33742 +} bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
33743
33744 bool radeon_get_atom_connector_info_from_supported_devices_table(struct
33745 drm_device
33746 @@ -542,7 +544,6 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
33747 uint8_t dac;
33748 union atom_supported_devices *supported_devices;
33749 int i, j;
33750 - struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
33751
33752 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
33753
33754 diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
33755 index 083a181..ccccae0 100644
33756 --- a/drivers/gpu/drm/radeon/radeon_display.c
33757 +++ b/drivers/gpu/drm/radeon/radeon_display.c
33758 @@ -482,7 +482,7 @@ void radeon_compute_pll(struct radeon_pll *pll,
33759
33760 if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
33761 error = freq - current_freq;
33762 - error = error < 0 ? 0xffffffff : error;
33763 + error = (int32_t)error < 0 ? 0xffffffff : error;
33764 } else
33765 error = abs(current_freq - freq);
33766 vco_diff = abs(vco - best_vco);
33767 diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
33768 index 76e4070..193fa7f 100644
33769 --- a/drivers/gpu/drm/radeon/radeon_drv.h
33770 +++ b/drivers/gpu/drm/radeon/radeon_drv.h
33771 @@ -253,7 +253,7 @@ typedef struct drm_radeon_private {
33772
33773 /* SW interrupt */
33774 wait_queue_head_t swi_queue;
33775 - atomic_t swi_emitted;
33776 + atomic_unchecked_t swi_emitted;
33777 int vblank_crtc;
33778 uint32_t irq_enable_reg;
33779 uint32_t r500_disp_irq_reg;
33780 diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
33781 index 3beb26d..6ce9c4a 100644
33782 --- a/drivers/gpu/drm/radeon/radeon_fence.c
33783 +++ b/drivers/gpu/drm/radeon/radeon_fence.c
33784 @@ -47,7 +47,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
33785 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
33786 return 0;
33787 }
33788 - fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
33789 + fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
33790 if (!rdev->cp.ready) {
33791 /* FIXME: cp is not running assume everythings is done right
33792 * away
33793 @@ -364,7 +364,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
33794 return r;
33795 }
33796 WREG32(rdev->fence_drv.scratch_reg, 0);
33797 - atomic_set(&rdev->fence_drv.seq, 0);
33798 + atomic_set_unchecked(&rdev->fence_drv.seq, 0);
33799 INIT_LIST_HEAD(&rdev->fence_drv.created);
33800 INIT_LIST_HEAD(&rdev->fence_drv.emited);
33801 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
33802 diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
33803 index a1bf11d..4a123c0 100644
33804 --- a/drivers/gpu/drm/radeon/radeon_ioc32.c
33805 +++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
33806 @@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
33807 request = compat_alloc_user_space(sizeof(*request));
33808 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
33809 || __put_user(req32.param, &request->param)
33810 - || __put_user((void __user *)(unsigned long)req32.value,
33811 + || __put_user((unsigned long)req32.value,
33812 &request->value))
33813 return -EFAULT;
33814
33815 diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
33816 index b79ecc4..8dab92d 100644
33817 --- a/drivers/gpu/drm/radeon/radeon_irq.c
33818 +++ b/drivers/gpu/drm/radeon/radeon_irq.c
33819 @@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_device * dev)
33820 unsigned int ret;
33821 RING_LOCALS;
33822
33823 - atomic_inc(&dev_priv->swi_emitted);
33824 - ret = atomic_read(&dev_priv->swi_emitted);
33825 + atomic_inc_unchecked(&dev_priv->swi_emitted);
33826 + ret = atomic_read_unchecked(&dev_priv->swi_emitted);
33827
33828 BEGIN_RING(4);
33829 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
33830 @@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
33831 drm_radeon_private_t *dev_priv =
33832 (drm_radeon_private_t *) dev->dev_private;
33833
33834 - atomic_set(&dev_priv->swi_emitted, 0);
33835 + atomic_set_unchecked(&dev_priv->swi_emitted, 0);
33836 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
33837
33838 dev->max_vblank_count = 0x001fffff;
33839 diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
33840 index 4747910..48ca4b3 100644
33841 --- a/drivers/gpu/drm/radeon/radeon_state.c
33842 +++ b/drivers/gpu/drm/radeon/radeon_state.c
33843 @@ -3021,7 +3021,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
33844 {
33845 drm_radeon_private_t *dev_priv = dev->dev_private;
33846 drm_radeon_getparam_t *param = data;
33847 - int value;
33848 + int value = 0;
33849
33850 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
33851
33852 diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
33853 index 1381e06..0e53b17 100644
33854 --- a/drivers/gpu/drm/radeon/radeon_ttm.c
33855 +++ b/drivers/gpu/drm/radeon/radeon_ttm.c
33856 @@ -535,27 +535,10 @@ void radeon_ttm_fini(struct radeon_device *rdev)
33857 DRM_INFO("radeon: ttm finalized\n");
33858 }
33859
33860 -static struct vm_operations_struct radeon_ttm_vm_ops;
33861 -static const struct vm_operations_struct *ttm_vm_ops = NULL;
33862 -
33863 -static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
33864 -{
33865 - struct ttm_buffer_object *bo;
33866 - int r;
33867 -
33868 - bo = (struct ttm_buffer_object *)vma->vm_private_data;
33869 - if (bo == NULL) {
33870 - return VM_FAULT_NOPAGE;
33871 - }
33872 - r = ttm_vm_ops->fault(vma, vmf);
33873 - return r;
33874 -}
33875 -
33876 int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
33877 {
33878 struct drm_file *file_priv;
33879 struct radeon_device *rdev;
33880 - int r;
33881
33882 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
33883 return drm_mmap(filp, vma);
33884 @@ -563,20 +546,9 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
33885
33886 file_priv = (struct drm_file *)filp->private_data;
33887 rdev = file_priv->minor->dev->dev_private;
33888 - if (rdev == NULL) {
33889 + if (!rdev)
33890 return -EINVAL;
33891 - }
33892 - r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
33893 - if (unlikely(r != 0)) {
33894 - return r;
33895 - }
33896 - if (unlikely(ttm_vm_ops == NULL)) {
33897 - ttm_vm_ops = vma->vm_ops;
33898 - radeon_ttm_vm_ops = *ttm_vm_ops;
33899 - radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
33900 - }
33901 - vma->vm_ops = &radeon_ttm_vm_ops;
33902 - return 0;
33903 + return ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
33904 }
33905
33906
33907 diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
33908 index b12ff76..0bd0c6e 100644
33909 --- a/drivers/gpu/drm/radeon/rs690.c
33910 +++ b/drivers/gpu/drm/radeon/rs690.c
33911 @@ -302,9 +302,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
33912 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
33913 rdev->pm.sideport_bandwidth.full)
33914 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
33915 - read_delay_latency.full = rfixed_const(370 * 800 * 1000);
33916 + read_delay_latency.full = rfixed_const(800 * 1000);
33917 read_delay_latency.full = rfixed_div(read_delay_latency,
33918 rdev->pm.igp_sideport_mclk);
33919 + a.full = rfixed_const(370);
33920 + read_delay_latency.full = rfixed_mul(read_delay_latency, a);
33921 } else {
33922 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
33923 rdev->pm.k8_bandwidth.full)
33924 diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
33925 index 0ed436e..e6e7ce3 100644
33926 --- a/drivers/gpu/drm/ttm/ttm_bo.c
33927 +++ b/drivers/gpu/drm/ttm/ttm_bo.c
33928 @@ -67,7 +67,7 @@ static struct attribute *ttm_bo_global_attrs[] = {
33929 NULL
33930 };
33931
33932 -static struct sysfs_ops ttm_bo_global_ops = {
33933 +static const struct sysfs_ops ttm_bo_global_ops = {
33934 .show = &ttm_bo_global_show
33935 };
33936
33937 diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
33938 index 1c040d0..f9e4af8 100644
33939 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
33940 +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
33941 @@ -73,7 +73,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
33942 {
33943 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
33944 vma->vm_private_data;
33945 - struct ttm_bo_device *bdev = bo->bdev;
33946 + struct ttm_bo_device *bdev;
33947 unsigned long bus_base;
33948 unsigned long bus_offset;
33949 unsigned long bus_size;
33950 @@ -88,6 +88,10 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
33951 unsigned long address = (unsigned long)vmf->virtual_address;
33952 int retval = VM_FAULT_NOPAGE;
33953
33954 + if (!bo)
33955 + return VM_FAULT_NOPAGE;
33956 + bdev = bo->bdev;
33957 +
33958 /*
33959 * Work around locking order reversal in fault / nopfn
33960 * between mmap_sem and bo_reserve: Perform a trylock operation
33961 diff --git a/drivers/gpu/drm/ttm/ttm_global.c b/drivers/gpu/drm/ttm/ttm_global.c
33962 index b170071..28ae90e 100644
33963 --- a/drivers/gpu/drm/ttm/ttm_global.c
33964 +++ b/drivers/gpu/drm/ttm/ttm_global.c
33965 @@ -36,7 +36,7 @@
33966 struct ttm_global_item {
33967 struct mutex mutex;
33968 void *object;
33969 - int refcount;
33970 + atomic_t refcount;
33971 };
33972
33973 static struct ttm_global_item glob[TTM_GLOBAL_NUM];
33974 @@ -49,7 +49,7 @@ void ttm_global_init(void)
33975 struct ttm_global_item *item = &glob[i];
33976 mutex_init(&item->mutex);
33977 item->object = NULL;
33978 - item->refcount = 0;
33979 + atomic_set(&item->refcount, 0);
33980 }
33981 }
33982
33983 @@ -59,7 +59,7 @@ void ttm_global_release(void)
33984 for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
33985 struct ttm_global_item *item = &glob[i];
33986 BUG_ON(item->object != NULL);
33987 - BUG_ON(item->refcount != 0);
33988 + BUG_ON(atomic_read(&item->refcount) != 0);
33989 }
33990 }
33991
33992 @@ -70,7 +70,7 @@ int ttm_global_item_ref(struct ttm_global_reference *ref)
33993 void *object;
33994
33995 mutex_lock(&item->mutex);
33996 - if (item->refcount == 0) {
33997 + if (atomic_read(&item->refcount) == 0) {
33998 item->object = kzalloc(ref->size, GFP_KERNEL);
33999 if (unlikely(item->object == NULL)) {
34000 ret = -ENOMEM;
34001 @@ -83,7 +83,7 @@ int ttm_global_item_ref(struct ttm_global_reference *ref)
34002 goto out_err;
34003
34004 }
34005 - ++item->refcount;
34006 + atomic_inc(&item->refcount);
34007 ref->object = item->object;
34008 object = item->object;
34009 mutex_unlock(&item->mutex);
34010 @@ -100,9 +100,9 @@ void ttm_global_item_unref(struct ttm_global_reference *ref)
34011 struct ttm_global_item *item = &glob[ref->global_type];
34012
34013 mutex_lock(&item->mutex);
34014 - BUG_ON(item->refcount == 0);
34015 + BUG_ON(atomic_read(&item->refcount) == 0);
34016 BUG_ON(ref->object != item->object);
34017 - if (--item->refcount == 0) {
34018 + if (atomic_dec_and_test(&item->refcount)) {
34019 ref->release(ref);
34020 item->object = NULL;
34021 }
34022 diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
34023 index 072c281..d8ef483 100644
34024 --- a/drivers/gpu/drm/ttm/ttm_memory.c
34025 +++ b/drivers/gpu/drm/ttm/ttm_memory.c
34026 @@ -152,7 +152,7 @@ static struct attribute *ttm_mem_zone_attrs[] = {
34027 NULL
34028 };
34029
34030 -static struct sysfs_ops ttm_mem_zone_ops = {
34031 +static const struct sysfs_ops ttm_mem_zone_ops = {
34032 .show = &ttm_mem_zone_show,
34033 .store = &ttm_mem_zone_store
34034 };
34035 diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
34036 index cafcb84..b8e66cc 100644
34037 --- a/drivers/gpu/drm/via/via_drv.h
34038 +++ b/drivers/gpu/drm/via/via_drv.h
34039 @@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
34040 typedef uint32_t maskarray_t[5];
34041
34042 typedef struct drm_via_irq {
34043 - atomic_t irq_received;
34044 + atomic_unchecked_t irq_received;
34045 uint32_t pending_mask;
34046 uint32_t enable_mask;
34047 wait_queue_head_t irq_queue;
34048 @@ -75,7 +75,7 @@ typedef struct drm_via_private {
34049 struct timeval last_vblank;
34050 int last_vblank_valid;
34051 unsigned usec_per_vblank;
34052 - atomic_t vbl_received;
34053 + atomic_unchecked_t vbl_received;
34054 drm_via_state_t hc_state;
34055 char pci_buf[VIA_PCI_BUF_SIZE];
34056 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
34057 diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
34058 index 5935b88..127a8a6 100644
34059 --- a/drivers/gpu/drm/via/via_irq.c
34060 +++ b/drivers/gpu/drm/via/via_irq.c
34061 @@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
34062 if (crtc != 0)
34063 return 0;
34064
34065 - return atomic_read(&dev_priv->vbl_received);
34066 + return atomic_read_unchecked(&dev_priv->vbl_received);
34067 }
34068
34069 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
34070 @@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
34071
34072 status = VIA_READ(VIA_REG_INTERRUPT);
34073 if (status & VIA_IRQ_VBLANK_PENDING) {
34074 - atomic_inc(&dev_priv->vbl_received);
34075 - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
34076 + atomic_inc_unchecked(&dev_priv->vbl_received);
34077 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
34078 do_gettimeofday(&cur_vblank);
34079 if (dev_priv->last_vblank_valid) {
34080 dev_priv->usec_per_vblank =
34081 @@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
34082 dev_priv->last_vblank = cur_vblank;
34083 dev_priv->last_vblank_valid = 1;
34084 }
34085 - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
34086 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
34087 DRM_DEBUG("US per vblank is: %u\n",
34088 dev_priv->usec_per_vblank);
34089 }
34090 @@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
34091
34092 for (i = 0; i < dev_priv->num_irqs; ++i) {
34093 if (status & cur_irq->pending_mask) {
34094 - atomic_inc(&cur_irq->irq_received);
34095 + atomic_inc_unchecked(&cur_irq->irq_received);
34096 DRM_WAKEUP(&cur_irq->irq_queue);
34097 handled = 1;
34098 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) {
34099 @@ -244,11 +244,11 @@ via_driver_irq_wait(struct drm_device * dev, unsigned int irq, int force_sequenc
34100 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
34101 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
34102 masks[irq][4]));
34103 - cur_irq_sequence = atomic_read(&cur_irq->irq_received);
34104 + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
34105 } else {
34106 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
34107 (((cur_irq_sequence =
34108 - atomic_read(&cur_irq->irq_received)) -
34109 + atomic_read_unchecked(&cur_irq->irq_received)) -
34110 *sequence) <= (1 << 23)));
34111 }
34112 *sequence = cur_irq_sequence;
34113 @@ -286,7 +286,7 @@ void via_driver_irq_preinstall(struct drm_device * dev)
34114 }
34115
34116 for (i = 0; i < dev_priv->num_irqs; ++i) {
34117 - atomic_set(&cur_irq->irq_received, 0);
34118 + atomic_set_unchecked(&cur_irq->irq_received, 0);
34119 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
34120 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
34121 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
34122 @@ -368,7 +368,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
34123 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
34124 case VIA_IRQ_RELATIVE:
34125 irqwait->request.sequence +=
34126 - atomic_read(&cur_irq->irq_received);
34127 + atomic_read_unchecked(&cur_irq->irq_received);
34128 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
34129 case VIA_IRQ_ABSOLUTE:
34130 break;
34131 diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
34132 index aa8688d..6a0140c 100644
34133 --- a/drivers/gpu/vga/vgaarb.c
34134 +++ b/drivers/gpu/vga/vgaarb.c
34135 @@ -894,14 +894,20 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
34136 uc = &priv->cards[i];
34137 }
34138
34139 - if (!uc)
34140 - return -EINVAL;
34141 + if (!uc) {
34142 + ret_val = -EINVAL;
34143 + goto done;
34144 + }
34145
34146 - if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0)
34147 - return -EINVAL;
34148 + if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0) {
34149 + ret_val = -EINVAL;
34150 + goto done;
34151 + }
34152
34153 - if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0)
34154 - return -EINVAL;
34155 + if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0) {
34156 + ret_val = -EINVAL;
34157 + goto done;
34158 + }
34159
34160 vga_put(pdev, io_state);
34161
34162 diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
34163 index 11f8069..4783396 100644
34164 --- a/drivers/hid/hid-core.c
34165 +++ b/drivers/hid/hid-core.c
34166 @@ -1752,7 +1752,7 @@ static bool hid_ignore(struct hid_device *hdev)
34167
34168 int hid_add_device(struct hid_device *hdev)
34169 {
34170 - static atomic_t id = ATOMIC_INIT(0);
34171 + static atomic_unchecked_t id = ATOMIC_INIT(0);
34172 int ret;
34173
34174 if (WARN_ON(hdev->status & HID_STAT_ADDED))
34175 @@ -1766,7 +1766,7 @@ int hid_add_device(struct hid_device *hdev)
34176 /* XXX hack, any other cleaner solution after the driver core
34177 * is converted to allow more than 20 bytes as the device name? */
34178 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
34179 - hdev->vendor, hdev->product, atomic_inc_return(&id));
34180 + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
34181
34182 ret = device_add(&hdev->dev);
34183 if (!ret)
34184 diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
34185 index 8b6ee24..70f657d 100644
34186 --- a/drivers/hid/usbhid/hiddev.c
34187 +++ b/drivers/hid/usbhid/hiddev.c
34188 @@ -617,7 +617,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
34189 return put_user(HID_VERSION, (int __user *)arg);
34190
34191 case HIDIOCAPPLICATION:
34192 - if (arg < 0 || arg >= hid->maxapplication)
34193 + if (arg >= hid->maxapplication)
34194 return -EINVAL;
34195
34196 for (i = 0; i < hid->maxcollection; i++)
34197 diff --git a/drivers/hwmon/lis3lv02d.c b/drivers/hwmon/lis3lv02d.c
34198 index 5d5ed69..f40533e 100644
34199 --- a/drivers/hwmon/lis3lv02d.c
34200 +++ b/drivers/hwmon/lis3lv02d.c
34201 @@ -146,7 +146,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *dummy)
34202 * the lid is closed. This leads to interrupts as soon as a little move
34203 * is done.
34204 */
34205 - atomic_inc(&lis3_dev.count);
34206 + atomic_inc_unchecked(&lis3_dev.count);
34207
34208 wake_up_interruptible(&lis3_dev.misc_wait);
34209 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
34210 @@ -160,7 +160,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
34211 if (test_and_set_bit(0, &lis3_dev.misc_opened))
34212 return -EBUSY; /* already open */
34213
34214 - atomic_set(&lis3_dev.count, 0);
34215 + atomic_set_unchecked(&lis3_dev.count, 0);
34216
34217 /*
34218 * The sensor can generate interrupts for free-fall and direction
34219 @@ -206,7 +206,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
34220 add_wait_queue(&lis3_dev.misc_wait, &wait);
34221 while (true) {
34222 set_current_state(TASK_INTERRUPTIBLE);
34223 - data = atomic_xchg(&lis3_dev.count, 0);
34224 + data = atomic_xchg_unchecked(&lis3_dev.count, 0);
34225 if (data)
34226 break;
34227
34228 @@ -244,7 +244,7 @@ out:
34229 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
34230 {
34231 poll_wait(file, &lis3_dev.misc_wait, wait);
34232 - if (atomic_read(&lis3_dev.count))
34233 + if (atomic_read_unchecked(&lis3_dev.count))
34234 return POLLIN | POLLRDNORM;
34235 return 0;
34236 }
34237 diff --git a/drivers/hwmon/lis3lv02d.h b/drivers/hwmon/lis3lv02d.h
34238 index 7cdd76f..fe0efdf 100644
34239 --- a/drivers/hwmon/lis3lv02d.h
34240 +++ b/drivers/hwmon/lis3lv02d.h
34241 @@ -201,7 +201,7 @@ struct lis3lv02d {
34242
34243 struct input_polled_dev *idev; /* input device */
34244 struct platform_device *pdev; /* platform device */
34245 - atomic_t count; /* interrupt count after last read */
34246 + atomic_unchecked_t count; /* interrupt count after last read */
34247 int xcalib; /* calibrated null value for x */
34248 int ycalib; /* calibrated null value for y */
34249 int zcalib; /* calibrated null value for z */
34250 diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
34251 index 740785e..5a5c6c6 100644
34252 --- a/drivers/hwmon/sht15.c
34253 +++ b/drivers/hwmon/sht15.c
34254 @@ -112,7 +112,7 @@ struct sht15_data {
34255 int supply_uV;
34256 int supply_uV_valid;
34257 struct work_struct update_supply_work;
34258 - atomic_t interrupt_handled;
34259 + atomic_unchecked_t interrupt_handled;
34260 };
34261
34262 /**
34263 @@ -245,13 +245,13 @@ static inline int sht15_update_single_val(struct sht15_data *data,
34264 return ret;
34265
34266 gpio_direction_input(data->pdata->gpio_data);
34267 - atomic_set(&data->interrupt_handled, 0);
34268 + atomic_set_unchecked(&data->interrupt_handled, 0);
34269
34270 enable_irq(gpio_to_irq(data->pdata->gpio_data));
34271 if (gpio_get_value(data->pdata->gpio_data) == 0) {
34272 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
34273 /* Only relevant if the interrupt hasn't occured. */
34274 - if (!atomic_read(&data->interrupt_handled))
34275 + if (!atomic_read_unchecked(&data->interrupt_handled))
34276 schedule_work(&data->read_work);
34277 }
34278 ret = wait_event_timeout(data->wait_queue,
34279 @@ -398,7 +398,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
34280 struct sht15_data *data = d;
34281 /* First disable the interrupt */
34282 disable_irq_nosync(irq);
34283 - atomic_inc(&data->interrupt_handled);
34284 + atomic_inc_unchecked(&data->interrupt_handled);
34285 /* Then schedule a reading work struct */
34286 if (data->flag != SHT15_READING_NOTHING)
34287 schedule_work(&data->read_work);
34288 @@ -449,11 +449,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
34289 here as could have gone low in meantime so verify
34290 it hasn't!
34291 */
34292 - atomic_set(&data->interrupt_handled, 0);
34293 + atomic_set_unchecked(&data->interrupt_handled, 0);
34294 enable_irq(gpio_to_irq(data->pdata->gpio_data));
34295 /* If still not occured or another handler has been scheduled */
34296 if (gpio_get_value(data->pdata->gpio_data)
34297 - || atomic_read(&data->interrupt_handled))
34298 + || atomic_read_unchecked(&data->interrupt_handled))
34299 return;
34300 }
34301 /* Read the data back from the device */
34302 diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c
34303 index 97851c5..cb40626 100644
34304 --- a/drivers/hwmon/w83791d.c
34305 +++ b/drivers/hwmon/w83791d.c
34306 @@ -330,8 +330,8 @@ static int w83791d_detect(struct i2c_client *client, int kind,
34307 struct i2c_board_info *info);
34308 static int w83791d_remove(struct i2c_client *client);
34309
34310 -static int w83791d_read(struct i2c_client *client, u8 register);
34311 -static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
34312 +static int w83791d_read(struct i2c_client *client, u8 reg);
34313 +static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
34314 static struct w83791d_data *w83791d_update_device(struct device *dev);
34315
34316 #ifdef DEBUG
34317 diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
34318 index 378fcb5..5e91fa8 100644
34319 --- a/drivers/i2c/busses/i2c-amd756-s4882.c
34320 +++ b/drivers/i2c/busses/i2c-amd756-s4882.c
34321 @@ -43,7 +43,7 @@
34322 extern struct i2c_adapter amd756_smbus;
34323
34324 static struct i2c_adapter *s4882_adapter;
34325 -static struct i2c_algorithm *s4882_algo;
34326 +static i2c_algorithm_no_const *s4882_algo;
34327
34328 /* Wrapper access functions for multiplexed SMBus */
34329 static DEFINE_MUTEX(amd756_lock);
34330 diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
34331 index 29015eb..af2d8e9 100644
34332 --- a/drivers/i2c/busses/i2c-nforce2-s4985.c
34333 +++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
34334 @@ -41,7 +41,7 @@
34335 extern struct i2c_adapter *nforce2_smbus;
34336
34337 static struct i2c_adapter *s4985_adapter;
34338 -static struct i2c_algorithm *s4985_algo;
34339 +static i2c_algorithm_no_const *s4985_algo;
34340
34341 /* Wrapper access functions for multiplexed SMBus */
34342 static DEFINE_MUTEX(nforce2_lock);
34343 diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
34344 index 878f8ec..12376fc 100644
34345 --- a/drivers/ide/aec62xx.c
34346 +++ b/drivers/ide/aec62xx.c
34347 @@ -180,7 +180,7 @@ static const struct ide_port_ops atp86x_port_ops = {
34348 .cable_detect = atp86x_cable_detect,
34349 };
34350
34351 -static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
34352 +static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
34353 { /* 0: AEC6210 */
34354 .name = DRV_NAME,
34355 .init_chipset = init_chipset_aec62xx,
34356 diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
34357 index e59b6de..4b4fc65 100644
34358 --- a/drivers/ide/alim15x3.c
34359 +++ b/drivers/ide/alim15x3.c
34360 @@ -509,7 +509,7 @@ static const struct ide_dma_ops ali_dma_ops = {
34361 .dma_sff_read_status = ide_dma_sff_read_status,
34362 };
34363
34364 -static const struct ide_port_info ali15x3_chipset __devinitdata = {
34365 +static const struct ide_port_info ali15x3_chipset __devinitconst = {
34366 .name = DRV_NAME,
34367 .init_chipset = init_chipset_ali15x3,
34368 .init_hwif = init_hwif_ali15x3,
34369 diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
34370 index 628cd2e..087a414 100644
34371 --- a/drivers/ide/amd74xx.c
34372 +++ b/drivers/ide/amd74xx.c
34373 @@ -221,7 +221,7 @@ static const struct ide_port_ops amd_port_ops = {
34374 .udma_mask = udma, \
34375 }
34376
34377 -static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
34378 +static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
34379 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
34380 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
34381 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
34382 diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
34383 index 837322b..837fd71 100644
34384 --- a/drivers/ide/atiixp.c
34385 +++ b/drivers/ide/atiixp.c
34386 @@ -137,7 +137,7 @@ static const struct ide_port_ops atiixp_port_ops = {
34387 .cable_detect = atiixp_cable_detect,
34388 };
34389
34390 -static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
34391 +static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
34392 { /* 0: IXP200/300/400/700 */
34393 .name = DRV_NAME,
34394 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
34395 diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
34396 index ca0c46f..d55318a 100644
34397 --- a/drivers/ide/cmd64x.c
34398 +++ b/drivers/ide/cmd64x.c
34399 @@ -372,7 +372,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = {
34400 .dma_sff_read_status = ide_dma_sff_read_status,
34401 };
34402
34403 -static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
34404 +static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
34405 { /* 0: CMD643 */
34406 .name = DRV_NAME,
34407 .init_chipset = init_chipset_cmd64x,
34408 diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
34409 index 09f98ed..cebc5bc 100644
34410 --- a/drivers/ide/cs5520.c
34411 +++ b/drivers/ide/cs5520.c
34412 @@ -93,7 +93,7 @@ static const struct ide_port_ops cs5520_port_ops = {
34413 .set_dma_mode = cs5520_set_dma_mode,
34414 };
34415
34416 -static const struct ide_port_info cyrix_chipset __devinitdata = {
34417 +static const struct ide_port_info cyrix_chipset __devinitconst = {
34418 .name = DRV_NAME,
34419 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
34420 .port_ops = &cs5520_port_ops,
34421 diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
34422 index 40bf05e..7d58ca0 100644
34423 --- a/drivers/ide/cs5530.c
34424 +++ b/drivers/ide/cs5530.c
34425 @@ -244,7 +244,7 @@ static const struct ide_port_ops cs5530_port_ops = {
34426 .udma_filter = cs5530_udma_filter,
34427 };
34428
34429 -static const struct ide_port_info cs5530_chipset __devinitdata = {
34430 +static const struct ide_port_info cs5530_chipset __devinitconst = {
34431 .name = DRV_NAME,
34432 .init_chipset = init_chipset_cs5530,
34433 .init_hwif = init_hwif_cs5530,
34434 diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
34435 index 983d957..53e6172 100644
34436 --- a/drivers/ide/cs5535.c
34437 +++ b/drivers/ide/cs5535.c
34438 @@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = {
34439 .cable_detect = cs5535_cable_detect,
34440 };
34441
34442 -static const struct ide_port_info cs5535_chipset __devinitdata = {
34443 +static const struct ide_port_info cs5535_chipset __devinitconst = {
34444 .name = DRV_NAME,
34445 .port_ops = &cs5535_port_ops,
34446 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
34447 diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
34448 index 74fc540..8e933d8 100644
34449 --- a/drivers/ide/cy82c693.c
34450 +++ b/drivers/ide/cy82c693.c
34451 @@ -288,7 +288,7 @@ static const struct ide_port_ops cy82c693_port_ops = {
34452 .set_dma_mode = cy82c693_set_dma_mode,
34453 };
34454
34455 -static const struct ide_port_info cy82c693_chipset __devinitdata = {
34456 +static const struct ide_port_info cy82c693_chipset __devinitconst = {
34457 .name = DRV_NAME,
34458 .init_iops = init_iops_cy82c693,
34459 .port_ops = &cy82c693_port_ops,
34460 diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
34461 index 7ce68ef..e78197d 100644
34462 --- a/drivers/ide/hpt366.c
34463 +++ b/drivers/ide/hpt366.c
34464 @@ -507,7 +507,7 @@ static struct hpt_timings hpt37x_timings = {
34465 }
34466 };
34467
34468 -static const struct hpt_info hpt36x __devinitdata = {
34469 +static const struct hpt_info hpt36x __devinitconst = {
34470 .chip_name = "HPT36x",
34471 .chip_type = HPT36x,
34472 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
34473 @@ -515,7 +515,7 @@ static const struct hpt_info hpt36x __devinitdata = {
34474 .timings = &hpt36x_timings
34475 };
34476
34477 -static const struct hpt_info hpt370 __devinitdata = {
34478 +static const struct hpt_info hpt370 __devinitconst = {
34479 .chip_name = "HPT370",
34480 .chip_type = HPT370,
34481 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
34482 @@ -523,7 +523,7 @@ static const struct hpt_info hpt370 __devinitdata = {
34483 .timings = &hpt37x_timings
34484 };
34485
34486 -static const struct hpt_info hpt370a __devinitdata = {
34487 +static const struct hpt_info hpt370a __devinitconst = {
34488 .chip_name = "HPT370A",
34489 .chip_type = HPT370A,
34490 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
34491 @@ -531,7 +531,7 @@ static const struct hpt_info hpt370a __devinitdata = {
34492 .timings = &hpt37x_timings
34493 };
34494
34495 -static const struct hpt_info hpt374 __devinitdata = {
34496 +static const struct hpt_info hpt374 __devinitconst = {
34497 .chip_name = "HPT374",
34498 .chip_type = HPT374,
34499 .udma_mask = ATA_UDMA5,
34500 @@ -539,7 +539,7 @@ static const struct hpt_info hpt374 __devinitdata = {
34501 .timings = &hpt37x_timings
34502 };
34503
34504 -static const struct hpt_info hpt372 __devinitdata = {
34505 +static const struct hpt_info hpt372 __devinitconst = {
34506 .chip_name = "HPT372",
34507 .chip_type = HPT372,
34508 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34509 @@ -547,7 +547,7 @@ static const struct hpt_info hpt372 __devinitdata = {
34510 .timings = &hpt37x_timings
34511 };
34512
34513 -static const struct hpt_info hpt372a __devinitdata = {
34514 +static const struct hpt_info hpt372a __devinitconst = {
34515 .chip_name = "HPT372A",
34516 .chip_type = HPT372A,
34517 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34518 @@ -555,7 +555,7 @@ static const struct hpt_info hpt372a __devinitdata = {
34519 .timings = &hpt37x_timings
34520 };
34521
34522 -static const struct hpt_info hpt302 __devinitdata = {
34523 +static const struct hpt_info hpt302 __devinitconst = {
34524 .chip_name = "HPT302",
34525 .chip_type = HPT302,
34526 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34527 @@ -563,7 +563,7 @@ static const struct hpt_info hpt302 __devinitdata = {
34528 .timings = &hpt37x_timings
34529 };
34530
34531 -static const struct hpt_info hpt371 __devinitdata = {
34532 +static const struct hpt_info hpt371 __devinitconst = {
34533 .chip_name = "HPT371",
34534 .chip_type = HPT371,
34535 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34536 @@ -571,7 +571,7 @@ static const struct hpt_info hpt371 __devinitdata = {
34537 .timings = &hpt37x_timings
34538 };
34539
34540 -static const struct hpt_info hpt372n __devinitdata = {
34541 +static const struct hpt_info hpt372n __devinitconst = {
34542 .chip_name = "HPT372N",
34543 .chip_type = HPT372N,
34544 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34545 @@ -579,7 +579,7 @@ static const struct hpt_info hpt372n __devinitdata = {
34546 .timings = &hpt37x_timings
34547 };
34548
34549 -static const struct hpt_info hpt302n __devinitdata = {
34550 +static const struct hpt_info hpt302n __devinitconst = {
34551 .chip_name = "HPT302N",
34552 .chip_type = HPT302N,
34553 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34554 @@ -587,7 +587,7 @@ static const struct hpt_info hpt302n __devinitdata = {
34555 .timings = &hpt37x_timings
34556 };
34557
34558 -static const struct hpt_info hpt371n __devinitdata = {
34559 +static const struct hpt_info hpt371n __devinitconst = {
34560 .chip_name = "HPT371N",
34561 .chip_type = HPT371N,
34562 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34563 @@ -1422,7 +1422,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = {
34564 .dma_sff_read_status = ide_dma_sff_read_status,
34565 };
34566
34567 -static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
34568 +static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
34569 { /* 0: HPT36x */
34570 .name = DRV_NAME,
34571 .init_chipset = init_chipset_hpt366,
34572 diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
34573 index 2de76cc..74186a1 100644
34574 --- a/drivers/ide/ide-cd.c
34575 +++ b/drivers/ide/ide-cd.c
34576 @@ -774,7 +774,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
34577 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
34578 if ((unsigned long)buf & alignment
34579 || blk_rq_bytes(rq) & q->dma_pad_mask
34580 - || object_is_on_stack(buf))
34581 + || object_starts_on_stack(buf))
34582 drive->dma = 0;
34583 }
34584 }
34585 diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
34586 index fefbdfc..62ff465 100644
34587 --- a/drivers/ide/ide-floppy.c
34588 +++ b/drivers/ide/ide-floppy.c
34589 @@ -373,6 +373,8 @@ static int ide_floppy_get_capacity(ide_drive_t *drive)
34590 u8 pc_buf[256], header_len, desc_cnt;
34591 int i, rc = 1, blocks, length;
34592
34593 + pax_track_stack();
34594 +
34595 ide_debug_log(IDE_DBG_FUNC, "enter");
34596
34597 drive->bios_cyl = 0;
34598 diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
34599 index 39d4e01..11538ce 100644
34600 --- a/drivers/ide/ide-pci-generic.c
34601 +++ b/drivers/ide/ide-pci-generic.c
34602 @@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = {
34603 .udma_mask = ATA_UDMA6, \
34604 }
34605
34606 -static const struct ide_port_info generic_chipsets[] __devinitdata = {
34607 +static const struct ide_port_info generic_chipsets[] __devinitconst = {
34608 /* 0: Unknown */
34609 DECLARE_GENERIC_PCI_DEV(0),
34610
34611 diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
34612 index 0d266a5..aaca790 100644
34613 --- a/drivers/ide/it8172.c
34614 +++ b/drivers/ide/it8172.c
34615 @@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = {
34616 .set_dma_mode = it8172_set_dma_mode,
34617 };
34618
34619 -static const struct ide_port_info it8172_port_info __devinitdata = {
34620 +static const struct ide_port_info it8172_port_info __devinitconst = {
34621 .name = DRV_NAME,
34622 .port_ops = &it8172_port_ops,
34623 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
34624 diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
34625 index 4797616..4be488a 100644
34626 --- a/drivers/ide/it8213.c
34627 +++ b/drivers/ide/it8213.c
34628 @@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = {
34629 .cable_detect = it8213_cable_detect,
34630 };
34631
34632 -static const struct ide_port_info it8213_chipset __devinitdata = {
34633 +static const struct ide_port_info it8213_chipset __devinitconst = {
34634 .name = DRV_NAME,
34635 .enablebits = { {0x41, 0x80, 0x80} },
34636 .port_ops = &it8213_port_ops,
34637 diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
34638 index 51aa745..146ee60 100644
34639 --- a/drivers/ide/it821x.c
34640 +++ b/drivers/ide/it821x.c
34641 @@ -627,7 +627,7 @@ static const struct ide_port_ops it821x_port_ops = {
34642 .cable_detect = it821x_cable_detect,
34643 };
34644
34645 -static const struct ide_port_info it821x_chipset __devinitdata = {
34646 +static const struct ide_port_info it821x_chipset __devinitconst = {
34647 .name = DRV_NAME,
34648 .init_chipset = init_chipset_it821x,
34649 .init_hwif = init_hwif_it821x,
34650 diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
34651 index bf2be64..9270098 100644
34652 --- a/drivers/ide/jmicron.c
34653 +++ b/drivers/ide/jmicron.c
34654 @@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = {
34655 .cable_detect = jmicron_cable_detect,
34656 };
34657
34658 -static const struct ide_port_info jmicron_chipset __devinitdata = {
34659 +static const struct ide_port_info jmicron_chipset __devinitconst = {
34660 .name = DRV_NAME,
34661 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
34662 .port_ops = &jmicron_port_ops,
34663 diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
34664 index 95327a2..73f78d8 100644
34665 --- a/drivers/ide/ns87415.c
34666 +++ b/drivers/ide/ns87415.c
34667 @@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = {
34668 .dma_sff_read_status = superio_dma_sff_read_status,
34669 };
34670
34671 -static const struct ide_port_info ns87415_chipset __devinitdata = {
34672 +static const struct ide_port_info ns87415_chipset __devinitconst = {
34673 .name = DRV_NAME,
34674 .init_hwif = init_hwif_ns87415,
34675 .tp_ops = &ns87415_tp_ops,
34676 diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
34677 index f1d70d6..e1de05b 100644
34678 --- a/drivers/ide/opti621.c
34679 +++ b/drivers/ide/opti621.c
34680 @@ -202,7 +202,7 @@ static const struct ide_port_ops opti621_port_ops = {
34681 .set_pio_mode = opti621_set_pio_mode,
34682 };
34683
34684 -static const struct ide_port_info opti621_chipset __devinitdata = {
34685 +static const struct ide_port_info opti621_chipset __devinitconst = {
34686 .name = DRV_NAME,
34687 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
34688 .port_ops = &opti621_port_ops,
34689 diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
34690 index 65ba823..7311f4d 100644
34691 --- a/drivers/ide/pdc202xx_new.c
34692 +++ b/drivers/ide/pdc202xx_new.c
34693 @@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = {
34694 .udma_mask = udma, \
34695 }
34696
34697 -static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
34698 +static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
34699 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
34700 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
34701 };
34702 diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
34703 index cb812f3..af816ef 100644
34704 --- a/drivers/ide/pdc202xx_old.c
34705 +++ b/drivers/ide/pdc202xx_old.c
34706 @@ -285,7 +285,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
34707 .max_sectors = sectors, \
34708 }
34709
34710 -static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
34711 +static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
34712 { /* 0: PDC20246 */
34713 .name = DRV_NAME,
34714 .init_chipset = init_chipset_pdc202xx,
34715 diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
34716 index bf14f39..15c4b98 100644
34717 --- a/drivers/ide/piix.c
34718 +++ b/drivers/ide/piix.c
34719 @@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = {
34720 .udma_mask = udma, \
34721 }
34722
34723 -static const struct ide_port_info piix_pci_info[] __devinitdata = {
34724 +static const struct ide_port_info piix_pci_info[] __devinitconst = {
34725 /* 0: MPIIX */
34726 { /*
34727 * MPIIX actually has only a single IDE channel mapped to
34728 diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
34729 index a6414a8..c04173e 100644
34730 --- a/drivers/ide/rz1000.c
34731 +++ b/drivers/ide/rz1000.c
34732 @@ -38,7 +38,7 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
34733 }
34734 }
34735
34736 -static const struct ide_port_info rz1000_chipset __devinitdata = {
34737 +static const struct ide_port_info rz1000_chipset __devinitconst = {
34738 .name = DRV_NAME,
34739 .host_flags = IDE_HFLAG_NO_DMA,
34740 };
34741 diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
34742 index d467478..9203942 100644
34743 --- a/drivers/ide/sc1200.c
34744 +++ b/drivers/ide/sc1200.c
34745 @@ -290,7 +290,7 @@ static const struct ide_dma_ops sc1200_dma_ops = {
34746 .dma_sff_read_status = ide_dma_sff_read_status,
34747 };
34748
34749 -static const struct ide_port_info sc1200_chipset __devinitdata = {
34750 +static const struct ide_port_info sc1200_chipset __devinitconst = {
34751 .name = DRV_NAME,
34752 .port_ops = &sc1200_port_ops,
34753 .dma_ops = &sc1200_dma_ops,
34754 diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
34755 index 1104bb3..59c5194 100644
34756 --- a/drivers/ide/scc_pata.c
34757 +++ b/drivers/ide/scc_pata.c
34758 @@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_ops = {
34759 .dma_sff_read_status = scc_dma_sff_read_status,
34760 };
34761
34762 -static const struct ide_port_info scc_chipset __devinitdata = {
34763 +static const struct ide_port_info scc_chipset __devinitconst = {
34764 .name = "sccIDE",
34765 .init_iops = init_iops_scc,
34766 .init_dma = scc_init_dma,
34767 diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
34768 index b6554ef..6cc2cc3 100644
34769 --- a/drivers/ide/serverworks.c
34770 +++ b/drivers/ide/serverworks.c
34771 @@ -353,7 +353,7 @@ static const struct ide_port_ops svwks_port_ops = {
34772 .cable_detect = svwks_cable_detect,
34773 };
34774
34775 -static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
34776 +static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
34777 { /* 0: OSB4 */
34778 .name = DRV_NAME,
34779 .init_chipset = init_chipset_svwks,
34780 diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c
34781 index ab3db61..afed580 100644
34782 --- a/drivers/ide/setup-pci.c
34783 +++ b/drivers/ide/setup-pci.c
34784 @@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
34785 int ret, i, n_ports = dev2 ? 4 : 2;
34786 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
34787
34788 + pax_track_stack();
34789 +
34790 for (i = 0; i < n_ports / 2; i++) {
34791 ret = ide_setup_pci_controller(pdev[i], d, !i);
34792 if (ret < 0)
34793 diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
34794 index d95df52..0b03a39 100644
34795 --- a/drivers/ide/siimage.c
34796 +++ b/drivers/ide/siimage.c
34797 @@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = {
34798 .udma_mask = ATA_UDMA6, \
34799 }
34800
34801 -static const struct ide_port_info siimage_chipsets[] __devinitdata = {
34802 +static const struct ide_port_info siimage_chipsets[] __devinitconst = {
34803 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
34804 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
34805 };
34806 diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
34807 index 3b88eba..ca8699d 100644
34808 --- a/drivers/ide/sis5513.c
34809 +++ b/drivers/ide/sis5513.c
34810 @@ -561,7 +561,7 @@ static const struct ide_port_ops sis_ata133_port_ops = {
34811 .cable_detect = sis_cable_detect,
34812 };
34813
34814 -static const struct ide_port_info sis5513_chipset __devinitdata = {
34815 +static const struct ide_port_info sis5513_chipset __devinitconst = {
34816 .name = DRV_NAME,
34817 .init_chipset = init_chipset_sis5513,
34818 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
34819 diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
34820 index d698da4..fca42a4 100644
34821 --- a/drivers/ide/sl82c105.c
34822 +++ b/drivers/ide/sl82c105.c
34823 @@ -319,7 +319,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = {
34824 .dma_sff_read_status = ide_dma_sff_read_status,
34825 };
34826
34827 -static const struct ide_port_info sl82c105_chipset __devinitdata = {
34828 +static const struct ide_port_info sl82c105_chipset __devinitconst = {
34829 .name = DRV_NAME,
34830 .init_chipset = init_chipset_sl82c105,
34831 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
34832 diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
34833 index 1ccfb40..83d5779 100644
34834 --- a/drivers/ide/slc90e66.c
34835 +++ b/drivers/ide/slc90e66.c
34836 @@ -131,7 +131,7 @@ static const struct ide_port_ops slc90e66_port_ops = {
34837 .cable_detect = slc90e66_cable_detect,
34838 };
34839
34840 -static const struct ide_port_info slc90e66_chipset __devinitdata = {
34841 +static const struct ide_port_info slc90e66_chipset __devinitconst = {
34842 .name = DRV_NAME,
34843 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
34844 .port_ops = &slc90e66_port_ops,
34845 diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
34846 index 05a93d6..5f9e325 100644
34847 --- a/drivers/ide/tc86c001.c
34848 +++ b/drivers/ide/tc86c001.c
34849 @@ -190,7 +190,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = {
34850 .dma_sff_read_status = ide_dma_sff_read_status,
34851 };
34852
34853 -static const struct ide_port_info tc86c001_chipset __devinitdata = {
34854 +static const struct ide_port_info tc86c001_chipset __devinitconst = {
34855 .name = DRV_NAME,
34856 .init_hwif = init_hwif_tc86c001,
34857 .port_ops = &tc86c001_port_ops,
34858 diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
34859 index 8773c3b..7907d6c 100644
34860 --- a/drivers/ide/triflex.c
34861 +++ b/drivers/ide/triflex.c
34862 @@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = {
34863 .set_dma_mode = triflex_set_mode,
34864 };
34865
34866 -static const struct ide_port_info triflex_device __devinitdata = {
34867 +static const struct ide_port_info triflex_device __devinitconst = {
34868 .name = DRV_NAME,
34869 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
34870 .port_ops = &triflex_port_ops,
34871 diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
34872 index 4b42ca0..e494a98 100644
34873 --- a/drivers/ide/trm290.c
34874 +++ b/drivers/ide/trm290.c
34875 @@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = {
34876 .dma_check = trm290_dma_check,
34877 };
34878
34879 -static const struct ide_port_info trm290_chipset __devinitdata = {
34880 +static const struct ide_port_info trm290_chipset __devinitconst = {
34881 .name = DRV_NAME,
34882 .init_hwif = init_hwif_trm290,
34883 .tp_ops = &trm290_tp_ops,
34884 diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
34885 index 028de26..520d5d5 100644
34886 --- a/drivers/ide/via82cxxx.c
34887 +++ b/drivers/ide/via82cxxx.c
34888 @@ -374,7 +374,7 @@ static const struct ide_port_ops via_port_ops = {
34889 .cable_detect = via82cxxx_cable_detect,
34890 };
34891
34892 -static const struct ide_port_info via82cxxx_chipset __devinitdata = {
34893 +static const struct ide_port_info via82cxxx_chipset __devinitconst = {
34894 .name = DRV_NAME,
34895 .init_chipset = init_chipset_via82cxxx,
34896 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
34897 diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c
34898 index 2cd00b5..14de699 100644
34899 --- a/drivers/ieee1394/dv1394.c
34900 +++ b/drivers/ieee1394/dv1394.c
34901 @@ -739,7 +739,7 @@ static void frame_prepare(struct video_card *video, unsigned int this_frame)
34902 based upon DIF section and sequence
34903 */
34904
34905 -static void inline
34906 +static inline void
34907 frame_put_packet (struct frame *f, struct packet *p)
34908 {
34909 int section_type = p->data[0] >> 5; /* section type is in bits 5 - 7 */
34910 diff --git a/drivers/ieee1394/hosts.c b/drivers/ieee1394/hosts.c
34911 index e947d8f..6a966b9 100644
34912 --- a/drivers/ieee1394/hosts.c
34913 +++ b/drivers/ieee1394/hosts.c
34914 @@ -78,6 +78,7 @@ static int dummy_isoctl(struct hpsb_iso *iso, enum isoctl_cmd command,
34915 }
34916
34917 static struct hpsb_host_driver dummy_driver = {
34918 + .name = "dummy",
34919 .transmit_packet = dummy_transmit_packet,
34920 .devctl = dummy_devctl,
34921 .isoctl = dummy_isoctl
34922 diff --git a/drivers/ieee1394/init_ohci1394_dma.c b/drivers/ieee1394/init_ohci1394_dma.c
34923 index ddaab6e..8d37435 100644
34924 --- a/drivers/ieee1394/init_ohci1394_dma.c
34925 +++ b/drivers/ieee1394/init_ohci1394_dma.c
34926 @@ -257,7 +257,7 @@ void __init init_ohci1394_dma_on_all_controllers(void)
34927 for (func = 0; func < 8; func++) {
34928 u32 class = read_pci_config(num,slot,func,
34929 PCI_CLASS_REVISION);
34930 - if ((class == 0xffffffff))
34931 + if (class == 0xffffffff)
34932 continue; /* No device at this func */
34933
34934 if (class>>8 != PCI_CLASS_SERIAL_FIREWIRE_OHCI)
34935 diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c
34936 index 65c1429..5d8c11f 100644
34937 --- a/drivers/ieee1394/ohci1394.c
34938 +++ b/drivers/ieee1394/ohci1394.c
34939 @@ -147,9 +147,9 @@ printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)
34940 printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
34941
34942 /* Module Parameters */
34943 -static int phys_dma = 1;
34944 +static int phys_dma;
34945 module_param(phys_dma, int, 0444);
34946 -MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 1).");
34947 +MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 0).");
34948
34949 static void dma_trm_tasklet(unsigned long data);
34950 static void dma_trm_reset(struct dma_trm_ctx *d);
34951 diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
34952 index f199896..78c9fc8 100644
34953 --- a/drivers/ieee1394/sbp2.c
34954 +++ b/drivers/ieee1394/sbp2.c
34955 @@ -2111,7 +2111,7 @@ MODULE_DESCRIPTION("IEEE-1394 SBP-2 protocol driver");
34956 MODULE_SUPPORTED_DEVICE(SBP2_DEVICE_NAME);
34957 MODULE_LICENSE("GPL");
34958
34959 -static int sbp2_module_init(void)
34960 +static int __init sbp2_module_init(void)
34961 {
34962 int ret;
34963
34964 diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
34965 index a5dea6b..0cefe8f 100644
34966 --- a/drivers/infiniband/core/cm.c
34967 +++ b/drivers/infiniband/core/cm.c
34968 @@ -112,7 +112,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
34969
34970 struct cm_counter_group {
34971 struct kobject obj;
34972 - atomic_long_t counter[CM_ATTR_COUNT];
34973 + atomic_long_unchecked_t counter[CM_ATTR_COUNT];
34974 };
34975
34976 struct cm_counter_attribute {
34977 @@ -1386,7 +1386,7 @@ static void cm_dup_req_handler(struct cm_work *work,
34978 struct ib_mad_send_buf *msg = NULL;
34979 int ret;
34980
34981 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34982 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34983 counter[CM_REQ_COUNTER]);
34984
34985 /* Quick state check to discard duplicate REQs. */
34986 @@ -1764,7 +1764,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
34987 if (!cm_id_priv)
34988 return;
34989
34990 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34991 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34992 counter[CM_REP_COUNTER]);
34993 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
34994 if (ret)
34995 @@ -1931,7 +1931,7 @@ static int cm_rtu_handler(struct cm_work *work)
34996 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
34997 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
34998 spin_unlock_irq(&cm_id_priv->lock);
34999 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
35000 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
35001 counter[CM_RTU_COUNTER]);
35002 goto out;
35003 }
35004 @@ -2110,7 +2110,7 @@ static int cm_dreq_handler(struct cm_work *work)
35005 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
35006 dreq_msg->local_comm_id);
35007 if (!cm_id_priv) {
35008 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
35009 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
35010 counter[CM_DREQ_COUNTER]);
35011 cm_issue_drep(work->port, work->mad_recv_wc);
35012 return -EINVAL;
35013 @@ -2131,7 +2131,7 @@ static int cm_dreq_handler(struct cm_work *work)
35014 case IB_CM_MRA_REP_RCVD:
35015 break;
35016 case IB_CM_TIMEWAIT:
35017 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
35018 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
35019 counter[CM_DREQ_COUNTER]);
35020 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
35021 goto unlock;
35022 @@ -2145,7 +2145,7 @@ static int cm_dreq_handler(struct cm_work *work)
35023 cm_free_msg(msg);
35024 goto deref;
35025 case IB_CM_DREQ_RCVD:
35026 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
35027 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
35028 counter[CM_DREQ_COUNTER]);
35029 goto unlock;
35030 default:
35031 @@ -2501,7 +2501,7 @@ static int cm_mra_handler(struct cm_work *work)
35032 ib_modify_mad(cm_id_priv->av.port->mad_agent,
35033 cm_id_priv->msg, timeout)) {
35034 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
35035 - atomic_long_inc(&work->port->
35036 + atomic_long_inc_unchecked(&work->port->
35037 counter_group[CM_RECV_DUPLICATES].
35038 counter[CM_MRA_COUNTER]);
35039 goto out;
35040 @@ -2510,7 +2510,7 @@ static int cm_mra_handler(struct cm_work *work)
35041 break;
35042 case IB_CM_MRA_REQ_RCVD:
35043 case IB_CM_MRA_REP_RCVD:
35044 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
35045 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
35046 counter[CM_MRA_COUNTER]);
35047 /* fall through */
35048 default:
35049 @@ -2672,7 +2672,7 @@ static int cm_lap_handler(struct cm_work *work)
35050 case IB_CM_LAP_IDLE:
35051 break;
35052 case IB_CM_MRA_LAP_SENT:
35053 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
35054 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
35055 counter[CM_LAP_COUNTER]);
35056 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
35057 goto unlock;
35058 @@ -2688,7 +2688,7 @@ static int cm_lap_handler(struct cm_work *work)
35059 cm_free_msg(msg);
35060 goto deref;
35061 case IB_CM_LAP_RCVD:
35062 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
35063 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
35064 counter[CM_LAP_COUNTER]);
35065 goto unlock;
35066 default:
35067 @@ -2972,7 +2972,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
35068 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
35069 if (cur_cm_id_priv) {
35070 spin_unlock_irq(&cm.lock);
35071 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
35072 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
35073 counter[CM_SIDR_REQ_COUNTER]);
35074 goto out; /* Duplicate message. */
35075 }
35076 @@ -3184,10 +3184,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
35077 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
35078 msg->retries = 1;
35079
35080 - atomic_long_add(1 + msg->retries,
35081 + atomic_long_add_unchecked(1 + msg->retries,
35082 &port->counter_group[CM_XMIT].counter[attr_index]);
35083 if (msg->retries)
35084 - atomic_long_add(msg->retries,
35085 + atomic_long_add_unchecked(msg->retries,
35086 &port->counter_group[CM_XMIT_RETRIES].
35087 counter[attr_index]);
35088
35089 @@ -3397,7 +3397,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
35090 }
35091
35092 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
35093 - atomic_long_inc(&port->counter_group[CM_RECV].
35094 + atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
35095 counter[attr_id - CM_ATTR_ID_OFFSET]);
35096
35097 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
35098 @@ -3595,10 +3595,10 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
35099 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
35100
35101 return sprintf(buf, "%ld\n",
35102 - atomic_long_read(&group->counter[cm_attr->index]));
35103 + atomic_long_read_unchecked(&group->counter[cm_attr->index]));
35104 }
35105
35106 -static struct sysfs_ops cm_counter_ops = {
35107 +static const struct sysfs_ops cm_counter_ops = {
35108 .show = cm_show_counter
35109 };
35110
35111 diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
35112 index 8fd3a6f..61d8075 100644
35113 --- a/drivers/infiniband/core/cma.c
35114 +++ b/drivers/infiniband/core/cma.c
35115 @@ -2267,6 +2267,9 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
35116
35117 req.private_data_len = sizeof(struct cma_hdr) +
35118 conn_param->private_data_len;
35119 + if (req.private_data_len < conn_param->private_data_len)
35120 + return -EINVAL;
35121 +
35122 req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
35123 if (!req.private_data)
35124 return -ENOMEM;
35125 @@ -2314,6 +2317,9 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
35126 memset(&req, 0, sizeof req);
35127 offset = cma_user_data_offset(id_priv->id.ps);
35128 req.private_data_len = offset + conn_param->private_data_len;
35129 + if (req.private_data_len < conn_param->private_data_len)
35130 + return -EINVAL;
35131 +
35132 private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
35133 if (!private_data)
35134 return -ENOMEM;
35135 diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
35136 index 4507043..14ad522 100644
35137 --- a/drivers/infiniband/core/fmr_pool.c
35138 +++ b/drivers/infiniband/core/fmr_pool.c
35139 @@ -97,8 +97,8 @@ struct ib_fmr_pool {
35140
35141 struct task_struct *thread;
35142
35143 - atomic_t req_ser;
35144 - atomic_t flush_ser;
35145 + atomic_unchecked_t req_ser;
35146 + atomic_unchecked_t flush_ser;
35147
35148 wait_queue_head_t force_wait;
35149 };
35150 @@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
35151 struct ib_fmr_pool *pool = pool_ptr;
35152
35153 do {
35154 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
35155 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
35156 ib_fmr_batch_release(pool);
35157
35158 - atomic_inc(&pool->flush_ser);
35159 + atomic_inc_unchecked(&pool->flush_ser);
35160 wake_up_interruptible(&pool->force_wait);
35161
35162 if (pool->flush_function)
35163 @@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
35164 }
35165
35166 set_current_state(TASK_INTERRUPTIBLE);
35167 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
35168 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
35169 !kthread_should_stop())
35170 schedule();
35171 __set_current_state(TASK_RUNNING);
35172 @@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
35173 pool->dirty_watermark = params->dirty_watermark;
35174 pool->dirty_len = 0;
35175 spin_lock_init(&pool->pool_lock);
35176 - atomic_set(&pool->req_ser, 0);
35177 - atomic_set(&pool->flush_ser, 0);
35178 + atomic_set_unchecked(&pool->req_ser, 0);
35179 + atomic_set_unchecked(&pool->flush_ser, 0);
35180 init_waitqueue_head(&pool->force_wait);
35181
35182 pool->thread = kthread_run(ib_fmr_cleanup_thread,
35183 @@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
35184 }
35185 spin_unlock_irq(&pool->pool_lock);
35186
35187 - serial = atomic_inc_return(&pool->req_ser);
35188 + serial = atomic_inc_return_unchecked(&pool->req_ser);
35189 wake_up_process(pool->thread);
35190
35191 if (wait_event_interruptible(pool->force_wait,
35192 - atomic_read(&pool->flush_ser) - serial >= 0))
35193 + atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
35194 return -EINTR;
35195
35196 return 0;
35197 @@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
35198 } else {
35199 list_add_tail(&fmr->list, &pool->dirty_list);
35200 if (++pool->dirty_len >= pool->dirty_watermark) {
35201 - atomic_inc(&pool->req_ser);
35202 + atomic_inc_unchecked(&pool->req_ser);
35203 wake_up_process(pool->thread);
35204 }
35205 }
35206 diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
35207 index 158a214..1558bb7 100644
35208 --- a/drivers/infiniband/core/sysfs.c
35209 +++ b/drivers/infiniband/core/sysfs.c
35210 @@ -79,7 +79,7 @@ static ssize_t port_attr_show(struct kobject *kobj,
35211 return port_attr->show(p, port_attr, buf);
35212 }
35213
35214 -static struct sysfs_ops port_sysfs_ops = {
35215 +static const struct sysfs_ops port_sysfs_ops = {
35216 .show = port_attr_show
35217 };
35218
35219 diff --git a/drivers/infiniband/core/uverbs_marshall.c b/drivers/infiniband/core/uverbs_marshall.c
35220 index 5440da0..1194ecb 100644
35221 --- a/drivers/infiniband/core/uverbs_marshall.c
35222 +++ b/drivers/infiniband/core/uverbs_marshall.c
35223 @@ -40,18 +40,21 @@ void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst,
35224 dst->grh.sgid_index = src->grh.sgid_index;
35225 dst->grh.hop_limit = src->grh.hop_limit;
35226 dst->grh.traffic_class = src->grh.traffic_class;
35227 + memset(&dst->grh.reserved, 0, sizeof(dst->grh.reserved));
35228 dst->dlid = src->dlid;
35229 dst->sl = src->sl;
35230 dst->src_path_bits = src->src_path_bits;
35231 dst->static_rate = src->static_rate;
35232 dst->is_global = src->ah_flags & IB_AH_GRH ? 1 : 0;
35233 dst->port_num = src->port_num;
35234 + dst->reserved = 0;
35235 }
35236 EXPORT_SYMBOL(ib_copy_ah_attr_to_user);
35237
35238 void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
35239 struct ib_qp_attr *src)
35240 {
35241 + dst->qp_state = src->qp_state;
35242 dst->cur_qp_state = src->cur_qp_state;
35243 dst->path_mtu = src->path_mtu;
35244 dst->path_mig_state = src->path_mig_state;
35245 @@ -83,6 +86,7 @@ void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
35246 dst->rnr_retry = src->rnr_retry;
35247 dst->alt_port_num = src->alt_port_num;
35248 dst->alt_timeout = src->alt_timeout;
35249 + memset(dst->reserved, 0, sizeof(dst->reserved));
35250 }
35251 EXPORT_SYMBOL(ib_copy_qp_attr_to_user);
35252
35253 diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
35254 index 100da85..62e6b88 100644
35255 --- a/drivers/infiniband/hw/ipath/ipath_fs.c
35256 +++ b/drivers/infiniband/hw/ipath/ipath_fs.c
35257 @@ -110,6 +110,8 @@ static ssize_t atomic_counters_read(struct file *file, char __user *buf,
35258 struct infinipath_counters counters;
35259 struct ipath_devdata *dd;
35260
35261 + pax_track_stack();
35262 +
35263 dd = file->f_path.dentry->d_inode->i_private;
35264 dd->ipath_f_read_counters(dd, &counters);
35265
35266 diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
35267 index cbde0cf..afaf55c 100644
35268 --- a/drivers/infiniband/hw/nes/nes.c
35269 +++ b/drivers/infiniband/hw/nes/nes.c
35270 @@ -102,7 +102,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
35271 LIST_HEAD(nes_adapter_list);
35272 static LIST_HEAD(nes_dev_list);
35273
35274 -atomic_t qps_destroyed;
35275 +atomic_unchecked_t qps_destroyed;
35276
35277 static unsigned int ee_flsh_adapter;
35278 static unsigned int sysfs_nonidx_addr;
35279 @@ -259,7 +259,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
35280 struct nes_adapter *nesadapter = nesdev->nesadapter;
35281 u32 qp_id;
35282
35283 - atomic_inc(&qps_destroyed);
35284 + atomic_inc_unchecked(&qps_destroyed);
35285
35286 /* Free the control structures */
35287
35288 diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
35289 index bcc6abc..9c76b2f 100644
35290 --- a/drivers/infiniband/hw/nes/nes.h
35291 +++ b/drivers/infiniband/hw/nes/nes.h
35292 @@ -174,17 +174,17 @@ extern unsigned int nes_debug_level;
35293 extern unsigned int wqm_quanta;
35294 extern struct list_head nes_adapter_list;
35295
35296 -extern atomic_t cm_connects;
35297 -extern atomic_t cm_accepts;
35298 -extern atomic_t cm_disconnects;
35299 -extern atomic_t cm_closes;
35300 -extern atomic_t cm_connecteds;
35301 -extern atomic_t cm_connect_reqs;
35302 -extern atomic_t cm_rejects;
35303 -extern atomic_t mod_qp_timouts;
35304 -extern atomic_t qps_created;
35305 -extern atomic_t qps_destroyed;
35306 -extern atomic_t sw_qps_destroyed;
35307 +extern atomic_unchecked_t cm_connects;
35308 +extern atomic_unchecked_t cm_accepts;
35309 +extern atomic_unchecked_t cm_disconnects;
35310 +extern atomic_unchecked_t cm_closes;
35311 +extern atomic_unchecked_t cm_connecteds;
35312 +extern atomic_unchecked_t cm_connect_reqs;
35313 +extern atomic_unchecked_t cm_rejects;
35314 +extern atomic_unchecked_t mod_qp_timouts;
35315 +extern atomic_unchecked_t qps_created;
35316 +extern atomic_unchecked_t qps_destroyed;
35317 +extern atomic_unchecked_t sw_qps_destroyed;
35318 extern u32 mh_detected;
35319 extern u32 mh_pauses_sent;
35320 extern u32 cm_packets_sent;
35321 @@ -196,11 +196,11 @@ extern u32 cm_packets_retrans;
35322 extern u32 cm_listens_created;
35323 extern u32 cm_listens_destroyed;
35324 extern u32 cm_backlog_drops;
35325 -extern atomic_t cm_loopbacks;
35326 -extern atomic_t cm_nodes_created;
35327 -extern atomic_t cm_nodes_destroyed;
35328 -extern atomic_t cm_accel_dropped_pkts;
35329 -extern atomic_t cm_resets_recvd;
35330 +extern atomic_unchecked_t cm_loopbacks;
35331 +extern atomic_unchecked_t cm_nodes_created;
35332 +extern atomic_unchecked_t cm_nodes_destroyed;
35333 +extern atomic_unchecked_t cm_accel_dropped_pkts;
35334 +extern atomic_unchecked_t cm_resets_recvd;
35335
35336 extern u32 int_mod_timer_init;
35337 extern u32 int_mod_cq_depth_256;
35338 diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
35339 index 73473db..5ed06e8 100644
35340 --- a/drivers/infiniband/hw/nes/nes_cm.c
35341 +++ b/drivers/infiniband/hw/nes/nes_cm.c
35342 @@ -69,11 +69,11 @@ u32 cm_packets_received;
35343 u32 cm_listens_created;
35344 u32 cm_listens_destroyed;
35345 u32 cm_backlog_drops;
35346 -atomic_t cm_loopbacks;
35347 -atomic_t cm_nodes_created;
35348 -atomic_t cm_nodes_destroyed;
35349 -atomic_t cm_accel_dropped_pkts;
35350 -atomic_t cm_resets_recvd;
35351 +atomic_unchecked_t cm_loopbacks;
35352 +atomic_unchecked_t cm_nodes_created;
35353 +atomic_unchecked_t cm_nodes_destroyed;
35354 +atomic_unchecked_t cm_accel_dropped_pkts;
35355 +atomic_unchecked_t cm_resets_recvd;
35356
35357 static inline int mini_cm_accelerated(struct nes_cm_core *,
35358 struct nes_cm_node *);
35359 @@ -149,13 +149,13 @@ static struct nes_cm_ops nes_cm_api = {
35360
35361 static struct nes_cm_core *g_cm_core;
35362
35363 -atomic_t cm_connects;
35364 -atomic_t cm_accepts;
35365 -atomic_t cm_disconnects;
35366 -atomic_t cm_closes;
35367 -atomic_t cm_connecteds;
35368 -atomic_t cm_connect_reqs;
35369 -atomic_t cm_rejects;
35370 +atomic_unchecked_t cm_connects;
35371 +atomic_unchecked_t cm_accepts;
35372 +atomic_unchecked_t cm_disconnects;
35373 +atomic_unchecked_t cm_closes;
35374 +atomic_unchecked_t cm_connecteds;
35375 +atomic_unchecked_t cm_connect_reqs;
35376 +atomic_unchecked_t cm_rejects;
35377
35378
35379 /**
35380 @@ -1195,7 +1195,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
35381 cm_node->rem_mac);
35382
35383 add_hte_node(cm_core, cm_node);
35384 - atomic_inc(&cm_nodes_created);
35385 + atomic_inc_unchecked(&cm_nodes_created);
35386
35387 return cm_node;
35388 }
35389 @@ -1253,7 +1253,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
35390 }
35391
35392 atomic_dec(&cm_core->node_cnt);
35393 - atomic_inc(&cm_nodes_destroyed);
35394 + atomic_inc_unchecked(&cm_nodes_destroyed);
35395 nesqp = cm_node->nesqp;
35396 if (nesqp) {
35397 nesqp->cm_node = NULL;
35398 @@ -1320,7 +1320,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
35399
35400 static void drop_packet(struct sk_buff *skb)
35401 {
35402 - atomic_inc(&cm_accel_dropped_pkts);
35403 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
35404 dev_kfree_skb_any(skb);
35405 }
35406
35407 @@ -1377,7 +1377,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
35408
35409 int reset = 0; /* whether to send reset in case of err.. */
35410 int passive_state;
35411 - atomic_inc(&cm_resets_recvd);
35412 + atomic_inc_unchecked(&cm_resets_recvd);
35413 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
35414 " refcnt=%d\n", cm_node, cm_node->state,
35415 atomic_read(&cm_node->ref_count));
35416 @@ -2000,7 +2000,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
35417 rem_ref_cm_node(cm_node->cm_core, cm_node);
35418 return NULL;
35419 }
35420 - atomic_inc(&cm_loopbacks);
35421 + atomic_inc_unchecked(&cm_loopbacks);
35422 loopbackremotenode->loopbackpartner = cm_node;
35423 loopbackremotenode->tcp_cntxt.rcv_wscale =
35424 NES_CM_DEFAULT_RCV_WND_SCALE;
35425 @@ -2262,7 +2262,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
35426 add_ref_cm_node(cm_node);
35427 } else if (cm_node->state == NES_CM_STATE_TSA) {
35428 rem_ref_cm_node(cm_core, cm_node);
35429 - atomic_inc(&cm_accel_dropped_pkts);
35430 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
35431 dev_kfree_skb_any(skb);
35432 break;
35433 }
35434 @@ -2568,7 +2568,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
35435
35436 if ((cm_id) && (cm_id->event_handler)) {
35437 if (issue_disconn) {
35438 - atomic_inc(&cm_disconnects);
35439 + atomic_inc_unchecked(&cm_disconnects);
35440 cm_event.event = IW_CM_EVENT_DISCONNECT;
35441 cm_event.status = disconn_status;
35442 cm_event.local_addr = cm_id->local_addr;
35443 @@ -2590,7 +2590,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
35444 }
35445
35446 if (issue_close) {
35447 - atomic_inc(&cm_closes);
35448 + atomic_inc_unchecked(&cm_closes);
35449 nes_disconnect(nesqp, 1);
35450
35451 cm_id->provider_data = nesqp;
35452 @@ -2710,7 +2710,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
35453
35454 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
35455 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
35456 - atomic_inc(&cm_accepts);
35457 + atomic_inc_unchecked(&cm_accepts);
35458
35459 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
35460 atomic_read(&nesvnic->netdev->refcnt));
35461 @@ -2919,7 +2919,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
35462
35463 struct nes_cm_core *cm_core;
35464
35465 - atomic_inc(&cm_rejects);
35466 + atomic_inc_unchecked(&cm_rejects);
35467 cm_node = (struct nes_cm_node *) cm_id->provider_data;
35468 loopback = cm_node->loopbackpartner;
35469 cm_core = cm_node->cm_core;
35470 @@ -2982,7 +2982,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
35471 ntohl(cm_id->local_addr.sin_addr.s_addr),
35472 ntohs(cm_id->local_addr.sin_port));
35473
35474 - atomic_inc(&cm_connects);
35475 + atomic_inc_unchecked(&cm_connects);
35476 nesqp->active_conn = 1;
35477
35478 /* cache the cm_id in the qp */
35479 @@ -3195,7 +3195,7 @@ static void cm_event_connected(struct nes_cm_event *event)
35480 if (nesqp->destroyed) {
35481 return;
35482 }
35483 - atomic_inc(&cm_connecteds);
35484 + atomic_inc_unchecked(&cm_connecteds);
35485 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
35486 " local port 0x%04X. jiffies = %lu.\n",
35487 nesqp->hwqp.qp_id,
35488 @@ -3403,7 +3403,7 @@ static void cm_event_reset(struct nes_cm_event *event)
35489
35490 ret = cm_id->event_handler(cm_id, &cm_event);
35491 cm_id->add_ref(cm_id);
35492 - atomic_inc(&cm_closes);
35493 + atomic_inc_unchecked(&cm_closes);
35494 cm_event.event = IW_CM_EVENT_CLOSE;
35495 cm_event.status = IW_CM_EVENT_STATUS_OK;
35496 cm_event.provider_data = cm_id->provider_data;
35497 @@ -3439,7 +3439,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
35498 return;
35499 cm_id = cm_node->cm_id;
35500
35501 - atomic_inc(&cm_connect_reqs);
35502 + atomic_inc_unchecked(&cm_connect_reqs);
35503 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
35504 cm_node, cm_id, jiffies);
35505
35506 @@ -3477,7 +3477,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
35507 return;
35508 cm_id = cm_node->cm_id;
35509
35510 - atomic_inc(&cm_connect_reqs);
35511 + atomic_inc_unchecked(&cm_connect_reqs);
35512 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
35513 cm_node, cm_id, jiffies);
35514
35515 diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
35516 index e593af3..870694a 100644
35517 --- a/drivers/infiniband/hw/nes/nes_nic.c
35518 +++ b/drivers/infiniband/hw/nes/nes_nic.c
35519 @@ -1210,17 +1210,17 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
35520 target_stat_values[++index] = mh_detected;
35521 target_stat_values[++index] = mh_pauses_sent;
35522 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
35523 - target_stat_values[++index] = atomic_read(&cm_connects);
35524 - target_stat_values[++index] = atomic_read(&cm_accepts);
35525 - target_stat_values[++index] = atomic_read(&cm_disconnects);
35526 - target_stat_values[++index] = atomic_read(&cm_connecteds);
35527 - target_stat_values[++index] = atomic_read(&cm_connect_reqs);
35528 - target_stat_values[++index] = atomic_read(&cm_rejects);
35529 - target_stat_values[++index] = atomic_read(&mod_qp_timouts);
35530 - target_stat_values[++index] = atomic_read(&qps_created);
35531 - target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
35532 - target_stat_values[++index] = atomic_read(&qps_destroyed);
35533 - target_stat_values[++index] = atomic_read(&cm_closes);
35534 + target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
35535 + target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
35536 + target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
35537 + target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
35538 + target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
35539 + target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
35540 + target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
35541 + target_stat_values[++index] = atomic_read_unchecked(&qps_created);
35542 + target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
35543 + target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
35544 + target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
35545 target_stat_values[++index] = cm_packets_sent;
35546 target_stat_values[++index] = cm_packets_bounced;
35547 target_stat_values[++index] = cm_packets_created;
35548 @@ -1230,11 +1230,11 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
35549 target_stat_values[++index] = cm_listens_created;
35550 target_stat_values[++index] = cm_listens_destroyed;
35551 target_stat_values[++index] = cm_backlog_drops;
35552 - target_stat_values[++index] = atomic_read(&cm_loopbacks);
35553 - target_stat_values[++index] = atomic_read(&cm_nodes_created);
35554 - target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
35555 - target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
35556 - target_stat_values[++index] = atomic_read(&cm_resets_recvd);
35557 + target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
35558 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
35559 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
35560 + target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
35561 + target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
35562 target_stat_values[++index] = int_mod_timer_init;
35563 target_stat_values[++index] = int_mod_cq_depth_1;
35564 target_stat_values[++index] = int_mod_cq_depth_4;
35565 diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
35566 index a680c42..f914deb 100644
35567 --- a/drivers/infiniband/hw/nes/nes_verbs.c
35568 +++ b/drivers/infiniband/hw/nes/nes_verbs.c
35569 @@ -45,9 +45,9 @@
35570
35571 #include <rdma/ib_umem.h>
35572
35573 -atomic_t mod_qp_timouts;
35574 -atomic_t qps_created;
35575 -atomic_t sw_qps_destroyed;
35576 +atomic_unchecked_t mod_qp_timouts;
35577 +atomic_unchecked_t qps_created;
35578 +atomic_unchecked_t sw_qps_destroyed;
35579
35580 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
35581
35582 @@ -1240,7 +1240,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
35583 if (init_attr->create_flags)
35584 return ERR_PTR(-EINVAL);
35585
35586 - atomic_inc(&qps_created);
35587 + atomic_inc_unchecked(&qps_created);
35588 switch (init_attr->qp_type) {
35589 case IB_QPT_RC:
35590 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
35591 @@ -1568,7 +1568,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
35592 struct iw_cm_event cm_event;
35593 int ret;
35594
35595 - atomic_inc(&sw_qps_destroyed);
35596 + atomic_inc_unchecked(&sw_qps_destroyed);
35597 nesqp->destroyed = 1;
35598
35599 /* Blow away the connection if it exists. */
35600 diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
35601 index ac11be0..3883c04 100644
35602 --- a/drivers/input/gameport/gameport.c
35603 +++ b/drivers/input/gameport/gameport.c
35604 @@ -515,13 +515,13 @@ EXPORT_SYMBOL(gameport_set_phys);
35605 */
35606 static void gameport_init_port(struct gameport *gameport)
35607 {
35608 - static atomic_t gameport_no = ATOMIC_INIT(0);
35609 + static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
35610
35611 __module_get(THIS_MODULE);
35612
35613 mutex_init(&gameport->drv_mutex);
35614 device_initialize(&gameport->dev);
35615 - dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return(&gameport_no) - 1);
35616 + dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
35617 gameport->dev.bus = &gameport_bus;
35618 gameport->dev.release = gameport_release_port;
35619 if (gameport->parent)
35620 diff --git a/drivers/input/input.c b/drivers/input/input.c
35621 index c82ae82..8cfb9cb 100644
35622 --- a/drivers/input/input.c
35623 +++ b/drivers/input/input.c
35624 @@ -1558,7 +1558,7 @@ EXPORT_SYMBOL(input_set_capability);
35625 */
35626 int input_register_device(struct input_dev *dev)
35627 {
35628 - static atomic_t input_no = ATOMIC_INIT(0);
35629 + static atomic_unchecked_t input_no = ATOMIC_INIT(0);
35630 struct input_handler *handler;
35631 const char *path;
35632 int error;
35633 @@ -1585,7 +1585,7 @@ int input_register_device(struct input_dev *dev)
35634 dev->setkeycode = input_default_setkeycode;
35635
35636 dev_set_name(&dev->dev, "input%ld",
35637 - (unsigned long) atomic_inc_return(&input_no) - 1);
35638 + (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
35639
35640 error = device_add(&dev->dev);
35641 if (error)
35642 diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
35643 index ca13a6b..b032b0c 100644
35644 --- a/drivers/input/joystick/sidewinder.c
35645 +++ b/drivers/input/joystick/sidewinder.c
35646 @@ -30,6 +30,7 @@
35647 #include <linux/kernel.h>
35648 #include <linux/module.h>
35649 #include <linux/slab.h>
35650 +#include <linux/sched.h>
35651 #include <linux/init.h>
35652 #include <linux/input.h>
35653 #include <linux/gameport.h>
35654 @@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
35655 unsigned char buf[SW_LENGTH];
35656 int i;
35657
35658 + pax_track_stack();
35659 +
35660 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
35661
35662 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
35663 diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
35664 index 79e3edc..01412b9 100644
35665 --- a/drivers/input/joystick/xpad.c
35666 +++ b/drivers/input/joystick/xpad.c
35667 @@ -621,7 +621,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
35668
35669 static int xpad_led_probe(struct usb_xpad *xpad)
35670 {
35671 - static atomic_t led_seq = ATOMIC_INIT(0);
35672 + static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
35673 long led_no;
35674 struct xpad_led *led;
35675 struct led_classdev *led_cdev;
35676 @@ -634,7 +634,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
35677 if (!led)
35678 return -ENOMEM;
35679
35680 - led_no = (long)atomic_inc_return(&led_seq) - 1;
35681 + led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
35682
35683 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
35684 led->xpad = xpad;
35685 diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
35686 index 0236f0d..c7327f1 100644
35687 --- a/drivers/input/serio/serio.c
35688 +++ b/drivers/input/serio/serio.c
35689 @@ -527,7 +527,7 @@ static void serio_release_port(struct device *dev)
35690 */
35691 static void serio_init_port(struct serio *serio)
35692 {
35693 - static atomic_t serio_no = ATOMIC_INIT(0);
35694 + static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
35695
35696 __module_get(THIS_MODULE);
35697
35698 @@ -536,7 +536,7 @@ static void serio_init_port(struct serio *serio)
35699 mutex_init(&serio->drv_mutex);
35700 device_initialize(&serio->dev);
35701 dev_set_name(&serio->dev, "serio%ld",
35702 - (long)atomic_inc_return(&serio_no) - 1);
35703 + (long)atomic_inc_return_unchecked(&serio_no) - 1);
35704 serio->dev.bus = &serio_bus;
35705 serio->dev.release = serio_release_port;
35706 if (serio->parent) {
35707 diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
35708 index 33dcd8d..2783d25 100644
35709 --- a/drivers/isdn/gigaset/common.c
35710 +++ b/drivers/isdn/gigaset/common.c
35711 @@ -712,7 +712,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
35712 cs->commands_pending = 0;
35713 cs->cur_at_seq = 0;
35714 cs->gotfwver = -1;
35715 - cs->open_count = 0;
35716 + local_set(&cs->open_count, 0);
35717 cs->dev = NULL;
35718 cs->tty = NULL;
35719 cs->tty_dev = NULL;
35720 diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
35721 index a2f6125..6a70677 100644
35722 --- a/drivers/isdn/gigaset/gigaset.h
35723 +++ b/drivers/isdn/gigaset/gigaset.h
35724 @@ -34,6 +34,7 @@
35725 #include <linux/tty_driver.h>
35726 #include <linux/list.h>
35727 #include <asm/atomic.h>
35728 +#include <asm/local.h>
35729
35730 #define GIG_VERSION {0,5,0,0}
35731 #define GIG_COMPAT {0,4,0,0}
35732 @@ -446,7 +447,7 @@ struct cardstate {
35733 spinlock_t cmdlock;
35734 unsigned curlen, cmdbytes;
35735
35736 - unsigned open_count;
35737 + local_t open_count;
35738 struct tty_struct *tty;
35739 struct tasklet_struct if_wake_tasklet;
35740 unsigned control_state;
35741 diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
35742 index b3065b8..c7e8cc9 100644
35743 --- a/drivers/isdn/gigaset/interface.c
35744 +++ b/drivers/isdn/gigaset/interface.c
35745 @@ -165,9 +165,7 @@ static int if_open(struct tty_struct *tty, struct file *filp)
35746 return -ERESTARTSYS; // FIXME -EINTR?
35747 tty->driver_data = cs;
35748
35749 - ++cs->open_count;
35750 -
35751 - if (cs->open_count == 1) {
35752 + if (local_inc_return(&cs->open_count) == 1) {
35753 spin_lock_irqsave(&cs->lock, flags);
35754 cs->tty = tty;
35755 spin_unlock_irqrestore(&cs->lock, flags);
35756 @@ -195,10 +193,10 @@ static void if_close(struct tty_struct *tty, struct file *filp)
35757
35758 if (!cs->connected)
35759 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
35760 - else if (!cs->open_count)
35761 + else if (!local_read(&cs->open_count))
35762 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35763 else {
35764 - if (!--cs->open_count) {
35765 + if (!local_dec_return(&cs->open_count)) {
35766 spin_lock_irqsave(&cs->lock, flags);
35767 cs->tty = NULL;
35768 spin_unlock_irqrestore(&cs->lock, flags);
35769 @@ -233,7 +231,7 @@ static int if_ioctl(struct tty_struct *tty, struct file *file,
35770 if (!cs->connected) {
35771 gig_dbg(DEBUG_IF, "not connected");
35772 retval = -ENODEV;
35773 - } else if (!cs->open_count)
35774 + } else if (!local_read(&cs->open_count))
35775 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35776 else {
35777 retval = 0;
35778 @@ -361,7 +359,7 @@ static int if_write(struct tty_struct *tty, const unsigned char *buf, int count)
35779 if (!cs->connected) {
35780 gig_dbg(DEBUG_IF, "not connected");
35781 retval = -ENODEV;
35782 - } else if (!cs->open_count)
35783 + } else if (!local_read(&cs->open_count))
35784 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35785 else if (cs->mstate != MS_LOCKED) {
35786 dev_warn(cs->dev, "can't write to unlocked device\n");
35787 @@ -395,7 +393,7 @@ static int if_write_room(struct tty_struct *tty)
35788 if (!cs->connected) {
35789 gig_dbg(DEBUG_IF, "not connected");
35790 retval = -ENODEV;
35791 - } else if (!cs->open_count)
35792 + } else if (!local_read(&cs->open_count))
35793 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35794 else if (cs->mstate != MS_LOCKED) {
35795 dev_warn(cs->dev, "can't write to unlocked device\n");
35796 @@ -425,7 +423,7 @@ static int if_chars_in_buffer(struct tty_struct *tty)
35797
35798 if (!cs->connected)
35799 gig_dbg(DEBUG_IF, "not connected");
35800 - else if (!cs->open_count)
35801 + else if (!local_read(&cs->open_count))
35802 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35803 else if (cs->mstate != MS_LOCKED)
35804 dev_warn(cs->dev, "can't write to unlocked device\n");
35805 @@ -453,7 +451,7 @@ static void if_throttle(struct tty_struct *tty)
35806
35807 if (!cs->connected)
35808 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
35809 - else if (!cs->open_count)
35810 + else if (!local_read(&cs->open_count))
35811 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35812 else {
35813 //FIXME
35814 @@ -478,7 +476,7 @@ static void if_unthrottle(struct tty_struct *tty)
35815
35816 if (!cs->connected)
35817 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
35818 - else if (!cs->open_count)
35819 + else if (!local_read(&cs->open_count))
35820 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35821 else {
35822 //FIXME
35823 @@ -510,7 +508,7 @@ static void if_set_termios(struct tty_struct *tty, struct ktermios *old)
35824 goto out;
35825 }
35826
35827 - if (!cs->open_count) {
35828 + if (!local_read(&cs->open_count)) {
35829 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35830 goto out;
35831 }
35832 diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
35833 index a7c0083..62a7cb6 100644
35834 --- a/drivers/isdn/hardware/avm/b1.c
35835 +++ b/drivers/isdn/hardware/avm/b1.c
35836 @@ -173,7 +173,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart * t4file)
35837 }
35838 if (left) {
35839 if (t4file->user) {
35840 - if (copy_from_user(buf, dp, left))
35841 + if (left > sizeof buf || copy_from_user(buf, dp, left))
35842 return -EFAULT;
35843 } else {
35844 memcpy(buf, dp, left);
35845 @@ -221,7 +221,7 @@ int b1_load_config(avmcard *card, capiloaddatapart * config)
35846 }
35847 if (left) {
35848 if (config->user) {
35849 - if (copy_from_user(buf, dp, left))
35850 + if (left > sizeof buf || copy_from_user(buf, dp, left))
35851 return -EFAULT;
35852 } else {
35853 memcpy(buf, dp, left);
35854 diff --git a/drivers/isdn/hardware/eicon/capidtmf.c b/drivers/isdn/hardware/eicon/capidtmf.c
35855 index f130724..c373c68 100644
35856 --- a/drivers/isdn/hardware/eicon/capidtmf.c
35857 +++ b/drivers/isdn/hardware/eicon/capidtmf.c
35858 @@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_state *p_state, byte *buffer, word leng
35859 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
35860 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
35861
35862 + pax_track_stack();
35863
35864 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
35865 {
35866 diff --git a/drivers/isdn/hardware/eicon/capifunc.c b/drivers/isdn/hardware/eicon/capifunc.c
35867 index 4d425c6..a9be6c4 100644
35868 --- a/drivers/isdn/hardware/eicon/capifunc.c
35869 +++ b/drivers/isdn/hardware/eicon/capifunc.c
35870 @@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
35871 IDI_SYNC_REQ req;
35872 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35873
35874 + pax_track_stack();
35875 +
35876 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35877
35878 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35879 diff --git a/drivers/isdn/hardware/eicon/diddfunc.c b/drivers/isdn/hardware/eicon/diddfunc.c
35880 index 3029234..ef0d9e2 100644
35881 --- a/drivers/isdn/hardware/eicon/diddfunc.c
35882 +++ b/drivers/isdn/hardware/eicon/diddfunc.c
35883 @@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
35884 IDI_SYNC_REQ req;
35885 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35886
35887 + pax_track_stack();
35888 +
35889 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35890
35891 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35892 diff --git a/drivers/isdn/hardware/eicon/divasfunc.c b/drivers/isdn/hardware/eicon/divasfunc.c
35893 index d36a4c0..11e7d1a 100644
35894 --- a/drivers/isdn/hardware/eicon/divasfunc.c
35895 +++ b/drivers/isdn/hardware/eicon/divasfunc.c
35896 @@ -161,6 +161,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
35897 IDI_SYNC_REQ req;
35898 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35899
35900 + pax_track_stack();
35901 +
35902 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35903
35904 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35905 diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
35906 index 85784a7..a19ca98 100644
35907 --- a/drivers/isdn/hardware/eicon/divasync.h
35908 +++ b/drivers/isdn/hardware/eicon/divasync.h
35909 @@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
35910 } diva_didd_add_adapter_t;
35911 typedef struct _diva_didd_remove_adapter {
35912 IDI_CALL p_request;
35913 -} diva_didd_remove_adapter_t;
35914 +} __no_const diva_didd_remove_adapter_t;
35915 typedef struct _diva_didd_read_adapter_array {
35916 void * buffer;
35917 dword length;
35918 diff --git a/drivers/isdn/hardware/eicon/idifunc.c b/drivers/isdn/hardware/eicon/idifunc.c
35919 index db87d51..7d09acf 100644
35920 --- a/drivers/isdn/hardware/eicon/idifunc.c
35921 +++ b/drivers/isdn/hardware/eicon/idifunc.c
35922 @@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
35923 IDI_SYNC_REQ req;
35924 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35925
35926 + pax_track_stack();
35927 +
35928 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35929
35930 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35931 diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c
35932 index ae89fb8..0fab299 100644
35933 --- a/drivers/isdn/hardware/eicon/message.c
35934 +++ b/drivers/isdn/hardware/eicon/message.c
35935 @@ -4889,6 +4889,8 @@ static void sig_ind(PLCI *plci)
35936 dword d;
35937 word w;
35938
35939 + pax_track_stack();
35940 +
35941 a = plci->adapter;
35942 Id = ((word)plci->Id<<8)|a->Id;
35943 PUT_WORD(&SS_Ind[4],0x0000);
35944 @@ -7484,6 +7486,8 @@ static word add_b1(PLCI *plci, API_PARSE *bp, word b_channel_info,
35945 word j, n, w;
35946 dword d;
35947
35948 + pax_track_stack();
35949 +
35950
35951 for(i=0;i<8;i++) bp_parms[i].length = 0;
35952 for(i=0;i<2;i++) global_config[i].length = 0;
35953 @@ -7958,6 +7962,8 @@ static word add_b23(PLCI *plci, API_PARSE *bp)
35954 const byte llc3[] = {4,3,2,2,6,6,0};
35955 const byte header[] = {0,2,3,3,0,0,0};
35956
35957 + pax_track_stack();
35958 +
35959 for(i=0;i<8;i++) bp_parms[i].length = 0;
35960 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
35961 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
35962 @@ -14761,6 +14767,8 @@ static void group_optimization(DIVA_CAPI_ADAPTER * a, PLCI * plci)
35963 word appl_number_group_type[MAX_APPL];
35964 PLCI *auxplci;
35965
35966 + pax_track_stack();
35967 +
35968 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
35969
35970 if(!a->group_optimization_enabled)
35971 diff --git a/drivers/isdn/hardware/eicon/mntfunc.c b/drivers/isdn/hardware/eicon/mntfunc.c
35972 index a564b75..f3cf8b5 100644
35973 --- a/drivers/isdn/hardware/eicon/mntfunc.c
35974 +++ b/drivers/isdn/hardware/eicon/mntfunc.c
35975 @@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
35976 IDI_SYNC_REQ req;
35977 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35978
35979 + pax_track_stack();
35980 +
35981 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35982
35983 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35984 diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h
35985 index a3bd163..8956575 100644
35986 --- a/drivers/isdn/hardware/eicon/xdi_adapter.h
35987 +++ b/drivers/isdn/hardware/eicon/xdi_adapter.h
35988 @@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
35989 typedef struct _diva_os_idi_adapter_interface {
35990 diva_init_card_proc_t cleanup_adapter_proc;
35991 diva_cmd_card_proc_t cmd_proc;
35992 -} diva_os_idi_adapter_interface_t;
35993 +} __no_const diva_os_idi_adapter_interface_t;
35994
35995 typedef struct _diva_os_xdi_adapter {
35996 struct list_head link;
35997 diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
35998 index adb1e8c..21b590b 100644
35999 --- a/drivers/isdn/i4l/isdn_common.c
36000 +++ b/drivers/isdn/i4l/isdn_common.c
36001 @@ -1290,6 +1290,8 @@ isdn_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
36002 } iocpar;
36003 void __user *argp = (void __user *)arg;
36004
36005 + pax_track_stack();
36006 +
36007 #define name iocpar.name
36008 #define bname iocpar.bname
36009 #define iocts iocpar.iocts
36010 diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
36011 index 90b56ed..5ed3305 100644
36012 --- a/drivers/isdn/i4l/isdn_net.c
36013 +++ b/drivers/isdn/i4l/isdn_net.c
36014 @@ -1902,7 +1902,7 @@ static int isdn_net_header(struct sk_buff *skb, struct net_device *dev,
36015 {
36016 isdn_net_local *lp = netdev_priv(dev);
36017 unsigned char *p;
36018 - ushort len = 0;
36019 + int len = 0;
36020
36021 switch (lp->p_encap) {
36022 case ISDN_NET_ENCAP_ETHER:
36023 diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
36024 index bf7997a..cf091db 100644
36025 --- a/drivers/isdn/icn/icn.c
36026 +++ b/drivers/isdn/icn/icn.c
36027 @@ -1044,7 +1044,7 @@ icn_writecmd(const u_char * buf, int len, int user, icn_card * card)
36028 if (count > len)
36029 count = len;
36030 if (user) {
36031 - if (copy_from_user(msg, buf, count))
36032 + if (count > sizeof msg || copy_from_user(msg, buf, count))
36033 return -EFAULT;
36034 } else
36035 memcpy(msg, buf, count);
36036 diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
36037 index feb0fa4..f76f830 100644
36038 --- a/drivers/isdn/mISDN/socket.c
36039 +++ b/drivers/isdn/mISDN/socket.c
36040 @@ -391,6 +391,7 @@ data_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
36041 if (dev) {
36042 struct mISDN_devinfo di;
36043
36044 + memset(&di, 0, sizeof(di));
36045 di.id = dev->id;
36046 di.Dprotocols = dev->Dprotocols;
36047 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
36048 @@ -671,6 +672,7 @@ base_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
36049 if (dev) {
36050 struct mISDN_devinfo di;
36051
36052 + memset(&di, 0, sizeof(di));
36053 di.id = dev->id;
36054 di.Dprotocols = dev->Dprotocols;
36055 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
36056 diff --git a/drivers/isdn/sc/interrupt.c b/drivers/isdn/sc/interrupt.c
36057 index 485be8b..f0225bc 100644
36058 --- a/drivers/isdn/sc/interrupt.c
36059 +++ b/drivers/isdn/sc/interrupt.c
36060 @@ -112,11 +112,19 @@ irqreturn_t interrupt_handler(int dummy, void *card_inst)
36061 }
36062 else if(callid>=0x0000 && callid<=0x7FFF)
36063 {
36064 + int len;
36065 +
36066 pr_debug("%s: Got Incoming Call\n",
36067 sc_adapter[card]->devicename);
36068 - strcpy(setup.phone,&(rcvmsg.msg_data.byte_array[4]));
36069 - strcpy(setup.eazmsn,
36070 - sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn);
36071 + len = strlcpy(setup.phone, &(rcvmsg.msg_data.byte_array[4]),
36072 + sizeof(setup.phone));
36073 + if (len >= sizeof(setup.phone))
36074 + continue;
36075 + len = strlcpy(setup.eazmsn,
36076 + sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
36077 + sizeof(setup.eazmsn));
36078 + if (len >= sizeof(setup.eazmsn))
36079 + continue;
36080 setup.si1 = 7;
36081 setup.si2 = 0;
36082 setup.plan = 0;
36083 @@ -176,7 +184,9 @@ irqreturn_t interrupt_handler(int dummy, void *card_inst)
36084 * Handle a GetMyNumber Rsp
36085 */
36086 if (IS_CE_MESSAGE(rcvmsg,Call,0,GetMyNumber)){
36087 - strcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn,rcvmsg.msg_data.byte_array);
36088 + strlcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
36089 + rcvmsg.msg_data.byte_array,
36090 + sizeof(rcvmsg.msg_data.byte_array));
36091 continue;
36092 }
36093
36094 diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
36095 index 8744d24..d1f9a9a 100644
36096 --- a/drivers/lguest/core.c
36097 +++ b/drivers/lguest/core.c
36098 @@ -91,9 +91,17 @@ static __init int map_switcher(void)
36099 * it's worked so far. The end address needs +1 because __get_vm_area
36100 * allocates an extra guard page, so we need space for that.
36101 */
36102 +
36103 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
36104 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
36105 + VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
36106 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
36107 +#else
36108 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
36109 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
36110 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
36111 +#endif
36112 +
36113 if (!switcher_vma) {
36114 err = -ENOMEM;
36115 printk("lguest: could not map switcher pages high\n");
36116 @@ -118,7 +126,7 @@ static __init int map_switcher(void)
36117 * Now the Switcher is mapped at the right address, we can't fail!
36118 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
36119 */
36120 - memcpy(switcher_vma->addr, start_switcher_text,
36121 + memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
36122 end_switcher_text - start_switcher_text);
36123
36124 printk(KERN_INFO "lguest: mapped switcher at %p\n",
36125 diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
36126 index 6ae3888..8b38145 100644
36127 --- a/drivers/lguest/x86/core.c
36128 +++ b/drivers/lguest/x86/core.c
36129 @@ -59,7 +59,7 @@ static struct {
36130 /* Offset from where switcher.S was compiled to where we've copied it */
36131 static unsigned long switcher_offset(void)
36132 {
36133 - return SWITCHER_ADDR - (unsigned long)start_switcher_text;
36134 + return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
36135 }
36136
36137 /* This cpu's struct lguest_pages. */
36138 @@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
36139 * These copies are pretty cheap, so we do them unconditionally: */
36140 /* Save the current Host top-level page directory.
36141 */
36142 +
36143 +#ifdef CONFIG_PAX_PER_CPU_PGD
36144 + pages->state.host_cr3 = read_cr3();
36145 +#else
36146 pages->state.host_cr3 = __pa(current->mm->pgd);
36147 +#endif
36148 +
36149 /*
36150 * Set up the Guest's page tables to see this CPU's pages (and no
36151 * other CPU's pages).
36152 @@ -535,7 +541,7 @@ void __init lguest_arch_host_init(void)
36153 * compiled-in switcher code and the high-mapped copy we just made.
36154 */
36155 for (i = 0; i < IDT_ENTRIES; i++)
36156 - default_idt_entries[i] += switcher_offset();
36157 + default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
36158
36159 /*
36160 * Set up the Switcher's per-cpu areas.
36161 @@ -618,7 +624,7 @@ void __init lguest_arch_host_init(void)
36162 * it will be undisturbed when we switch. To change %cs and jump we
36163 * need this structure to feed to Intel's "lcall" instruction.
36164 */
36165 - lguest_entry.offset = (long)switch_to_guest + switcher_offset();
36166 + lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
36167 lguest_entry.segment = LGUEST_CS;
36168
36169 /*
36170 diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
36171 index 40634b0..4f5855e 100644
36172 --- a/drivers/lguest/x86/switcher_32.S
36173 +++ b/drivers/lguest/x86/switcher_32.S
36174 @@ -87,6 +87,7 @@
36175 #include <asm/page.h>
36176 #include <asm/segment.h>
36177 #include <asm/lguest.h>
36178 +#include <asm/processor-flags.h>
36179
36180 // We mark the start of the code to copy
36181 // It's placed in .text tho it's never run here
36182 @@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
36183 // Changes type when we load it: damn Intel!
36184 // For after we switch over our page tables
36185 // That entry will be read-only: we'd crash.
36186 +
36187 +#ifdef CONFIG_PAX_KERNEXEC
36188 + mov %cr0, %edx
36189 + xor $X86_CR0_WP, %edx
36190 + mov %edx, %cr0
36191 +#endif
36192 +
36193 movl $(GDT_ENTRY_TSS*8), %edx
36194 ltr %dx
36195
36196 @@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
36197 // Let's clear it again for our return.
36198 // The GDT descriptor of the Host
36199 // Points to the table after two "size" bytes
36200 - movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
36201 + movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
36202 // Clear "used" from type field (byte 5, bit 2)
36203 - andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
36204 + andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
36205 +
36206 +#ifdef CONFIG_PAX_KERNEXEC
36207 + mov %cr0, %eax
36208 + xor $X86_CR0_WP, %eax
36209 + mov %eax, %cr0
36210 +#endif
36211
36212 // Once our page table's switched, the Guest is live!
36213 // The Host fades as we run this final step.
36214 @@ -295,13 +309,12 @@ deliver_to_host:
36215 // I consulted gcc, and it gave
36216 // These instructions, which I gladly credit:
36217 leal (%edx,%ebx,8), %eax
36218 - movzwl (%eax),%edx
36219 - movl 4(%eax), %eax
36220 - xorw %ax, %ax
36221 - orl %eax, %edx
36222 + movl 4(%eax), %edx
36223 + movw (%eax), %dx
36224 // Now the address of the handler's in %edx
36225 // We call it now: its "iret" drops us home.
36226 - jmp *%edx
36227 + ljmp $__KERNEL_CS, $1f
36228 +1: jmp *%edx
36229
36230 // Every interrupt can come to us here
36231 // But we must truly tell each apart.
36232 diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
36233 index 588a5b0..b71db89 100644
36234 --- a/drivers/macintosh/macio_asic.c
36235 +++ b/drivers/macintosh/macio_asic.c
36236 @@ -701,7 +701,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev)
36237 * MacIO is matched against any Apple ID, it's probe() function
36238 * will then decide wether it applies or not
36239 */
36240 -static const struct pci_device_id __devinitdata pci_ids [] = { {
36241 +static const struct pci_device_id __devinitconst pci_ids [] = { {
36242 .vendor = PCI_VENDOR_ID_APPLE,
36243 .device = PCI_ANY_ID,
36244 .subvendor = PCI_ANY_ID,
36245 diff --git a/drivers/macintosh/via-pmu-backlight.c b/drivers/macintosh/via-pmu-backlight.c
36246 index a348bb0..ecd9b3f 100644
36247 --- a/drivers/macintosh/via-pmu-backlight.c
36248 +++ b/drivers/macintosh/via-pmu-backlight.c
36249 @@ -15,7 +15,7 @@
36250
36251 #define MAX_PMU_LEVEL 0xFF
36252
36253 -static struct backlight_ops pmu_backlight_data;
36254 +static const struct backlight_ops pmu_backlight_data;
36255 static DEFINE_SPINLOCK(pmu_backlight_lock);
36256 static int sleeping, uses_pmu_bl;
36257 static u8 bl_curve[FB_BACKLIGHT_LEVELS];
36258 @@ -115,7 +115,7 @@ static int pmu_backlight_get_brightness(struct backlight_device *bd)
36259 return bd->props.brightness;
36260 }
36261
36262 -static struct backlight_ops pmu_backlight_data = {
36263 +static const struct backlight_ops pmu_backlight_data = {
36264 .get_brightness = pmu_backlight_get_brightness,
36265 .update_status = pmu_backlight_update_status,
36266
36267 diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
36268 index 6f308a4..b5f7ff7 100644
36269 --- a/drivers/macintosh/via-pmu.c
36270 +++ b/drivers/macintosh/via-pmu.c
36271 @@ -2232,7 +2232,7 @@ static int pmu_sleep_valid(suspend_state_t state)
36272 && (pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, -1) >= 0);
36273 }
36274
36275 -static struct platform_suspend_ops pmu_pm_ops = {
36276 +static const struct platform_suspend_ops pmu_pm_ops = {
36277 .enter = powerbook_sleep,
36278 .valid = pmu_sleep_valid,
36279 };
36280 diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
36281 index 818b617..4656e38 100644
36282 --- a/drivers/md/dm-ioctl.c
36283 +++ b/drivers/md/dm-ioctl.c
36284 @@ -1437,7 +1437,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
36285 cmd == DM_LIST_VERSIONS_CMD)
36286 return 0;
36287
36288 - if ((cmd == DM_DEV_CREATE_CMD)) {
36289 + if (cmd == DM_DEV_CREATE_CMD) {
36290 if (!*param->name) {
36291 DMWARN("name not supplied when creating device");
36292 return -EINVAL;
36293 diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
36294 index 6021d0a..a878643 100644
36295 --- a/drivers/md/dm-raid1.c
36296 +++ b/drivers/md/dm-raid1.c
36297 @@ -41,7 +41,7 @@ enum dm_raid1_error {
36298
36299 struct mirror {
36300 struct mirror_set *ms;
36301 - atomic_t error_count;
36302 + atomic_unchecked_t error_count;
36303 unsigned long error_type;
36304 struct dm_dev *dev;
36305 sector_t offset;
36306 @@ -203,7 +203,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
36307 * simple way to tell if a device has encountered
36308 * errors.
36309 */
36310 - atomic_inc(&m->error_count);
36311 + atomic_inc_unchecked(&m->error_count);
36312
36313 if (test_and_set_bit(error_type, &m->error_type))
36314 return;
36315 @@ -225,7 +225,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
36316 }
36317
36318 for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++)
36319 - if (!atomic_read(&new->error_count)) {
36320 + if (!atomic_read_unchecked(&new->error_count)) {
36321 set_default_mirror(new);
36322 break;
36323 }
36324 @@ -363,7 +363,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
36325 struct mirror *m = get_default_mirror(ms);
36326
36327 do {
36328 - if (likely(!atomic_read(&m->error_count)))
36329 + if (likely(!atomic_read_unchecked(&m->error_count)))
36330 return m;
36331
36332 if (m-- == ms->mirror)
36333 @@ -377,7 +377,7 @@ static int default_ok(struct mirror *m)
36334 {
36335 struct mirror *default_mirror = get_default_mirror(m->ms);
36336
36337 - return !atomic_read(&default_mirror->error_count);
36338 + return !atomic_read_unchecked(&default_mirror->error_count);
36339 }
36340
36341 static int mirror_available(struct mirror_set *ms, struct bio *bio)
36342 @@ -484,7 +484,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
36343 */
36344 if (likely(region_in_sync(ms, region, 1)))
36345 m = choose_mirror(ms, bio->bi_sector);
36346 - else if (m && atomic_read(&m->error_count))
36347 + else if (m && atomic_read_unchecked(&m->error_count))
36348 m = NULL;
36349
36350 if (likely(m))
36351 @@ -855,7 +855,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
36352 }
36353
36354 ms->mirror[mirror].ms = ms;
36355 - atomic_set(&(ms->mirror[mirror].error_count), 0);
36356 + atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
36357 ms->mirror[mirror].error_type = 0;
36358 ms->mirror[mirror].offset = offset;
36359
36360 @@ -1241,7 +1241,7 @@ static void mirror_resume(struct dm_target *ti)
36361 */
36362 static char device_status_char(struct mirror *m)
36363 {
36364 - if (!atomic_read(&(m->error_count)))
36365 + if (!atomic_read_unchecked(&(m->error_count)))
36366 return 'A';
36367
36368 return (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
36369 diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
36370 index bd58703..9f26571 100644
36371 --- a/drivers/md/dm-stripe.c
36372 +++ b/drivers/md/dm-stripe.c
36373 @@ -20,7 +20,7 @@ struct stripe {
36374 struct dm_dev *dev;
36375 sector_t physical_start;
36376
36377 - atomic_t error_count;
36378 + atomic_unchecked_t error_count;
36379 };
36380
36381 struct stripe_c {
36382 @@ -188,7 +188,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
36383 kfree(sc);
36384 return r;
36385 }
36386 - atomic_set(&(sc->stripe[i].error_count), 0);
36387 + atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
36388 }
36389
36390 ti->private = sc;
36391 @@ -257,7 +257,7 @@ static int stripe_status(struct dm_target *ti,
36392 DMEMIT("%d ", sc->stripes);
36393 for (i = 0; i < sc->stripes; i++) {
36394 DMEMIT("%s ", sc->stripe[i].dev->name);
36395 - buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
36396 + buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
36397 'D' : 'A';
36398 }
36399 buffer[i] = '\0';
36400 @@ -304,8 +304,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
36401 */
36402 for (i = 0; i < sc->stripes; i++)
36403 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
36404 - atomic_inc(&(sc->stripe[i].error_count));
36405 - if (atomic_read(&(sc->stripe[i].error_count)) <
36406 + atomic_inc_unchecked(&(sc->stripe[i].error_count));
36407 + if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
36408 DM_IO_ERROR_THRESHOLD)
36409 queue_work(kstriped, &sc->kstriped_ws);
36410 }
36411 diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c
36412 index 4b04590..13a77b2 100644
36413 --- a/drivers/md/dm-sysfs.c
36414 +++ b/drivers/md/dm-sysfs.c
36415 @@ -75,7 +75,7 @@ static struct attribute *dm_attrs[] = {
36416 NULL,
36417 };
36418
36419 -static struct sysfs_ops dm_sysfs_ops = {
36420 +static const struct sysfs_ops dm_sysfs_ops = {
36421 .show = dm_attr_show,
36422 };
36423
36424 diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
36425 index 03345bb..332250d 100644
36426 --- a/drivers/md/dm-table.c
36427 +++ b/drivers/md/dm-table.c
36428 @@ -376,7 +376,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
36429 if (!dev_size)
36430 return 0;
36431
36432 - if ((start >= dev_size) || (start + len > dev_size)) {
36433 + if ((start >= dev_size) || (len > dev_size - start)) {
36434 DMWARN("%s: %s too small for target: "
36435 "start=%llu, len=%llu, dev_size=%llu",
36436 dm_device_name(ti->table->md), bdevname(bdev, b),
36437 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
36438 index c988ac2..c418141 100644
36439 --- a/drivers/md/dm.c
36440 +++ b/drivers/md/dm.c
36441 @@ -165,9 +165,9 @@ struct mapped_device {
36442 /*
36443 * Event handling.
36444 */
36445 - atomic_t event_nr;
36446 + atomic_unchecked_t event_nr;
36447 wait_queue_head_t eventq;
36448 - atomic_t uevent_seq;
36449 + atomic_unchecked_t uevent_seq;
36450 struct list_head uevent_list;
36451 spinlock_t uevent_lock; /* Protect access to uevent_list */
36452
36453 @@ -1776,8 +1776,8 @@ static struct mapped_device *alloc_dev(int minor)
36454 rwlock_init(&md->map_lock);
36455 atomic_set(&md->holders, 1);
36456 atomic_set(&md->open_count, 0);
36457 - atomic_set(&md->event_nr, 0);
36458 - atomic_set(&md->uevent_seq, 0);
36459 + atomic_set_unchecked(&md->event_nr, 0);
36460 + atomic_set_unchecked(&md->uevent_seq, 0);
36461 INIT_LIST_HEAD(&md->uevent_list);
36462 spin_lock_init(&md->uevent_lock);
36463
36464 @@ -1927,7 +1927,7 @@ static void event_callback(void *context)
36465
36466 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
36467
36468 - atomic_inc(&md->event_nr);
36469 + atomic_inc_unchecked(&md->event_nr);
36470 wake_up(&md->eventq);
36471 }
36472
36473 @@ -2562,18 +2562,18 @@ void dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
36474
36475 uint32_t dm_next_uevent_seq(struct mapped_device *md)
36476 {
36477 - return atomic_add_return(1, &md->uevent_seq);
36478 + return atomic_add_return_unchecked(1, &md->uevent_seq);
36479 }
36480
36481 uint32_t dm_get_event_nr(struct mapped_device *md)
36482 {
36483 - return atomic_read(&md->event_nr);
36484 + return atomic_read_unchecked(&md->event_nr);
36485 }
36486
36487 int dm_wait_event(struct mapped_device *md, int event_nr)
36488 {
36489 return wait_event_interruptible(md->eventq,
36490 - (event_nr != atomic_read(&md->event_nr)));
36491 + (event_nr != atomic_read_unchecked(&md->event_nr)));
36492 }
36493
36494 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
36495 diff --git a/drivers/md/md.c b/drivers/md/md.c
36496 index 4ce6e2f..7a9530a 100644
36497 --- a/drivers/md/md.c
36498 +++ b/drivers/md/md.c
36499 @@ -153,10 +153,10 @@ static int start_readonly;
36500 * start build, activate spare
36501 */
36502 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
36503 -static atomic_t md_event_count;
36504 +static atomic_unchecked_t md_event_count;
36505 void md_new_event(mddev_t *mddev)
36506 {
36507 - atomic_inc(&md_event_count);
36508 + atomic_inc_unchecked(&md_event_count);
36509 wake_up(&md_event_waiters);
36510 }
36511 EXPORT_SYMBOL_GPL(md_new_event);
36512 @@ -166,7 +166,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
36513 */
36514 static void md_new_event_inintr(mddev_t *mddev)
36515 {
36516 - atomic_inc(&md_event_count);
36517 + atomic_inc_unchecked(&md_event_count);
36518 wake_up(&md_event_waiters);
36519 }
36520
36521 @@ -1226,7 +1226,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
36522
36523 rdev->preferred_minor = 0xffff;
36524 rdev->data_offset = le64_to_cpu(sb->data_offset);
36525 - atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
36526 + atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
36527
36528 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
36529 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
36530 @@ -1400,7 +1400,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
36531 else
36532 sb->resync_offset = cpu_to_le64(0);
36533
36534 - sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
36535 + sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
36536
36537 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
36538 sb->size = cpu_to_le64(mddev->dev_sectors);
36539 @@ -2222,7 +2222,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
36540 static ssize_t
36541 errors_show(mdk_rdev_t *rdev, char *page)
36542 {
36543 - return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
36544 + return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
36545 }
36546
36547 static ssize_t
36548 @@ -2231,7 +2231,7 @@ errors_store(mdk_rdev_t *rdev, const char *buf, size_t len)
36549 char *e;
36550 unsigned long n = simple_strtoul(buf, &e, 10);
36551 if (*buf && (*e == 0 || *e == '\n')) {
36552 - atomic_set(&rdev->corrected_errors, n);
36553 + atomic_set_unchecked(&rdev->corrected_errors, n);
36554 return len;
36555 }
36556 return -EINVAL;
36557 @@ -2525,7 +2525,7 @@ static void rdev_free(struct kobject *ko)
36558 mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
36559 kfree(rdev);
36560 }
36561 -static struct sysfs_ops rdev_sysfs_ops = {
36562 +static const struct sysfs_ops rdev_sysfs_ops = {
36563 .show = rdev_attr_show,
36564 .store = rdev_attr_store,
36565 };
36566 @@ -2574,8 +2574,8 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
36567 rdev->data_offset = 0;
36568 rdev->sb_events = 0;
36569 atomic_set(&rdev->nr_pending, 0);
36570 - atomic_set(&rdev->read_errors, 0);
36571 - atomic_set(&rdev->corrected_errors, 0);
36572 + atomic_set_unchecked(&rdev->read_errors, 0);
36573 + atomic_set_unchecked(&rdev->corrected_errors, 0);
36574
36575 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
36576 if (!size) {
36577 @@ -3895,7 +3895,7 @@ static void md_free(struct kobject *ko)
36578 kfree(mddev);
36579 }
36580
36581 -static struct sysfs_ops md_sysfs_ops = {
36582 +static const struct sysfs_ops md_sysfs_ops = {
36583 .show = md_attr_show,
36584 .store = md_attr_store,
36585 };
36586 @@ -4482,7 +4482,8 @@ out:
36587 err = 0;
36588 blk_integrity_unregister(disk);
36589 md_new_event(mddev);
36590 - sysfs_notify_dirent(mddev->sysfs_state);
36591 + if (mddev->sysfs_state)
36592 + sysfs_notify_dirent(mddev->sysfs_state);
36593 return err;
36594 }
36595
36596 @@ -5962,7 +5963,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
36597
36598 spin_unlock(&pers_lock);
36599 seq_printf(seq, "\n");
36600 - mi->event = atomic_read(&md_event_count);
36601 + mi->event = atomic_read_unchecked(&md_event_count);
36602 return 0;
36603 }
36604 if (v == (void*)2) {
36605 @@ -6051,7 +6052,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
36606 chunk_kb ? "KB" : "B");
36607 if (bitmap->file) {
36608 seq_printf(seq, ", file: ");
36609 - seq_path(seq, &bitmap->file->f_path, " \t\n");
36610 + seq_path(seq, &bitmap->file->f_path, " \t\n\\");
36611 }
36612
36613 seq_printf(seq, "\n");
36614 @@ -6085,7 +6086,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
36615 else {
36616 struct seq_file *p = file->private_data;
36617 p->private = mi;
36618 - mi->event = atomic_read(&md_event_count);
36619 + mi->event = atomic_read_unchecked(&md_event_count);
36620 }
36621 return error;
36622 }
36623 @@ -6101,7 +6102,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
36624 /* always allow read */
36625 mask = POLLIN | POLLRDNORM;
36626
36627 - if (mi->event != atomic_read(&md_event_count))
36628 + if (mi->event != atomic_read_unchecked(&md_event_count))
36629 mask |= POLLERR | POLLPRI;
36630 return mask;
36631 }
36632 @@ -6145,7 +6146,7 @@ static int is_mddev_idle(mddev_t *mddev, int init)
36633 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
36634 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
36635 (int)part_stat_read(&disk->part0, sectors[1]) -
36636 - atomic_read(&disk->sync_io);
36637 + atomic_read_unchecked(&disk->sync_io);
36638 /* sync IO will cause sync_io to increase before the disk_stats
36639 * as sync_io is counted when a request starts, and
36640 * disk_stats is counted when it completes.
36641 diff --git a/drivers/md/md.h b/drivers/md/md.h
36642 index 87430fe..0024a4c 100644
36643 --- a/drivers/md/md.h
36644 +++ b/drivers/md/md.h
36645 @@ -94,10 +94,10 @@ struct mdk_rdev_s
36646 * only maintained for arrays that
36647 * support hot removal
36648 */
36649 - atomic_t read_errors; /* number of consecutive read errors that
36650 + atomic_unchecked_t read_errors; /* number of consecutive read errors that
36651 * we have tried to ignore.
36652 */
36653 - atomic_t corrected_errors; /* number of corrected read errors,
36654 + atomic_unchecked_t corrected_errors; /* number of corrected read errors,
36655 * for reporting to userspace and storing
36656 * in superblock.
36657 */
36658 @@ -304,7 +304,7 @@ static inline void rdev_dec_pending(mdk_rdev_t *rdev, mddev_t *mddev)
36659
36660 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
36661 {
36662 - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
36663 + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
36664 }
36665
36666 struct mdk_personality
36667 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
36668 index 968cb14..f0ad2e4 100644
36669 --- a/drivers/md/raid1.c
36670 +++ b/drivers/md/raid1.c
36671 @@ -1415,7 +1415,7 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
36672 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
36673 continue;
36674 rdev = conf->mirrors[d].rdev;
36675 - atomic_add(s, &rdev->corrected_errors);
36676 + atomic_add_unchecked(s, &rdev->corrected_errors);
36677 if (sync_page_io(rdev->bdev,
36678 sect + rdev->data_offset,
36679 s<<9,
36680 @@ -1564,7 +1564,7 @@ static void fix_read_error(conf_t *conf, int read_disk,
36681 /* Well, this device is dead */
36682 md_error(mddev, rdev);
36683 else {
36684 - atomic_add(s, &rdev->corrected_errors);
36685 + atomic_add_unchecked(s, &rdev->corrected_errors);
36686 printk(KERN_INFO
36687 "raid1:%s: read error corrected "
36688 "(%d sectors at %llu on %s)\n",
36689 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
36690 index 1b4e232..cf0f534 100644
36691 --- a/drivers/md/raid10.c
36692 +++ b/drivers/md/raid10.c
36693 @@ -1255,7 +1255,7 @@ static void end_sync_read(struct bio *bio, int error)
36694 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
36695 set_bit(R10BIO_Uptodate, &r10_bio->state);
36696 else {
36697 - atomic_add(r10_bio->sectors,
36698 + atomic_add_unchecked(r10_bio->sectors,
36699 &conf->mirrors[d].rdev->corrected_errors);
36700 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
36701 md_error(r10_bio->mddev,
36702 @@ -1520,7 +1520,7 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
36703 test_bit(In_sync, &rdev->flags)) {
36704 atomic_inc(&rdev->nr_pending);
36705 rcu_read_unlock();
36706 - atomic_add(s, &rdev->corrected_errors);
36707 + atomic_add_unchecked(s, &rdev->corrected_errors);
36708 if (sync_page_io(rdev->bdev,
36709 r10_bio->devs[sl].addr +
36710 sect + rdev->data_offset,
36711 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
36712 index 883215d..675bf47 100644
36713 --- a/drivers/md/raid5.c
36714 +++ b/drivers/md/raid5.c
36715 @@ -482,7 +482,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
36716 bi->bi_next = NULL;
36717 if ((rw & WRITE) &&
36718 test_bit(R5_ReWrite, &sh->dev[i].flags))
36719 - atomic_add(STRIPE_SECTORS,
36720 + atomic_add_unchecked(STRIPE_SECTORS,
36721 &rdev->corrected_errors);
36722 generic_make_request(bi);
36723 } else {
36724 @@ -1517,15 +1517,15 @@ static void raid5_end_read_request(struct bio * bi, int error)
36725 clear_bit(R5_ReadError, &sh->dev[i].flags);
36726 clear_bit(R5_ReWrite, &sh->dev[i].flags);
36727 }
36728 - if (atomic_read(&conf->disks[i].rdev->read_errors))
36729 - atomic_set(&conf->disks[i].rdev->read_errors, 0);
36730 + if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
36731 + atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
36732 } else {
36733 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
36734 int retry = 0;
36735 rdev = conf->disks[i].rdev;
36736
36737 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
36738 - atomic_inc(&rdev->read_errors);
36739 + atomic_inc_unchecked(&rdev->read_errors);
36740 if (conf->mddev->degraded >= conf->max_degraded)
36741 printk_rl(KERN_WARNING
36742 "raid5:%s: read error not correctable "
36743 @@ -1543,7 +1543,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
36744 (unsigned long long)(sh->sector
36745 + rdev->data_offset),
36746 bdn);
36747 - else if (atomic_read(&rdev->read_errors)
36748 + else if (atomic_read_unchecked(&rdev->read_errors)
36749 > conf->max_nr_stripes)
36750 printk(KERN_WARNING
36751 "raid5:%s: Too many read errors, failing device %s.\n",
36752 @@ -1870,6 +1870,7 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
36753 sector_t r_sector;
36754 struct stripe_head sh2;
36755
36756 + pax_track_stack();
36757
36758 chunk_offset = sector_div(new_sector, sectors_per_chunk);
36759 stripe = new_sector;
36760 diff --git a/drivers/media/common/saa7146_hlp.c b/drivers/media/common/saa7146_hlp.c
36761 index 05bde9c..2f31d40 100644
36762 --- a/drivers/media/common/saa7146_hlp.c
36763 +++ b/drivers/media/common/saa7146_hlp.c
36764 @@ -353,6 +353,8 @@ static void calculate_clipping_registers_rect(struct saa7146_dev *dev, struct sa
36765
36766 int x[32], y[32], w[32], h[32];
36767
36768 + pax_track_stack();
36769 +
36770 /* clear out memory */
36771 memset(&line_list[0], 0x00, sizeof(u32)*32);
36772 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
36773 diff --git a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
36774 index cb22da5..82b686e 100644
36775 --- a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
36776 +++ b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
36777 @@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot, u8 * eb
36778 u8 buf[HOST_LINK_BUF_SIZE];
36779 int i;
36780
36781 + pax_track_stack();
36782 +
36783 dprintk("%s\n", __func__);
36784
36785 /* check if we have space for a link buf in the rx_buffer */
36786 @@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(struct file *file,
36787 unsigned long timeout;
36788 int written;
36789
36790 + pax_track_stack();
36791 +
36792 dprintk("%s\n", __func__);
36793
36794 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
36795 diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
36796 index 2fe05d0..a3289c4 100644
36797 --- a/drivers/media/dvb/dvb-core/dvb_demux.h
36798 +++ b/drivers/media/dvb/dvb-core/dvb_demux.h
36799 @@ -71,7 +71,7 @@ struct dvb_demux_feed {
36800 union {
36801 dmx_ts_cb ts;
36802 dmx_section_cb sec;
36803 - } cb;
36804 + } __no_const cb;
36805
36806 struct dvb_demux *demux;
36807 void *priv;
36808 diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
36809 index 94159b9..376bd8e 100644
36810 --- a/drivers/media/dvb/dvb-core/dvbdev.c
36811 +++ b/drivers/media/dvb/dvb-core/dvbdev.c
36812 @@ -191,7 +191,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
36813 const struct dvb_device *template, void *priv, int type)
36814 {
36815 struct dvb_device *dvbdev;
36816 - struct file_operations *dvbdevfops;
36817 + file_operations_no_const *dvbdevfops;
36818 struct device *clsdev;
36819 int minor;
36820 int id;
36821 diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
36822 index 2a53dd0..db8c07a 100644
36823 --- a/drivers/media/dvb/dvb-usb/cxusb.c
36824 +++ b/drivers/media/dvb/dvb-usb/cxusb.c
36825 @@ -1040,7 +1040,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
36826 struct dib0700_adapter_state {
36827 int (*set_param_save) (struct dvb_frontend *,
36828 struct dvb_frontend_parameters *);
36829 -};
36830 +} __no_const;
36831
36832 static int dib7070_set_param_override(struct dvb_frontend *fe,
36833 struct dvb_frontend_parameters *fep)
36834 diff --git a/drivers/media/dvb/dvb-usb/dib0700_core.c b/drivers/media/dvb/dvb-usb/dib0700_core.c
36835 index db7f7f7..f55e96f 100644
36836 --- a/drivers/media/dvb/dvb-usb/dib0700_core.c
36837 +++ b/drivers/media/dvb/dvb-usb/dib0700_core.c
36838 @@ -332,6 +332,8 @@ int dib0700_download_firmware(struct usb_device *udev, const struct firmware *fw
36839
36840 u8 buf[260];
36841
36842 + pax_track_stack();
36843 +
36844 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
36845 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",hx.addr, hx.len, hx.chk);
36846
36847 diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c
36848 index 524acf5..5ffc403 100644
36849 --- a/drivers/media/dvb/dvb-usb/dib0700_devices.c
36850 +++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c
36851 @@ -28,7 +28,7 @@ MODULE_PARM_DESC(force_lna_activation, "force the activation of Low-Noise-Amplif
36852
36853 struct dib0700_adapter_state {
36854 int (*set_param_save) (struct dvb_frontend *, struct dvb_frontend_parameters *);
36855 -};
36856 +} __no_const;
36857
36858 /* Hauppauge Nova-T 500 (aka Bristol)
36859 * has a LNA on GPIO0 which is enabled by setting 1 */
36860 diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h
36861 index ba91735..4261d84 100644
36862 --- a/drivers/media/dvb/frontends/dib3000.h
36863 +++ b/drivers/media/dvb/frontends/dib3000.h
36864 @@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
36865 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
36866 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
36867 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
36868 -};
36869 +} __no_const;
36870
36871 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
36872 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
36873 diff --git a/drivers/media/dvb/frontends/or51211.c b/drivers/media/dvb/frontends/or51211.c
36874 index c709ce6..b3fe620 100644
36875 --- a/drivers/media/dvb/frontends/or51211.c
36876 +++ b/drivers/media/dvb/frontends/or51211.c
36877 @@ -113,6 +113,8 @@ static int or51211_load_firmware (struct dvb_frontend* fe,
36878 u8 tudata[585];
36879 int i;
36880
36881 + pax_track_stack();
36882 +
36883 dprintk("Firmware is %zd bytes\n",fw->size);
36884
36885 /* Get eprom data */
36886 diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
36887 index 482d0f3..ee1e202 100644
36888 --- a/drivers/media/radio/radio-cadet.c
36889 +++ b/drivers/media/radio/radio-cadet.c
36890 @@ -347,7 +347,7 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
36891 while (i < count && dev->rdsin != dev->rdsout)
36892 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
36893
36894 - if (copy_to_user(data, readbuf, i))
36895 + if (i > sizeof readbuf || copy_to_user(data, readbuf, i))
36896 return -EFAULT;
36897 return i;
36898 }
36899 diff --git a/drivers/media/video/cx18/cx18-driver.c b/drivers/media/video/cx18/cx18-driver.c
36900 index 6dd51e2..0359b92 100644
36901 --- a/drivers/media/video/cx18/cx18-driver.c
36902 +++ b/drivers/media/video/cx18/cx18-driver.c
36903 @@ -56,7 +56,7 @@ static struct pci_device_id cx18_pci_tbl[] __devinitdata = {
36904
36905 MODULE_DEVICE_TABLE(pci, cx18_pci_tbl);
36906
36907 -static atomic_t cx18_instance = ATOMIC_INIT(0);
36908 +static atomic_unchecked_t cx18_instance = ATOMIC_INIT(0);
36909
36910 /* Parameter declarations */
36911 static int cardtype[CX18_MAX_CARDS];
36912 @@ -288,6 +288,8 @@ void cx18_read_eeprom(struct cx18 *cx, struct tveeprom *tv)
36913 struct i2c_client c;
36914 u8 eedata[256];
36915
36916 + pax_track_stack();
36917 +
36918 memset(&c, 0, sizeof(c));
36919 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
36920 c.adapter = &cx->i2c_adap[0];
36921 @@ -800,7 +802,7 @@ static int __devinit cx18_probe(struct pci_dev *pci_dev,
36922 struct cx18 *cx;
36923
36924 /* FIXME - module parameter arrays constrain max instances */
36925 - i = atomic_inc_return(&cx18_instance) - 1;
36926 + i = atomic_inc_return_unchecked(&cx18_instance) - 1;
36927 if (i >= CX18_MAX_CARDS) {
36928 printk(KERN_ERR "cx18: cannot manage card %d, driver has a "
36929 "limit of 0 - %d\n", i, CX18_MAX_CARDS - 1);
36930 diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c
36931 index 463ec34..2f4625a 100644
36932 --- a/drivers/media/video/ivtv/ivtv-driver.c
36933 +++ b/drivers/media/video/ivtv/ivtv-driver.c
36934 @@ -79,7 +79,7 @@ static struct pci_device_id ivtv_pci_tbl[] __devinitdata = {
36935 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
36936
36937 /* ivtv instance counter */
36938 -static atomic_t ivtv_instance = ATOMIC_INIT(0);
36939 +static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
36940
36941 /* Parameter declarations */
36942 static int cardtype[IVTV_MAX_CARDS];
36943 diff --git a/drivers/media/video/omap24xxcam.c b/drivers/media/video/omap24xxcam.c
36944 index 5fc4ac0..652a54a 100644
36945 --- a/drivers/media/video/omap24xxcam.c
36946 +++ b/drivers/media/video/omap24xxcam.c
36947 @@ -401,7 +401,7 @@ static void omap24xxcam_vbq_complete(struct omap24xxcam_sgdma *sgdma,
36948 spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
36949
36950 do_gettimeofday(&vb->ts);
36951 - vb->field_count = atomic_add_return(2, &fh->field_count);
36952 + vb->field_count = atomic_add_return_unchecked(2, &fh->field_count);
36953 if (csr & csr_error) {
36954 vb->state = VIDEOBUF_ERROR;
36955 if (!atomic_read(&fh->cam->in_reset)) {
36956 diff --git a/drivers/media/video/omap24xxcam.h b/drivers/media/video/omap24xxcam.h
36957 index 2ce67f5..cf26a5b 100644
36958 --- a/drivers/media/video/omap24xxcam.h
36959 +++ b/drivers/media/video/omap24xxcam.h
36960 @@ -533,7 +533,7 @@ struct omap24xxcam_fh {
36961 spinlock_t vbq_lock; /* spinlock for the videobuf queue */
36962 struct videobuf_queue vbq;
36963 struct v4l2_pix_format pix; /* serialise pix by vbq->lock */
36964 - atomic_t field_count; /* field counter for videobuf_buffer */
36965 + atomic_unchecked_t field_count; /* field counter for videobuf_buffer */
36966 /* accessing cam here doesn't need serialisation: it's constant */
36967 struct omap24xxcam_device *cam;
36968 };
36969 diff --git a/drivers/media/video/pvrusb2/pvrusb2-eeprom.c b/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
36970 index 299afa4..eb47459 100644
36971 --- a/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
36972 +++ b/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
36973 @@ -119,6 +119,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw *hdw)
36974 u8 *eeprom;
36975 struct tveeprom tvdata;
36976
36977 + pax_track_stack();
36978 +
36979 memset(&tvdata,0,sizeof(tvdata));
36980
36981 eeprom = pvr2_eeprom_fetch(hdw);
36982 diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
36983 index 5b152ff..3320638 100644
36984 --- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
36985 +++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
36986 @@ -195,7 +195,7 @@ struct pvr2_hdw {
36987
36988 /* I2C stuff */
36989 struct i2c_adapter i2c_adap;
36990 - struct i2c_algorithm i2c_algo;
36991 + i2c_algorithm_no_const i2c_algo;
36992 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
36993 int i2c_cx25840_hack_state;
36994 int i2c_linked;
36995 diff --git a/drivers/media/video/saa7134/saa6752hs.c b/drivers/media/video/saa7134/saa6752hs.c
36996 index 1eabff6..8e2313a 100644
36997 --- a/drivers/media/video/saa7134/saa6752hs.c
36998 +++ b/drivers/media/video/saa7134/saa6752hs.c
36999 @@ -683,6 +683,8 @@ static int saa6752hs_init(struct v4l2_subdev *sd, u32 leading_null_bytes)
37000 unsigned char localPAT[256];
37001 unsigned char localPMT[256];
37002
37003 + pax_track_stack();
37004 +
37005 /* Set video format - must be done first as it resets other settings */
37006 set_reg8(client, 0x41, h->video_format);
37007
37008 diff --git a/drivers/media/video/saa7164/saa7164-cmd.c b/drivers/media/video/saa7164/saa7164-cmd.c
37009 index 9c1d3ac..b1b49e9 100644
37010 --- a/drivers/media/video/saa7164/saa7164-cmd.c
37011 +++ b/drivers/media/video/saa7164/saa7164-cmd.c
37012 @@ -87,6 +87,8 @@ int saa7164_irq_dequeue(struct saa7164_dev *dev)
37013 wait_queue_head_t *q = 0;
37014 dprintk(DBGLVL_CMD, "%s()\n", __func__);
37015
37016 + pax_track_stack();
37017 +
37018 /* While any outstand message on the bus exists... */
37019 do {
37020
37021 @@ -126,6 +128,8 @@ int saa7164_cmd_dequeue(struct saa7164_dev *dev)
37022 u8 tmp[512];
37023 dprintk(DBGLVL_CMD, "%s()\n", __func__);
37024
37025 + pax_track_stack();
37026 +
37027 while (loop) {
37028
37029 tmComResInfo_t tRsp = { 0, 0, 0, 0, 0, 0 };
37030 diff --git a/drivers/media/video/usbvideo/ibmcam.c b/drivers/media/video/usbvideo/ibmcam.c
37031 index b085496..cde0270 100644
37032 --- a/drivers/media/video/usbvideo/ibmcam.c
37033 +++ b/drivers/media/video/usbvideo/ibmcam.c
37034 @@ -3947,15 +3947,15 @@ static struct usb_device_id id_table[] = {
37035 static int __init ibmcam_init(void)
37036 {
37037 struct usbvideo_cb cbTbl;
37038 - memset(&cbTbl, 0, sizeof(cbTbl));
37039 - cbTbl.probe = ibmcam_probe;
37040 - cbTbl.setupOnOpen = ibmcam_setup_on_open;
37041 - cbTbl.videoStart = ibmcam_video_start;
37042 - cbTbl.videoStop = ibmcam_video_stop;
37043 - cbTbl.processData = ibmcam_ProcessIsocData;
37044 - cbTbl.postProcess = usbvideo_DeinterlaceFrame;
37045 - cbTbl.adjustPicture = ibmcam_adjust_picture;
37046 - cbTbl.getFPS = ibmcam_calculate_fps;
37047 + memset((void *)&cbTbl, 0, sizeof(cbTbl));
37048 + *(void **)&cbTbl.probe = ibmcam_probe;
37049 + *(void **)&cbTbl.setupOnOpen = ibmcam_setup_on_open;
37050 + *(void **)&cbTbl.videoStart = ibmcam_video_start;
37051 + *(void **)&cbTbl.videoStop = ibmcam_video_stop;
37052 + *(void **)&cbTbl.processData = ibmcam_ProcessIsocData;
37053 + *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame;
37054 + *(void **)&cbTbl.adjustPicture = ibmcam_adjust_picture;
37055 + *(void **)&cbTbl.getFPS = ibmcam_calculate_fps;
37056 return usbvideo_register(
37057 &cams,
37058 MAX_IBMCAM,
37059 diff --git a/drivers/media/video/usbvideo/konicawc.c b/drivers/media/video/usbvideo/konicawc.c
37060 index 31d57f2..600b735 100644
37061 --- a/drivers/media/video/usbvideo/konicawc.c
37062 +++ b/drivers/media/video/usbvideo/konicawc.c
37063 @@ -225,7 +225,7 @@ static void konicawc_register_input(struct konicawc *cam, struct usb_device *dev
37064 int error;
37065
37066 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
37067 - strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
37068 + strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
37069
37070 cam->input = input_dev = input_allocate_device();
37071 if (!input_dev) {
37072 @@ -935,16 +935,16 @@ static int __init konicawc_init(void)
37073 struct usbvideo_cb cbTbl;
37074 printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
37075 DRIVER_DESC "\n");
37076 - memset(&cbTbl, 0, sizeof(cbTbl));
37077 - cbTbl.probe = konicawc_probe;
37078 - cbTbl.setupOnOpen = konicawc_setup_on_open;
37079 - cbTbl.processData = konicawc_process_isoc;
37080 - cbTbl.getFPS = konicawc_calculate_fps;
37081 - cbTbl.setVideoMode = konicawc_set_video_mode;
37082 - cbTbl.startDataPump = konicawc_start_data;
37083 - cbTbl.stopDataPump = konicawc_stop_data;
37084 - cbTbl.adjustPicture = konicawc_adjust_picture;
37085 - cbTbl.userFree = konicawc_free_uvd;
37086 + memset((void * )&cbTbl, 0, sizeof(cbTbl));
37087 + *(void **)&cbTbl.probe = konicawc_probe;
37088 + *(void **)&cbTbl.setupOnOpen = konicawc_setup_on_open;
37089 + *(void **)&cbTbl.processData = konicawc_process_isoc;
37090 + *(void **)&cbTbl.getFPS = konicawc_calculate_fps;
37091 + *(void **)&cbTbl.setVideoMode = konicawc_set_video_mode;
37092 + *(void **)&cbTbl.startDataPump = konicawc_start_data;
37093 + *(void **)&cbTbl.stopDataPump = konicawc_stop_data;
37094 + *(void **)&cbTbl.adjustPicture = konicawc_adjust_picture;
37095 + *(void **)&cbTbl.userFree = konicawc_free_uvd;
37096 return usbvideo_register(
37097 &cams,
37098 MAX_CAMERAS,
37099 diff --git a/drivers/media/video/usbvideo/quickcam_messenger.c b/drivers/media/video/usbvideo/quickcam_messenger.c
37100 index 803d3e4..c4d1b96 100644
37101 --- a/drivers/media/video/usbvideo/quickcam_messenger.c
37102 +++ b/drivers/media/video/usbvideo/quickcam_messenger.c
37103 @@ -89,7 +89,7 @@ static void qcm_register_input(struct qcm *cam, struct usb_device *dev)
37104 int error;
37105
37106 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
37107 - strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
37108 + strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
37109
37110 cam->input = input_dev = input_allocate_device();
37111 if (!input_dev) {
37112 diff --git a/drivers/media/video/usbvideo/ultracam.c b/drivers/media/video/usbvideo/ultracam.c
37113 index fbd1b63..292f9f0 100644
37114 --- a/drivers/media/video/usbvideo/ultracam.c
37115 +++ b/drivers/media/video/usbvideo/ultracam.c
37116 @@ -655,14 +655,14 @@ static int __init ultracam_init(void)
37117 {
37118 struct usbvideo_cb cbTbl;
37119 memset(&cbTbl, 0, sizeof(cbTbl));
37120 - cbTbl.probe = ultracam_probe;
37121 - cbTbl.setupOnOpen = ultracam_setup_on_open;
37122 - cbTbl.videoStart = ultracam_video_start;
37123 - cbTbl.videoStop = ultracam_video_stop;
37124 - cbTbl.processData = ultracam_ProcessIsocData;
37125 - cbTbl.postProcess = usbvideo_DeinterlaceFrame;
37126 - cbTbl.adjustPicture = ultracam_adjust_picture;
37127 - cbTbl.getFPS = ultracam_calculate_fps;
37128 + *(void **)&cbTbl.probe = ultracam_probe;
37129 + *(void **)&cbTbl.setupOnOpen = ultracam_setup_on_open;
37130 + *(void **)&cbTbl.videoStart = ultracam_video_start;
37131 + *(void **)&cbTbl.videoStop = ultracam_video_stop;
37132 + *(void **)&cbTbl.processData = ultracam_ProcessIsocData;
37133 + *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame;
37134 + *(void **)&cbTbl.adjustPicture = ultracam_adjust_picture;
37135 + *(void **)&cbTbl.getFPS = ultracam_calculate_fps;
37136 return usbvideo_register(
37137 &cams,
37138 MAX_CAMERAS,
37139 diff --git a/drivers/media/video/usbvideo/usbvideo.c b/drivers/media/video/usbvideo/usbvideo.c
37140 index dea8b32..34f6878 100644
37141 --- a/drivers/media/video/usbvideo/usbvideo.c
37142 +++ b/drivers/media/video/usbvideo/usbvideo.c
37143 @@ -697,15 +697,15 @@ int usbvideo_register(
37144 __func__, cams, base_size, num_cams);
37145
37146 /* Copy callbacks, apply defaults for those that are not set */
37147 - memmove(&cams->cb, cbTbl, sizeof(cams->cb));
37148 + memmove((void *)&cams->cb, cbTbl, sizeof(cams->cb));
37149 if (cams->cb.getFrame == NULL)
37150 - cams->cb.getFrame = usbvideo_GetFrame;
37151 + *(void **)&cams->cb.getFrame = usbvideo_GetFrame;
37152 if (cams->cb.disconnect == NULL)
37153 - cams->cb.disconnect = usbvideo_Disconnect;
37154 + *(void **)&cams->cb.disconnect = usbvideo_Disconnect;
37155 if (cams->cb.startDataPump == NULL)
37156 - cams->cb.startDataPump = usbvideo_StartDataPump;
37157 + *(void **)&cams->cb.startDataPump = usbvideo_StartDataPump;
37158 if (cams->cb.stopDataPump == NULL)
37159 - cams->cb.stopDataPump = usbvideo_StopDataPump;
37160 + *(void **)&cams->cb.stopDataPump = usbvideo_StopDataPump;
37161
37162 cams->num_cameras = num_cams;
37163 cams->cam = (struct uvd *) &cams[1];
37164 diff --git a/drivers/media/video/usbvideo/usbvideo.h b/drivers/media/video/usbvideo/usbvideo.h
37165 index c66985b..7fa143a 100644
37166 --- a/drivers/media/video/usbvideo/usbvideo.h
37167 +++ b/drivers/media/video/usbvideo/usbvideo.h
37168 @@ -268,7 +268,7 @@ struct usbvideo_cb {
37169 int (*startDataPump)(struct uvd *uvd);
37170 void (*stopDataPump)(struct uvd *uvd);
37171 int (*setVideoMode)(struct uvd *uvd, struct video_window *vw);
37172 -};
37173 +} __no_const;
37174
37175 struct usbvideo {
37176 int num_cameras; /* As allocated */
37177 diff --git a/drivers/media/video/usbvision/usbvision-core.c b/drivers/media/video/usbvision/usbvision-core.c
37178 index e0f91e4..37554ea 100644
37179 --- a/drivers/media/video/usbvision/usbvision-core.c
37180 +++ b/drivers/media/video/usbvision/usbvision-core.c
37181 @@ -820,6 +820,8 @@ static enum ParseState usbvision_parse_compress(struct usb_usbvision *usbvision,
37182 unsigned char rv, gv, bv;
37183 static unsigned char *Y, *U, *V;
37184
37185 + pax_track_stack();
37186 +
37187 frame = usbvision->curFrame;
37188 imageSize = frame->frmwidth * frame->frmheight;
37189 if ( (frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
37190 diff --git a/drivers/media/video/v4l2-device.c b/drivers/media/video/v4l2-device.c
37191 index 0d06e7c..3d17d24 100644
37192 --- a/drivers/media/video/v4l2-device.c
37193 +++ b/drivers/media/video/v4l2-device.c
37194 @@ -50,9 +50,9 @@ int v4l2_device_register(struct device *dev, struct v4l2_device *v4l2_dev)
37195 EXPORT_SYMBOL_GPL(v4l2_device_register);
37196
37197 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
37198 - atomic_t *instance)
37199 + atomic_unchecked_t *instance)
37200 {
37201 - int num = atomic_inc_return(instance) - 1;
37202 + int num = atomic_inc_return_unchecked(instance) - 1;
37203 int len = strlen(basename);
37204
37205 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
37206 diff --git a/drivers/media/video/videobuf-dma-sg.c b/drivers/media/video/videobuf-dma-sg.c
37207 index 032ebae..6a3532c 100644
37208 --- a/drivers/media/video/videobuf-dma-sg.c
37209 +++ b/drivers/media/video/videobuf-dma-sg.c
37210 @@ -693,6 +693,8 @@ void *videobuf_sg_alloc(size_t size)
37211 {
37212 struct videobuf_queue q;
37213
37214 + pax_track_stack();
37215 +
37216 /* Required to make generic handler to call __videobuf_alloc */
37217 q.int_ops = &sg_ops;
37218
37219 diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
37220 index b6992b7..9fa7547 100644
37221 --- a/drivers/message/fusion/mptbase.c
37222 +++ b/drivers/message/fusion/mptbase.c
37223 @@ -6709,8 +6709,14 @@ procmpt_iocinfo_read(char *buf, char **start, off_t offset, int request, int *eo
37224 len += sprintf(buf+len, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
37225 len += sprintf(buf+len, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
37226
37227 +#ifdef CONFIG_GRKERNSEC_HIDESYM
37228 + len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
37229 + NULL, NULL);
37230 +#else
37231 len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
37232 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
37233 +#endif
37234 +
37235 /*
37236 * Rounding UP to nearest 4-kB boundary here...
37237 */
37238 diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
37239 index 83873e3..e360e9a 100644
37240 --- a/drivers/message/fusion/mptsas.c
37241 +++ b/drivers/message/fusion/mptsas.c
37242 @@ -436,6 +436,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
37243 return 0;
37244 }
37245
37246 +static inline void
37247 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
37248 +{
37249 + if (phy_info->port_details) {
37250 + phy_info->port_details->rphy = rphy;
37251 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
37252 + ioc->name, rphy));
37253 + }
37254 +
37255 + if (rphy) {
37256 + dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
37257 + &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
37258 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
37259 + ioc->name, rphy, rphy->dev.release));
37260 + }
37261 +}
37262 +
37263 /* no mutex */
37264 static void
37265 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
37266 @@ -474,23 +491,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
37267 return NULL;
37268 }
37269
37270 -static inline void
37271 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
37272 -{
37273 - if (phy_info->port_details) {
37274 - phy_info->port_details->rphy = rphy;
37275 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
37276 - ioc->name, rphy));
37277 - }
37278 -
37279 - if (rphy) {
37280 - dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
37281 - &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
37282 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
37283 - ioc->name, rphy, rphy->dev.release));
37284 - }
37285 -}
37286 -
37287 static inline struct sas_port *
37288 mptsas_get_port(struct mptsas_phyinfo *phy_info)
37289 {
37290 diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
37291 index bd096ca..332cf76 100644
37292 --- a/drivers/message/fusion/mptscsih.c
37293 +++ b/drivers/message/fusion/mptscsih.c
37294 @@ -1248,15 +1248,16 @@ mptscsih_info(struct Scsi_Host *SChost)
37295
37296 h = shost_priv(SChost);
37297
37298 - if (h) {
37299 - if (h->info_kbuf == NULL)
37300 - if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
37301 - return h->info_kbuf;
37302 - h->info_kbuf[0] = '\0';
37303 + if (!h)
37304 + return NULL;
37305
37306 - mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
37307 - h->info_kbuf[size-1] = '\0';
37308 - }
37309 + if (h->info_kbuf == NULL)
37310 + if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
37311 + return h->info_kbuf;
37312 + h->info_kbuf[0] = '\0';
37313 +
37314 + mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
37315 + h->info_kbuf[size-1] = '\0';
37316
37317 return h->info_kbuf;
37318 }
37319 diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
37320 index efba702..59b2c0f 100644
37321 --- a/drivers/message/i2o/i2o_config.c
37322 +++ b/drivers/message/i2o/i2o_config.c
37323 @@ -787,6 +787,8 @@ static int i2o_cfg_passthru(unsigned long arg)
37324 struct i2o_message *msg;
37325 unsigned int iop;
37326
37327 + pax_track_stack();
37328 +
37329 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
37330 return -EFAULT;
37331
37332 diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
37333 index 7045c45..c07b170 100644
37334 --- a/drivers/message/i2o/i2o_proc.c
37335 +++ b/drivers/message/i2o/i2o_proc.c
37336 @@ -259,13 +259,6 @@ static char *scsi_devices[] = {
37337 "Array Controller Device"
37338 };
37339
37340 -static char *chtostr(u8 * chars, int n)
37341 -{
37342 - char tmp[256];
37343 - tmp[0] = 0;
37344 - return strncat(tmp, (char *)chars, n);
37345 -}
37346 -
37347 static int i2o_report_query_status(struct seq_file *seq, int block_status,
37348 char *group)
37349 {
37350 @@ -842,8 +835,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
37351
37352 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
37353 seq_printf(seq, "%-#8x", ddm_table.module_id);
37354 - seq_printf(seq, "%-29s",
37355 - chtostr(ddm_table.module_name_version, 28));
37356 + seq_printf(seq, "%-.28s", ddm_table.module_name_version);
37357 seq_printf(seq, "%9d ", ddm_table.data_size);
37358 seq_printf(seq, "%8d", ddm_table.code_size);
37359
37360 @@ -944,8 +936,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
37361
37362 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
37363 seq_printf(seq, "%-#8x", dst->module_id);
37364 - seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
37365 - seq_printf(seq, "%-9s", chtostr(dst->date, 8));
37366 + seq_printf(seq, "%-.28s", dst->module_name_version);
37367 + seq_printf(seq, "%-.8s", dst->date);
37368 seq_printf(seq, "%8d ", dst->module_size);
37369 seq_printf(seq, "%8d ", dst->mpb_size);
37370 seq_printf(seq, "0x%04x", dst->module_flags);
37371 @@ -1276,14 +1268,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
37372 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
37373 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
37374 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
37375 - seq_printf(seq, "Vendor info : %s\n",
37376 - chtostr((u8 *) (work32 + 2), 16));
37377 - seq_printf(seq, "Product info : %s\n",
37378 - chtostr((u8 *) (work32 + 6), 16));
37379 - seq_printf(seq, "Description : %s\n",
37380 - chtostr((u8 *) (work32 + 10), 16));
37381 - seq_printf(seq, "Product rev. : %s\n",
37382 - chtostr((u8 *) (work32 + 14), 8));
37383 + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
37384 + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
37385 + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
37386 + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
37387
37388 seq_printf(seq, "Serial number : ");
37389 print_serial_number(seq, (u8 *) (work32 + 16),
37390 @@ -1328,10 +1316,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
37391 }
37392
37393 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
37394 - seq_printf(seq, "Module name : %s\n",
37395 - chtostr(result.module_name, 24));
37396 - seq_printf(seq, "Module revision : %s\n",
37397 - chtostr(result.module_rev, 8));
37398 + seq_printf(seq, "Module name : %.24s\n", result.module_name);
37399 + seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
37400
37401 seq_printf(seq, "Serial number : ");
37402 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
37403 @@ -1362,14 +1348,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
37404 return 0;
37405 }
37406
37407 - seq_printf(seq, "Device name : %s\n",
37408 - chtostr(result.device_name, 64));
37409 - seq_printf(seq, "Service name : %s\n",
37410 - chtostr(result.service_name, 64));
37411 - seq_printf(seq, "Physical name : %s\n",
37412 - chtostr(result.physical_location, 64));
37413 - seq_printf(seq, "Instance number : %s\n",
37414 - chtostr(result.instance_number, 4));
37415 + seq_printf(seq, "Device name : %.64s\n", result.device_name);
37416 + seq_printf(seq, "Service name : %.64s\n", result.service_name);
37417 + seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
37418 + seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
37419
37420 return 0;
37421 }
37422 diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
37423 index 27cf4af..b1205b8 100644
37424 --- a/drivers/message/i2o/iop.c
37425 +++ b/drivers/message/i2o/iop.c
37426 @@ -110,10 +110,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
37427
37428 spin_lock_irqsave(&c->context_list_lock, flags);
37429
37430 - if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
37431 - atomic_inc(&c->context_list_counter);
37432 + if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
37433 + atomic_inc_unchecked(&c->context_list_counter);
37434
37435 - entry->context = atomic_read(&c->context_list_counter);
37436 + entry->context = atomic_read_unchecked(&c->context_list_counter);
37437
37438 list_add(&entry->list, &c->context_list);
37439
37440 @@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(void)
37441
37442 #if BITS_PER_LONG == 64
37443 spin_lock_init(&c->context_list_lock);
37444 - atomic_set(&c->context_list_counter, 0);
37445 + atomic_set_unchecked(&c->context_list_counter, 0);
37446 INIT_LIST_HEAD(&c->context_list);
37447 #endif
37448
37449 diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
37450 index 78e3e85..66c9a0d 100644
37451 --- a/drivers/mfd/ab3100-core.c
37452 +++ b/drivers/mfd/ab3100-core.c
37453 @@ -777,7 +777,7 @@ struct ab_family_id {
37454 char *name;
37455 };
37456
37457 -static const struct ab_family_id ids[] __initdata = {
37458 +static const struct ab_family_id ids[] __initconst = {
37459 /* AB3100 */
37460 {
37461 .id = 0xc0,
37462 diff --git a/drivers/mfd/wm8350-i2c.c b/drivers/mfd/wm8350-i2c.c
37463 index 8d8c932..8104515 100644
37464 --- a/drivers/mfd/wm8350-i2c.c
37465 +++ b/drivers/mfd/wm8350-i2c.c
37466 @@ -43,6 +43,8 @@ static int wm8350_i2c_write_device(struct wm8350 *wm8350, char reg,
37467 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
37468 int ret;
37469
37470 + pax_track_stack();
37471 +
37472 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
37473 return -EINVAL;
37474
37475 diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
37476 index e4ff50b..4cc3f04 100644
37477 --- a/drivers/misc/kgdbts.c
37478 +++ b/drivers/misc/kgdbts.c
37479 @@ -118,7 +118,7 @@
37480 } while (0)
37481 #define MAX_CONFIG_LEN 40
37482
37483 -static struct kgdb_io kgdbts_io_ops;
37484 +static const struct kgdb_io kgdbts_io_ops;
37485 static char get_buf[BUFMAX];
37486 static int get_buf_cnt;
37487 static char put_buf[BUFMAX];
37488 @@ -1102,7 +1102,7 @@ static void kgdbts_post_exp_handler(void)
37489 module_put(THIS_MODULE);
37490 }
37491
37492 -static struct kgdb_io kgdbts_io_ops = {
37493 +static const struct kgdb_io kgdbts_io_ops = {
37494 .name = "kgdbts",
37495 .read_char = kgdbts_get_char,
37496 .write_char = kgdbts_put_char,
37497 diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
37498 index 37e7cfc..67cfb76 100644
37499 --- a/drivers/misc/sgi-gru/gruhandles.c
37500 +++ b/drivers/misc/sgi-gru/gruhandles.c
37501 @@ -39,8 +39,8 @@ struct mcs_op_statistic mcs_op_statistics[mcsop_last];
37502
37503 static void update_mcs_stats(enum mcs_op op, unsigned long clks)
37504 {
37505 - atomic_long_inc(&mcs_op_statistics[op].count);
37506 - atomic_long_add(clks, &mcs_op_statistics[op].total);
37507 + atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
37508 + atomic_long_add_unchecked(clks, &mcs_op_statistics[op].total);
37509 if (mcs_op_statistics[op].max < clks)
37510 mcs_op_statistics[op].max = clks;
37511 }
37512 diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
37513 index 3f2375c..467c6e6 100644
37514 --- a/drivers/misc/sgi-gru/gruprocfs.c
37515 +++ b/drivers/misc/sgi-gru/gruprocfs.c
37516 @@ -32,9 +32,9 @@
37517
37518 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
37519
37520 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
37521 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
37522 {
37523 - unsigned long val = atomic_long_read(v);
37524 + unsigned long val = atomic_long_read_unchecked(v);
37525
37526 if (val)
37527 seq_printf(s, "%16lu %s\n", val, id);
37528 @@ -136,8 +136,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
37529 "cch_interrupt_sync", "cch_deallocate", "tgh_invalidate"};
37530
37531 for (op = 0; op < mcsop_last; op++) {
37532 - count = atomic_long_read(&mcs_op_statistics[op].count);
37533 - total = atomic_long_read(&mcs_op_statistics[op].total);
37534 + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
37535 + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
37536 max = mcs_op_statistics[op].max;
37537 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
37538 count ? total / count : 0, max);
37539 diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
37540 index 46990bc..4a251b5 100644
37541 --- a/drivers/misc/sgi-gru/grutables.h
37542 +++ b/drivers/misc/sgi-gru/grutables.h
37543 @@ -167,84 +167,84 @@ extern unsigned int gru_max_gids;
37544 * GRU statistics.
37545 */
37546 struct gru_stats_s {
37547 - atomic_long_t vdata_alloc;
37548 - atomic_long_t vdata_free;
37549 - atomic_long_t gts_alloc;
37550 - atomic_long_t gts_free;
37551 - atomic_long_t vdata_double_alloc;
37552 - atomic_long_t gts_double_allocate;
37553 - atomic_long_t assign_context;
37554 - atomic_long_t assign_context_failed;
37555 - atomic_long_t free_context;
37556 - atomic_long_t load_user_context;
37557 - atomic_long_t load_kernel_context;
37558 - atomic_long_t lock_kernel_context;
37559 - atomic_long_t unlock_kernel_context;
37560 - atomic_long_t steal_user_context;
37561 - atomic_long_t steal_kernel_context;
37562 - atomic_long_t steal_context_failed;
37563 - atomic_long_t nopfn;
37564 - atomic_long_t break_cow;
37565 - atomic_long_t asid_new;
37566 - atomic_long_t asid_next;
37567 - atomic_long_t asid_wrap;
37568 - atomic_long_t asid_reuse;
37569 - atomic_long_t intr;
37570 - atomic_long_t intr_mm_lock_failed;
37571 - atomic_long_t call_os;
37572 - atomic_long_t call_os_offnode_reference;
37573 - atomic_long_t call_os_check_for_bug;
37574 - atomic_long_t call_os_wait_queue;
37575 - atomic_long_t user_flush_tlb;
37576 - atomic_long_t user_unload_context;
37577 - atomic_long_t user_exception;
37578 - atomic_long_t set_context_option;
37579 - atomic_long_t migrate_check;
37580 - atomic_long_t migrated_retarget;
37581 - atomic_long_t migrated_unload;
37582 - atomic_long_t migrated_unload_delay;
37583 - atomic_long_t migrated_nopfn_retarget;
37584 - atomic_long_t migrated_nopfn_unload;
37585 - atomic_long_t tlb_dropin;
37586 - atomic_long_t tlb_dropin_fail_no_asid;
37587 - atomic_long_t tlb_dropin_fail_upm;
37588 - atomic_long_t tlb_dropin_fail_invalid;
37589 - atomic_long_t tlb_dropin_fail_range_active;
37590 - atomic_long_t tlb_dropin_fail_idle;
37591 - atomic_long_t tlb_dropin_fail_fmm;
37592 - atomic_long_t tlb_dropin_fail_no_exception;
37593 - atomic_long_t tlb_dropin_fail_no_exception_war;
37594 - atomic_long_t tfh_stale_on_fault;
37595 - atomic_long_t mmu_invalidate_range;
37596 - atomic_long_t mmu_invalidate_page;
37597 - atomic_long_t mmu_clear_flush_young;
37598 - atomic_long_t flush_tlb;
37599 - atomic_long_t flush_tlb_gru;
37600 - atomic_long_t flush_tlb_gru_tgh;
37601 - atomic_long_t flush_tlb_gru_zero_asid;
37602 + atomic_long_unchecked_t vdata_alloc;
37603 + atomic_long_unchecked_t vdata_free;
37604 + atomic_long_unchecked_t gts_alloc;
37605 + atomic_long_unchecked_t gts_free;
37606 + atomic_long_unchecked_t vdata_double_alloc;
37607 + atomic_long_unchecked_t gts_double_allocate;
37608 + atomic_long_unchecked_t assign_context;
37609 + atomic_long_unchecked_t assign_context_failed;
37610 + atomic_long_unchecked_t free_context;
37611 + atomic_long_unchecked_t load_user_context;
37612 + atomic_long_unchecked_t load_kernel_context;
37613 + atomic_long_unchecked_t lock_kernel_context;
37614 + atomic_long_unchecked_t unlock_kernel_context;
37615 + atomic_long_unchecked_t steal_user_context;
37616 + atomic_long_unchecked_t steal_kernel_context;
37617 + atomic_long_unchecked_t steal_context_failed;
37618 + atomic_long_unchecked_t nopfn;
37619 + atomic_long_unchecked_t break_cow;
37620 + atomic_long_unchecked_t asid_new;
37621 + atomic_long_unchecked_t asid_next;
37622 + atomic_long_unchecked_t asid_wrap;
37623 + atomic_long_unchecked_t asid_reuse;
37624 + atomic_long_unchecked_t intr;
37625 + atomic_long_unchecked_t intr_mm_lock_failed;
37626 + atomic_long_unchecked_t call_os;
37627 + atomic_long_unchecked_t call_os_offnode_reference;
37628 + atomic_long_unchecked_t call_os_check_for_bug;
37629 + atomic_long_unchecked_t call_os_wait_queue;
37630 + atomic_long_unchecked_t user_flush_tlb;
37631 + atomic_long_unchecked_t user_unload_context;
37632 + atomic_long_unchecked_t user_exception;
37633 + atomic_long_unchecked_t set_context_option;
37634 + atomic_long_unchecked_t migrate_check;
37635 + atomic_long_unchecked_t migrated_retarget;
37636 + atomic_long_unchecked_t migrated_unload;
37637 + atomic_long_unchecked_t migrated_unload_delay;
37638 + atomic_long_unchecked_t migrated_nopfn_retarget;
37639 + atomic_long_unchecked_t migrated_nopfn_unload;
37640 + atomic_long_unchecked_t tlb_dropin;
37641 + atomic_long_unchecked_t tlb_dropin_fail_no_asid;
37642 + atomic_long_unchecked_t tlb_dropin_fail_upm;
37643 + atomic_long_unchecked_t tlb_dropin_fail_invalid;
37644 + atomic_long_unchecked_t tlb_dropin_fail_range_active;
37645 + atomic_long_unchecked_t tlb_dropin_fail_idle;
37646 + atomic_long_unchecked_t tlb_dropin_fail_fmm;
37647 + atomic_long_unchecked_t tlb_dropin_fail_no_exception;
37648 + atomic_long_unchecked_t tlb_dropin_fail_no_exception_war;
37649 + atomic_long_unchecked_t tfh_stale_on_fault;
37650 + atomic_long_unchecked_t mmu_invalidate_range;
37651 + atomic_long_unchecked_t mmu_invalidate_page;
37652 + atomic_long_unchecked_t mmu_clear_flush_young;
37653 + atomic_long_unchecked_t flush_tlb;
37654 + atomic_long_unchecked_t flush_tlb_gru;
37655 + atomic_long_unchecked_t flush_tlb_gru_tgh;
37656 + atomic_long_unchecked_t flush_tlb_gru_zero_asid;
37657
37658 - atomic_long_t copy_gpa;
37659 + atomic_long_unchecked_t copy_gpa;
37660
37661 - atomic_long_t mesq_receive;
37662 - atomic_long_t mesq_receive_none;
37663 - atomic_long_t mesq_send;
37664 - atomic_long_t mesq_send_failed;
37665 - atomic_long_t mesq_noop;
37666 - atomic_long_t mesq_send_unexpected_error;
37667 - atomic_long_t mesq_send_lb_overflow;
37668 - atomic_long_t mesq_send_qlimit_reached;
37669 - atomic_long_t mesq_send_amo_nacked;
37670 - atomic_long_t mesq_send_put_nacked;
37671 - atomic_long_t mesq_qf_not_full;
37672 - atomic_long_t mesq_qf_locked;
37673 - atomic_long_t mesq_qf_noop_not_full;
37674 - atomic_long_t mesq_qf_switch_head_failed;
37675 - atomic_long_t mesq_qf_unexpected_error;
37676 - atomic_long_t mesq_noop_unexpected_error;
37677 - atomic_long_t mesq_noop_lb_overflow;
37678 - atomic_long_t mesq_noop_qlimit_reached;
37679 - atomic_long_t mesq_noop_amo_nacked;
37680 - atomic_long_t mesq_noop_put_nacked;
37681 + atomic_long_unchecked_t mesq_receive;
37682 + atomic_long_unchecked_t mesq_receive_none;
37683 + atomic_long_unchecked_t mesq_send;
37684 + atomic_long_unchecked_t mesq_send_failed;
37685 + atomic_long_unchecked_t mesq_noop;
37686 + atomic_long_unchecked_t mesq_send_unexpected_error;
37687 + atomic_long_unchecked_t mesq_send_lb_overflow;
37688 + atomic_long_unchecked_t mesq_send_qlimit_reached;
37689 + atomic_long_unchecked_t mesq_send_amo_nacked;
37690 + atomic_long_unchecked_t mesq_send_put_nacked;
37691 + atomic_long_unchecked_t mesq_qf_not_full;
37692 + atomic_long_unchecked_t mesq_qf_locked;
37693 + atomic_long_unchecked_t mesq_qf_noop_not_full;
37694 + atomic_long_unchecked_t mesq_qf_switch_head_failed;
37695 + atomic_long_unchecked_t mesq_qf_unexpected_error;
37696 + atomic_long_unchecked_t mesq_noop_unexpected_error;
37697 + atomic_long_unchecked_t mesq_noop_lb_overflow;
37698 + atomic_long_unchecked_t mesq_noop_qlimit_reached;
37699 + atomic_long_unchecked_t mesq_noop_amo_nacked;
37700 + atomic_long_unchecked_t mesq_noop_put_nacked;
37701
37702 };
37703
37704 @@ -252,8 +252,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
37705 cchop_deallocate, tghop_invalidate, mcsop_last};
37706
37707 struct mcs_op_statistic {
37708 - atomic_long_t count;
37709 - atomic_long_t total;
37710 + atomic_long_unchecked_t count;
37711 + atomic_long_unchecked_t total;
37712 unsigned long max;
37713 };
37714
37715 @@ -276,7 +276,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
37716
37717 #define STAT(id) do { \
37718 if (gru_options & OPT_STATS) \
37719 - atomic_long_inc(&gru_stats.id); \
37720 + atomic_long_inc_unchecked(&gru_stats.id); \
37721 } while (0)
37722
37723 #ifdef CONFIG_SGI_GRU_DEBUG
37724 diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
37725 index 2275126..12a9dbfb 100644
37726 --- a/drivers/misc/sgi-xp/xp.h
37727 +++ b/drivers/misc/sgi-xp/xp.h
37728 @@ -289,7 +289,7 @@ struct xpc_interface {
37729 xpc_notify_func, void *);
37730 void (*received) (short, int, void *);
37731 enum xp_retval (*partid_to_nasids) (short, void *);
37732 -};
37733 +} __no_const;
37734
37735 extern struct xpc_interface xpc_interface;
37736
37737 diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
37738 index b94d5f7..7f494c5 100644
37739 --- a/drivers/misc/sgi-xp/xpc.h
37740 +++ b/drivers/misc/sgi-xp/xpc.h
37741 @@ -835,6 +835,7 @@ struct xpc_arch_operations {
37742 void (*received_payload) (struct xpc_channel *, void *);
37743 void (*notify_senders_of_disconnect) (struct xpc_channel *);
37744 };
37745 +typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
37746
37747 /* struct xpc_partition act_state values (for XPC HB) */
37748
37749 @@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
37750 /* found in xpc_main.c */
37751 extern struct device *xpc_part;
37752 extern struct device *xpc_chan;
37753 -extern struct xpc_arch_operations xpc_arch_ops;
37754 +extern xpc_arch_operations_no_const xpc_arch_ops;
37755 extern int xpc_disengage_timelimit;
37756 extern int xpc_disengage_timedout;
37757 extern int xpc_activate_IRQ_rcvd;
37758 diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
37759 index fd3688a..7e211a4 100644
37760 --- a/drivers/misc/sgi-xp/xpc_main.c
37761 +++ b/drivers/misc/sgi-xp/xpc_main.c
37762 @@ -169,7 +169,7 @@ static struct notifier_block xpc_die_notifier = {
37763 .notifier_call = xpc_system_die,
37764 };
37765
37766 -struct xpc_arch_operations xpc_arch_ops;
37767 +xpc_arch_operations_no_const xpc_arch_ops;
37768
37769 /*
37770 * Timer function to enforce the timelimit on the partition disengage.
37771 diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c
37772 index 8b70e03..700bda6 100644
37773 --- a/drivers/misc/sgi-xp/xpc_sn2.c
37774 +++ b/drivers/misc/sgi-xp/xpc_sn2.c
37775 @@ -2350,7 +2350,7 @@ xpc_received_payload_sn2(struct xpc_channel *ch, void *payload)
37776 xpc_acknowledge_msgs_sn2(ch, get, msg->flags);
37777 }
37778
37779 -static struct xpc_arch_operations xpc_arch_ops_sn2 = {
37780 +static const struct xpc_arch_operations xpc_arch_ops_sn2 = {
37781 .setup_partitions = xpc_setup_partitions_sn2,
37782 .teardown_partitions = xpc_teardown_partitions_sn2,
37783 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_sn2,
37784 @@ -2413,7 +2413,9 @@ xpc_init_sn2(void)
37785 int ret;
37786 size_t buf_size;
37787
37788 - xpc_arch_ops = xpc_arch_ops_sn2;
37789 + pax_open_kernel();
37790 + memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_sn2, sizeof(xpc_arch_ops_sn2));
37791 + pax_close_kernel();
37792
37793 if (offsetof(struct xpc_msg_sn2, payload) > XPC_MSG_HDR_MAX_SIZE) {
37794 dev_err(xpc_part, "header portion of struct xpc_msg_sn2 is "
37795 diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
37796 index 8e08d71..7cb8c9b 100644
37797 --- a/drivers/misc/sgi-xp/xpc_uv.c
37798 +++ b/drivers/misc/sgi-xp/xpc_uv.c
37799 @@ -1669,7 +1669,7 @@ xpc_received_payload_uv(struct xpc_channel *ch, void *payload)
37800 XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
37801 }
37802
37803 -static struct xpc_arch_operations xpc_arch_ops_uv = {
37804 +static const struct xpc_arch_operations xpc_arch_ops_uv = {
37805 .setup_partitions = xpc_setup_partitions_uv,
37806 .teardown_partitions = xpc_teardown_partitions_uv,
37807 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv,
37808 @@ -1729,7 +1729,9 @@ static struct xpc_arch_operations xpc_arch_ops_uv = {
37809 int
37810 xpc_init_uv(void)
37811 {
37812 - xpc_arch_ops = xpc_arch_ops_uv;
37813 + pax_open_kernel();
37814 + memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_uv, sizeof(xpc_arch_ops_uv));
37815 + pax_close_kernel();
37816
37817 if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
37818 dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n",
37819 diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
37820 index 6fd20b42..650efe3 100644
37821 --- a/drivers/mmc/host/sdhci-pci.c
37822 +++ b/drivers/mmc/host/sdhci-pci.c
37823 @@ -297,7 +297,7 @@ static const struct sdhci_pci_fixes sdhci_via = {
37824 .probe = via_probe,
37825 };
37826
37827 -static const struct pci_device_id pci_ids[] __devinitdata = {
37828 +static const struct pci_device_id pci_ids[] __devinitconst = {
37829 {
37830 .vendor = PCI_VENDOR_ID_RICOH,
37831 .device = PCI_DEVICE_ID_RICOH_R5C822,
37832 diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
37833 index e7563a9..5f90ce5 100644
37834 --- a/drivers/mtd/chips/cfi_cmdset_0001.c
37835 +++ b/drivers/mtd/chips/cfi_cmdset_0001.c
37836 @@ -743,6 +743,8 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
37837 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
37838 unsigned long timeo = jiffies + HZ;
37839
37840 + pax_track_stack();
37841 +
37842 /* Prevent setting state FL_SYNCING for chip in suspended state. */
37843 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
37844 goto sleep;
37845 @@ -1642,6 +1644,8 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
37846 unsigned long initial_adr;
37847 int initial_len = len;
37848
37849 + pax_track_stack();
37850 +
37851 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
37852 adr += chip->start;
37853 initial_adr = adr;
37854 @@ -1860,6 +1864,8 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
37855 int retries = 3;
37856 int ret;
37857
37858 + pax_track_stack();
37859 +
37860 adr += chip->start;
37861
37862 retry:
37863 diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
37864 index 0667a67..3ab97ed 100644
37865 --- a/drivers/mtd/chips/cfi_cmdset_0020.c
37866 +++ b/drivers/mtd/chips/cfi_cmdset_0020.c
37867 @@ -255,6 +255,8 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
37868 unsigned long cmd_addr;
37869 struct cfi_private *cfi = map->fldrv_priv;
37870
37871 + pax_track_stack();
37872 +
37873 adr += chip->start;
37874
37875 /* Ensure cmd read/writes are aligned. */
37876 @@ -428,6 +430,8 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
37877 DECLARE_WAITQUEUE(wait, current);
37878 int wbufsize, z;
37879
37880 + pax_track_stack();
37881 +
37882 /* M58LW064A requires bus alignment for buffer wriets -- saw */
37883 if (adr & (map_bankwidth(map)-1))
37884 return -EINVAL;
37885 @@ -742,6 +746,8 @@ static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, u
37886 DECLARE_WAITQUEUE(wait, current);
37887 int ret = 0;
37888
37889 + pax_track_stack();
37890 +
37891 adr += chip->start;
37892
37893 /* Let's determine this according to the interleave only once */
37894 @@ -1047,6 +1053,8 @@ static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, un
37895 unsigned long timeo = jiffies + HZ;
37896 DECLARE_WAITQUEUE(wait, current);
37897
37898 + pax_track_stack();
37899 +
37900 adr += chip->start;
37901
37902 /* Let's determine this according to the interleave only once */
37903 @@ -1196,6 +1204,8 @@ static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip,
37904 unsigned long timeo = jiffies + HZ;
37905 DECLARE_WAITQUEUE(wait, current);
37906
37907 + pax_track_stack();
37908 +
37909 adr += chip->start;
37910
37911 /* Let's determine this according to the interleave only once */
37912 diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
37913 index 5bf5f46..c5de373 100644
37914 --- a/drivers/mtd/devices/doc2000.c
37915 +++ b/drivers/mtd/devices/doc2000.c
37916 @@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
37917
37918 /* The ECC will not be calculated correctly if less than 512 is written */
37919 /* DBB-
37920 - if (len != 0x200 && eccbuf)
37921 + if (len != 0x200)
37922 printk(KERN_WARNING
37923 "ECC needs a full sector write (adr: %lx size %lx)\n",
37924 (long) to, (long) len);
37925 diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
37926 index 0990f78..bb4e8a4 100644
37927 --- a/drivers/mtd/devices/doc2001.c
37928 +++ b/drivers/mtd/devices/doc2001.c
37929 @@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mtd, loff_t from, size_t len,
37930 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
37931
37932 /* Don't allow read past end of device */
37933 - if (from >= this->totlen)
37934 + if (from >= this->totlen || !len)
37935 return -EINVAL;
37936
37937 /* Don't allow a single read to cross a 512-byte block boundary */
37938 diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
37939 index e56d6b4..f07e6cf 100644
37940 --- a/drivers/mtd/ftl.c
37941 +++ b/drivers/mtd/ftl.c
37942 @@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *part, uint16_t srcunit,
37943 loff_t offset;
37944 uint16_t srcunitswap = cpu_to_le16(srcunit);
37945
37946 + pax_track_stack();
37947 +
37948 eun = &part->EUNInfo[srcunit];
37949 xfer = &part->XferInfo[xferunit];
37950 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
37951 diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
37952 index 8aca552..146446e 100755
37953 --- a/drivers/mtd/inftlcore.c
37954 +++ b/drivers/mtd/inftlcore.c
37955 @@ -260,6 +260,8 @@ static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned
37956 struct inftl_oob oob;
37957 size_t retlen;
37958
37959 + pax_track_stack();
37960 +
37961 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
37962 "pending=%d)\n", inftl, thisVUC, pendingblock);
37963
37964 diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c
37965 index 32e82ae..ed50953 100644
37966 --- a/drivers/mtd/inftlmount.c
37967 +++ b/drivers/mtd/inftlmount.c
37968 @@ -54,6 +54,8 @@ static int find_boot_record(struct INFTLrecord *inftl)
37969 struct INFTLPartition *ip;
37970 size_t retlen;
37971
37972 + pax_track_stack();
37973 +
37974 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
37975
37976 /*
37977 diff --git a/drivers/mtd/lpddr/qinfo_probe.c b/drivers/mtd/lpddr/qinfo_probe.c
37978 index 79bf40f..fe5f8fd 100644
37979 --- a/drivers/mtd/lpddr/qinfo_probe.c
37980 +++ b/drivers/mtd/lpddr/qinfo_probe.c
37981 @@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map_info *map, struct lpddr_private *lpddr)
37982 {
37983 map_word pfow_val[4];
37984
37985 + pax_track_stack();
37986 +
37987 /* Check identification string */
37988 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
37989 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
37990 diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
37991 index 726a1b8..f46b460 100644
37992 --- a/drivers/mtd/mtdchar.c
37993 +++ b/drivers/mtd/mtdchar.c
37994 @@ -461,6 +461,8 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
37995 u_long size;
37996 struct mtd_info_user info;
37997
37998 + pax_track_stack();
37999 +
38000 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
38001
38002 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
38003 diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
38004 index 1002e18..26d82d5 100644
38005 --- a/drivers/mtd/nftlcore.c
38006 +++ b/drivers/mtd/nftlcore.c
38007 @@ -254,6 +254,8 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
38008 int inplace = 1;
38009 size_t retlen;
38010
38011 + pax_track_stack();
38012 +
38013 memset(BlockMap, 0xff, sizeof(BlockMap));
38014 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
38015
38016 diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
38017 index 8b22b18..6fada85 100644
38018 --- a/drivers/mtd/nftlmount.c
38019 +++ b/drivers/mtd/nftlmount.c
38020 @@ -23,6 +23,7 @@
38021 #include <asm/errno.h>
38022 #include <linux/delay.h>
38023 #include <linux/slab.h>
38024 +#include <linux/sched.h>
38025 #include <linux/mtd/mtd.h>
38026 #include <linux/mtd/nand.h>
38027 #include <linux/mtd/nftl.h>
38028 @@ -44,6 +45,8 @@ static int find_boot_record(struct NFTLrecord *nftl)
38029 struct mtd_info *mtd = nftl->mbd.mtd;
38030 unsigned int i;
38031
38032 + pax_track_stack();
38033 +
38034 /* Assume logical EraseSize == physical erasesize for starting the scan.
38035 We'll sort it out later if we find a MediaHeader which says otherwise */
38036 /* Actually, we won't. The new DiskOnChip driver has already scanned
38037 diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
38038 index 14cec04..d775b87 100644
38039 --- a/drivers/mtd/ubi/build.c
38040 +++ b/drivers/mtd/ubi/build.c
38041 @@ -1255,7 +1255,7 @@ module_exit(ubi_exit);
38042 static int __init bytes_str_to_int(const char *str)
38043 {
38044 char *endp;
38045 - unsigned long result;
38046 + unsigned long result, scale = 1;
38047
38048 result = simple_strtoul(str, &endp, 0);
38049 if (str == endp || result >= INT_MAX) {
38050 @@ -1266,11 +1266,11 @@ static int __init bytes_str_to_int(const char *str)
38051
38052 switch (*endp) {
38053 case 'G':
38054 - result *= 1024;
38055 + scale *= 1024;
38056 case 'M':
38057 - result *= 1024;
38058 + scale *= 1024;
38059 case 'K':
38060 - result *= 1024;
38061 + scale *= 1024;
38062 if (endp[1] == 'i' && endp[2] == 'B')
38063 endp += 2;
38064 case '\0':
38065 @@ -1281,7 +1281,13 @@ static int __init bytes_str_to_int(const char *str)
38066 return -EINVAL;
38067 }
38068
38069 - return result;
38070 + if ((intoverflow_t)result*scale >= INT_MAX) {
38071 + printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
38072 + str);
38073 + return -EINVAL;
38074 + }
38075 +
38076 + return result*scale;
38077 }
38078
38079 /**
38080 diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
38081 index ab68886..ca405e8 100644
38082 --- a/drivers/net/atlx/atl2.c
38083 +++ b/drivers/net/atlx/atl2.c
38084 @@ -2845,7 +2845,7 @@ static void atl2_force_ps(struct atl2_hw *hw)
38085 */
38086
38087 #define ATL2_PARAM(X, desc) \
38088 - static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
38089 + static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
38090 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
38091 MODULE_PARM_DESC(X, desc);
38092 #else
38093 diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
38094 index 4874b2b..67f8526 100644
38095 --- a/drivers/net/bnx2.c
38096 +++ b/drivers/net/bnx2.c
38097 @@ -5809,6 +5809,8 @@ bnx2_test_nvram(struct bnx2 *bp)
38098 int rc = 0;
38099 u32 magic, csum;
38100
38101 + pax_track_stack();
38102 +
38103 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
38104 goto test_nvram_done;
38105
38106 diff --git a/drivers/net/cxgb3/l2t.h b/drivers/net/cxgb3/l2t.h
38107 index fd3eb07..8a6978d 100644
38108 --- a/drivers/net/cxgb3/l2t.h
38109 +++ b/drivers/net/cxgb3/l2t.h
38110 @@ -86,7 +86,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
38111 */
38112 struct l2t_skb_cb {
38113 arp_failure_handler_func arp_failure_handler;
38114 -};
38115 +} __no_const;
38116
38117 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
38118
38119 diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
38120 index 032cfe0..411af379 100644
38121 --- a/drivers/net/cxgb3/t3_hw.c
38122 +++ b/drivers/net/cxgb3/t3_hw.c
38123 @@ -699,6 +699,8 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
38124 int i, addr, ret;
38125 struct t3_vpd vpd;
38126
38127 + pax_track_stack();
38128 +
38129 /*
38130 * Card information is normally at VPD_BASE but some early cards had
38131 * it at 0.
38132 diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
38133 index d1e0563..b9e129c 100644
38134 --- a/drivers/net/e1000e/82571.c
38135 +++ b/drivers/net/e1000e/82571.c
38136 @@ -212,7 +212,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
38137 {
38138 struct e1000_hw *hw = &adapter->hw;
38139 struct e1000_mac_info *mac = &hw->mac;
38140 - struct e1000_mac_operations *func = &mac->ops;
38141 + e1000_mac_operations_no_const *func = &mac->ops;
38142 u32 swsm = 0;
38143 u32 swsm2 = 0;
38144 bool force_clear_smbi = false;
38145 @@ -1656,7 +1656,7 @@ static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw)
38146 temp = er32(ICRXDMTC);
38147 }
38148
38149 -static struct e1000_mac_operations e82571_mac_ops = {
38150 +static const struct e1000_mac_operations e82571_mac_ops = {
38151 /* .check_mng_mode: mac type dependent */
38152 /* .check_for_link: media type dependent */
38153 .id_led_init = e1000e_id_led_init,
38154 @@ -1674,7 +1674,7 @@ static struct e1000_mac_operations e82571_mac_ops = {
38155 .setup_led = e1000e_setup_led_generic,
38156 };
38157
38158 -static struct e1000_phy_operations e82_phy_ops_igp = {
38159 +static const struct e1000_phy_operations e82_phy_ops_igp = {
38160 .acquire_phy = e1000_get_hw_semaphore_82571,
38161 .check_reset_block = e1000e_check_reset_block_generic,
38162 .commit_phy = NULL,
38163 @@ -1691,7 +1691,7 @@ static struct e1000_phy_operations e82_phy_ops_igp = {
38164 .cfg_on_link_up = NULL,
38165 };
38166
38167 -static struct e1000_phy_operations e82_phy_ops_m88 = {
38168 +static const struct e1000_phy_operations e82_phy_ops_m88 = {
38169 .acquire_phy = e1000_get_hw_semaphore_82571,
38170 .check_reset_block = e1000e_check_reset_block_generic,
38171 .commit_phy = e1000e_phy_sw_reset,
38172 @@ -1708,7 +1708,7 @@ static struct e1000_phy_operations e82_phy_ops_m88 = {
38173 .cfg_on_link_up = NULL,
38174 };
38175
38176 -static struct e1000_phy_operations e82_phy_ops_bm = {
38177 +static const struct e1000_phy_operations e82_phy_ops_bm = {
38178 .acquire_phy = e1000_get_hw_semaphore_82571,
38179 .check_reset_block = e1000e_check_reset_block_generic,
38180 .commit_phy = e1000e_phy_sw_reset,
38181 @@ -1725,7 +1725,7 @@ static struct e1000_phy_operations e82_phy_ops_bm = {
38182 .cfg_on_link_up = NULL,
38183 };
38184
38185 -static struct e1000_nvm_operations e82571_nvm_ops = {
38186 +static const struct e1000_nvm_operations e82571_nvm_ops = {
38187 .acquire_nvm = e1000_acquire_nvm_82571,
38188 .read_nvm = e1000e_read_nvm_eerd,
38189 .release_nvm = e1000_release_nvm_82571,
38190 diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
38191 index 47db9bd..fa58ccd 100644
38192 --- a/drivers/net/e1000e/e1000.h
38193 +++ b/drivers/net/e1000e/e1000.h
38194 @@ -375,9 +375,9 @@ struct e1000_info {
38195 u32 pba;
38196 u32 max_hw_frame_size;
38197 s32 (*get_variants)(struct e1000_adapter *);
38198 - struct e1000_mac_operations *mac_ops;
38199 - struct e1000_phy_operations *phy_ops;
38200 - struct e1000_nvm_operations *nvm_ops;
38201 + const struct e1000_mac_operations *mac_ops;
38202 + const struct e1000_phy_operations *phy_ops;
38203 + const struct e1000_nvm_operations *nvm_ops;
38204 };
38205
38206 /* hardware capability, feature, and workaround flags */
38207 diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
38208 index ae5d736..e9a93a1 100644
38209 --- a/drivers/net/e1000e/es2lan.c
38210 +++ b/drivers/net/e1000e/es2lan.c
38211 @@ -207,7 +207,7 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
38212 {
38213 struct e1000_hw *hw = &adapter->hw;
38214 struct e1000_mac_info *mac = &hw->mac;
38215 - struct e1000_mac_operations *func = &mac->ops;
38216 + e1000_mac_operations_no_const *func = &mac->ops;
38217
38218 /* Set media type */
38219 switch (adapter->pdev->device) {
38220 @@ -1365,7 +1365,7 @@ static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw)
38221 temp = er32(ICRXDMTC);
38222 }
38223
38224 -static struct e1000_mac_operations es2_mac_ops = {
38225 +static const struct e1000_mac_operations es2_mac_ops = {
38226 .id_led_init = e1000e_id_led_init,
38227 .check_mng_mode = e1000e_check_mng_mode_generic,
38228 /* check_for_link dependent on media type */
38229 @@ -1383,7 +1383,7 @@ static struct e1000_mac_operations es2_mac_ops = {
38230 .setup_led = e1000e_setup_led_generic,
38231 };
38232
38233 -static struct e1000_phy_operations es2_phy_ops = {
38234 +static const struct e1000_phy_operations es2_phy_ops = {
38235 .acquire_phy = e1000_acquire_phy_80003es2lan,
38236 .check_reset_block = e1000e_check_reset_block_generic,
38237 .commit_phy = e1000e_phy_sw_reset,
38238 @@ -1400,7 +1400,7 @@ static struct e1000_phy_operations es2_phy_ops = {
38239 .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan,
38240 };
38241
38242 -static struct e1000_nvm_operations es2_nvm_ops = {
38243 +static const struct e1000_nvm_operations es2_nvm_ops = {
38244 .acquire_nvm = e1000_acquire_nvm_80003es2lan,
38245 .read_nvm = e1000e_read_nvm_eerd,
38246 .release_nvm = e1000_release_nvm_80003es2lan,
38247 diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
38248 index 11f3b7c..6381887 100644
38249 --- a/drivers/net/e1000e/hw.h
38250 +++ b/drivers/net/e1000e/hw.h
38251 @@ -753,6 +753,7 @@ struct e1000_mac_operations {
38252 s32 (*setup_physical_interface)(struct e1000_hw *);
38253 s32 (*setup_led)(struct e1000_hw *);
38254 };
38255 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
38256
38257 /* Function pointers for the PHY. */
38258 struct e1000_phy_operations {
38259 @@ -774,6 +775,7 @@ struct e1000_phy_operations {
38260 s32 (*write_phy_reg_locked)(struct e1000_hw *, u32, u16);
38261 s32 (*cfg_on_link_up)(struct e1000_hw *);
38262 };
38263 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
38264
38265 /* Function pointers for the NVM. */
38266 struct e1000_nvm_operations {
38267 @@ -785,9 +787,10 @@ struct e1000_nvm_operations {
38268 s32 (*validate_nvm)(struct e1000_hw *);
38269 s32 (*write_nvm)(struct e1000_hw *, u16, u16, u16 *);
38270 };
38271 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
38272
38273 struct e1000_mac_info {
38274 - struct e1000_mac_operations ops;
38275 + e1000_mac_operations_no_const ops;
38276
38277 u8 addr[6];
38278 u8 perm_addr[6];
38279 @@ -823,7 +826,7 @@ struct e1000_mac_info {
38280 };
38281
38282 struct e1000_phy_info {
38283 - struct e1000_phy_operations ops;
38284 + e1000_phy_operations_no_const ops;
38285
38286 enum e1000_phy_type type;
38287
38288 @@ -857,7 +860,7 @@ struct e1000_phy_info {
38289 };
38290
38291 struct e1000_nvm_info {
38292 - struct e1000_nvm_operations ops;
38293 + e1000_nvm_operations_no_const ops;
38294
38295 enum e1000_nvm_type type;
38296 enum e1000_nvm_override override;
38297 diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
38298 index de39f9a..e28d3e0 100644
38299 --- a/drivers/net/e1000e/ich8lan.c
38300 +++ b/drivers/net/e1000e/ich8lan.c
38301 @@ -3463,7 +3463,7 @@ static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
38302 }
38303 }
38304
38305 -static struct e1000_mac_operations ich8_mac_ops = {
38306 +static const struct e1000_mac_operations ich8_mac_ops = {
38307 .id_led_init = e1000e_id_led_init,
38308 .check_mng_mode = e1000_check_mng_mode_ich8lan,
38309 .check_for_link = e1000_check_for_copper_link_ich8lan,
38310 @@ -3481,7 +3481,7 @@ static struct e1000_mac_operations ich8_mac_ops = {
38311 /* id_led_init dependent on mac type */
38312 };
38313
38314 -static struct e1000_phy_operations ich8_phy_ops = {
38315 +static const struct e1000_phy_operations ich8_phy_ops = {
38316 .acquire_phy = e1000_acquire_swflag_ich8lan,
38317 .check_reset_block = e1000_check_reset_block_ich8lan,
38318 .commit_phy = NULL,
38319 @@ -3497,7 +3497,7 @@ static struct e1000_phy_operations ich8_phy_ops = {
38320 .write_phy_reg = e1000e_write_phy_reg_igp,
38321 };
38322
38323 -static struct e1000_nvm_operations ich8_nvm_ops = {
38324 +static const struct e1000_nvm_operations ich8_nvm_ops = {
38325 .acquire_nvm = e1000_acquire_nvm_ich8lan,
38326 .read_nvm = e1000_read_nvm_ich8lan,
38327 .release_nvm = e1000_release_nvm_ich8lan,
38328 diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
38329 index 18d5fbb..542d96d 100644
38330 --- a/drivers/net/fealnx.c
38331 +++ b/drivers/net/fealnx.c
38332 @@ -151,7 +151,7 @@ struct chip_info {
38333 int flags;
38334 };
38335
38336 -static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
38337 +static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
38338 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
38339 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
38340 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
38341 diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
38342 index 0e5b54b..b503f82 100644
38343 --- a/drivers/net/hamradio/6pack.c
38344 +++ b/drivers/net/hamradio/6pack.c
38345 @@ -461,6 +461,8 @@ static void sixpack_receive_buf(struct tty_struct *tty,
38346 unsigned char buf[512];
38347 int count1;
38348
38349 + pax_track_stack();
38350 +
38351 if (!count)
38352 return;
38353
38354 diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
38355 index 5862282..7cce8cb 100644
38356 --- a/drivers/net/ibmveth.c
38357 +++ b/drivers/net/ibmveth.c
38358 @@ -1577,7 +1577,7 @@ static struct attribute * veth_pool_attrs[] = {
38359 NULL,
38360 };
38361
38362 -static struct sysfs_ops veth_pool_ops = {
38363 +static const struct sysfs_ops veth_pool_ops = {
38364 .show = veth_pool_show,
38365 .store = veth_pool_store,
38366 };
38367 diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
38368 index d617f2d..57b5309 100644
38369 --- a/drivers/net/igb/e1000_82575.c
38370 +++ b/drivers/net/igb/e1000_82575.c
38371 @@ -1411,7 +1411,7 @@ void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
38372 wr32(E1000_VT_CTL, vt_ctl);
38373 }
38374
38375 -static struct e1000_mac_operations e1000_mac_ops_82575 = {
38376 +static const struct e1000_mac_operations e1000_mac_ops_82575 = {
38377 .reset_hw = igb_reset_hw_82575,
38378 .init_hw = igb_init_hw_82575,
38379 .check_for_link = igb_check_for_link_82575,
38380 @@ -1420,13 +1420,13 @@ static struct e1000_mac_operations e1000_mac_ops_82575 = {
38381 .get_speed_and_duplex = igb_get_speed_and_duplex_copper,
38382 };
38383
38384 -static struct e1000_phy_operations e1000_phy_ops_82575 = {
38385 +static const struct e1000_phy_operations e1000_phy_ops_82575 = {
38386 .acquire = igb_acquire_phy_82575,
38387 .get_cfg_done = igb_get_cfg_done_82575,
38388 .release = igb_release_phy_82575,
38389 };
38390
38391 -static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
38392 +static const struct e1000_nvm_operations e1000_nvm_ops_82575 = {
38393 .acquire = igb_acquire_nvm_82575,
38394 .read = igb_read_nvm_eerd,
38395 .release = igb_release_nvm_82575,
38396 diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
38397 index 72081df..d855cf5 100644
38398 --- a/drivers/net/igb/e1000_hw.h
38399 +++ b/drivers/net/igb/e1000_hw.h
38400 @@ -288,6 +288,7 @@ struct e1000_mac_operations {
38401 s32 (*read_mac_addr)(struct e1000_hw *);
38402 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
38403 };
38404 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
38405
38406 struct e1000_phy_operations {
38407 s32 (*acquire)(struct e1000_hw *);
38408 @@ -303,6 +304,7 @@ struct e1000_phy_operations {
38409 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
38410 s32 (*write_reg)(struct e1000_hw *, u32, u16);
38411 };
38412 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
38413
38414 struct e1000_nvm_operations {
38415 s32 (*acquire)(struct e1000_hw *);
38416 @@ -310,6 +312,7 @@ struct e1000_nvm_operations {
38417 void (*release)(struct e1000_hw *);
38418 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
38419 };
38420 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
38421
38422 struct e1000_info {
38423 s32 (*get_invariants)(struct e1000_hw *);
38424 @@ -321,7 +324,7 @@ struct e1000_info {
38425 extern const struct e1000_info e1000_82575_info;
38426
38427 struct e1000_mac_info {
38428 - struct e1000_mac_operations ops;
38429 + e1000_mac_operations_no_const ops;
38430
38431 u8 addr[6];
38432 u8 perm_addr[6];
38433 @@ -365,7 +368,7 @@ struct e1000_mac_info {
38434 };
38435
38436 struct e1000_phy_info {
38437 - struct e1000_phy_operations ops;
38438 + e1000_phy_operations_no_const ops;
38439
38440 enum e1000_phy_type type;
38441
38442 @@ -400,7 +403,7 @@ struct e1000_phy_info {
38443 };
38444
38445 struct e1000_nvm_info {
38446 - struct e1000_nvm_operations ops;
38447 + e1000_nvm_operations_no_const ops;
38448
38449 enum e1000_nvm_type type;
38450 enum e1000_nvm_override override;
38451 @@ -446,6 +449,7 @@ struct e1000_mbx_operations {
38452 s32 (*check_for_ack)(struct e1000_hw *, u16);
38453 s32 (*check_for_rst)(struct e1000_hw *, u16);
38454 };
38455 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
38456
38457 struct e1000_mbx_stats {
38458 u32 msgs_tx;
38459 @@ -457,7 +461,7 @@ struct e1000_mbx_stats {
38460 };
38461
38462 struct e1000_mbx_info {
38463 - struct e1000_mbx_operations ops;
38464 + e1000_mbx_operations_no_const ops;
38465 struct e1000_mbx_stats stats;
38466 u32 timeout;
38467 u32 usec_delay;
38468 diff --git a/drivers/net/igbvf/vf.h b/drivers/net/igbvf/vf.h
38469 index 1e8ce37..549c453 100644
38470 --- a/drivers/net/igbvf/vf.h
38471 +++ b/drivers/net/igbvf/vf.h
38472 @@ -187,9 +187,10 @@ struct e1000_mac_operations {
38473 s32 (*read_mac_addr)(struct e1000_hw *);
38474 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
38475 };
38476 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
38477
38478 struct e1000_mac_info {
38479 - struct e1000_mac_operations ops;
38480 + e1000_mac_operations_no_const ops;
38481 u8 addr[6];
38482 u8 perm_addr[6];
38483
38484 @@ -211,6 +212,7 @@ struct e1000_mbx_operations {
38485 s32 (*check_for_ack)(struct e1000_hw *);
38486 s32 (*check_for_rst)(struct e1000_hw *);
38487 };
38488 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
38489
38490 struct e1000_mbx_stats {
38491 u32 msgs_tx;
38492 @@ -222,7 +224,7 @@ struct e1000_mbx_stats {
38493 };
38494
38495 struct e1000_mbx_info {
38496 - struct e1000_mbx_operations ops;
38497 + e1000_mbx_operations_no_const ops;
38498 struct e1000_mbx_stats stats;
38499 u32 timeout;
38500 u32 usec_delay;
38501 diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
38502 index aa7286b..a61394f 100644
38503 --- a/drivers/net/iseries_veth.c
38504 +++ b/drivers/net/iseries_veth.c
38505 @@ -384,7 +384,7 @@ static struct attribute *veth_cnx_default_attrs[] = {
38506 NULL
38507 };
38508
38509 -static struct sysfs_ops veth_cnx_sysfs_ops = {
38510 +static const struct sysfs_ops veth_cnx_sysfs_ops = {
38511 .show = veth_cnx_attribute_show
38512 };
38513
38514 @@ -441,7 +441,7 @@ static struct attribute *veth_port_default_attrs[] = {
38515 NULL
38516 };
38517
38518 -static struct sysfs_ops veth_port_sysfs_ops = {
38519 +static const struct sysfs_ops veth_port_sysfs_ops = {
38520 .show = veth_port_attribute_show
38521 };
38522
38523 diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
38524 index 8aa44dc..fa1e797 100644
38525 --- a/drivers/net/ixgb/ixgb_main.c
38526 +++ b/drivers/net/ixgb/ixgb_main.c
38527 @@ -1052,6 +1052,8 @@ ixgb_set_multi(struct net_device *netdev)
38528 u32 rctl;
38529 int i;
38530
38531 + pax_track_stack();
38532 +
38533 /* Check for Promiscuous and All Multicast modes */
38534
38535 rctl = IXGB_READ_REG(hw, RCTL);
38536 diff --git a/drivers/net/ixgb/ixgb_param.c b/drivers/net/ixgb/ixgb_param.c
38537 index af35e1d..8781785 100644
38538 --- a/drivers/net/ixgb/ixgb_param.c
38539 +++ b/drivers/net/ixgb/ixgb_param.c
38540 @@ -260,6 +260,9 @@ void __devinit
38541 ixgb_check_options(struct ixgb_adapter *adapter)
38542 {
38543 int bd = adapter->bd_number;
38544 +
38545 + pax_track_stack();
38546 +
38547 if (bd >= IXGB_MAX_NIC) {
38548 printk(KERN_NOTICE
38549 "Warning: no configuration for board #%i\n", bd);
38550 diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
38551 index b17aa73..ed74540 100644
38552 --- a/drivers/net/ixgbe/ixgbe_type.h
38553 +++ b/drivers/net/ixgbe/ixgbe_type.h
38554 @@ -2327,6 +2327,7 @@ struct ixgbe_eeprom_operations {
38555 s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
38556 s32 (*update_checksum)(struct ixgbe_hw *);
38557 };
38558 +typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
38559
38560 struct ixgbe_mac_operations {
38561 s32 (*init_hw)(struct ixgbe_hw *);
38562 @@ -2376,6 +2377,7 @@ struct ixgbe_mac_operations {
38563 /* Flow Control */
38564 s32 (*fc_enable)(struct ixgbe_hw *, s32);
38565 };
38566 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
38567
38568 struct ixgbe_phy_operations {
38569 s32 (*identify)(struct ixgbe_hw *);
38570 @@ -2394,9 +2396,10 @@ struct ixgbe_phy_operations {
38571 s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
38572 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
38573 };
38574 +typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
38575
38576 struct ixgbe_eeprom_info {
38577 - struct ixgbe_eeprom_operations ops;
38578 + ixgbe_eeprom_operations_no_const ops;
38579 enum ixgbe_eeprom_type type;
38580 u32 semaphore_delay;
38581 u16 word_size;
38582 @@ -2404,7 +2407,7 @@ struct ixgbe_eeprom_info {
38583 };
38584
38585 struct ixgbe_mac_info {
38586 - struct ixgbe_mac_operations ops;
38587 + ixgbe_mac_operations_no_const ops;
38588 enum ixgbe_mac_type type;
38589 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
38590 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
38591 @@ -2423,7 +2426,7 @@ struct ixgbe_mac_info {
38592 };
38593
38594 struct ixgbe_phy_info {
38595 - struct ixgbe_phy_operations ops;
38596 + ixgbe_phy_operations_no_const ops;
38597 struct mdio_if_info mdio;
38598 enum ixgbe_phy_type type;
38599 u32 id;
38600 diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
38601 index 291a505..2543756 100644
38602 --- a/drivers/net/mlx4/main.c
38603 +++ b/drivers/net/mlx4/main.c
38604 @@ -38,6 +38,7 @@
38605 #include <linux/errno.h>
38606 #include <linux/pci.h>
38607 #include <linux/dma-mapping.h>
38608 +#include <linux/sched.h>
38609
38610 #include <linux/mlx4/device.h>
38611 #include <linux/mlx4/doorbell.h>
38612 @@ -730,6 +731,8 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
38613 u64 icm_size;
38614 int err;
38615
38616 + pax_track_stack();
38617 +
38618 err = mlx4_QUERY_FW(dev);
38619 if (err) {
38620 if (err == -EACCES)
38621 diff --git a/drivers/net/niu.c b/drivers/net/niu.c
38622 index 2dce134..fa5ce75 100644
38623 --- a/drivers/net/niu.c
38624 +++ b/drivers/net/niu.c
38625 @@ -9128,6 +9128,8 @@ static void __devinit niu_try_msix(struct niu *np, u8 *ldg_num_map)
38626 int i, num_irqs, err;
38627 u8 first_ldg;
38628
38629 + pax_track_stack();
38630 +
38631 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
38632 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
38633 ldg_num_map[i] = first_ldg + i;
38634 diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
38635 index c1b3f09..97cd8c4 100644
38636 --- a/drivers/net/pcnet32.c
38637 +++ b/drivers/net/pcnet32.c
38638 @@ -79,7 +79,7 @@ static int cards_found;
38639 /*
38640 * VLB I/O addresses
38641 */
38642 -static unsigned int pcnet32_portlist[] __initdata =
38643 +static unsigned int pcnet32_portlist[] __devinitdata =
38644 { 0x300, 0x320, 0x340, 0x360, 0 };
38645
38646 static int pcnet32_debug = 0;
38647 @@ -267,7 +267,7 @@ struct pcnet32_private {
38648 struct sk_buff **rx_skbuff;
38649 dma_addr_t *tx_dma_addr;
38650 dma_addr_t *rx_dma_addr;
38651 - struct pcnet32_access a;
38652 + struct pcnet32_access *a;
38653 spinlock_t lock; /* Guard lock */
38654 unsigned int cur_rx, cur_tx; /* The next free ring entry */
38655 unsigned int rx_ring_size; /* current rx ring size */
38656 @@ -457,9 +457,9 @@ static void pcnet32_netif_start(struct net_device *dev)
38657 u16 val;
38658
38659 netif_wake_queue(dev);
38660 - val = lp->a.read_csr(ioaddr, CSR3);
38661 + val = lp->a->read_csr(ioaddr, CSR3);
38662 val &= 0x00ff;
38663 - lp->a.write_csr(ioaddr, CSR3, val);
38664 + lp->a->write_csr(ioaddr, CSR3, val);
38665 napi_enable(&lp->napi);
38666 }
38667
38668 @@ -744,7 +744,7 @@ static u32 pcnet32_get_link(struct net_device *dev)
38669 r = mii_link_ok(&lp->mii_if);
38670 } else if (lp->chip_version >= PCNET32_79C970A) {
38671 ulong ioaddr = dev->base_addr; /* card base I/O address */
38672 - r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
38673 + r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
38674 } else { /* can not detect link on really old chips */
38675 r = 1;
38676 }
38677 @@ -806,7 +806,7 @@ static int pcnet32_set_ringparam(struct net_device *dev,
38678 pcnet32_netif_stop(dev);
38679
38680 spin_lock_irqsave(&lp->lock, flags);
38681 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
38682 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
38683
38684 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
38685
38686 @@ -886,7 +886,7 @@ static void pcnet32_ethtool_test(struct net_device *dev,
38687 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38688 {
38689 struct pcnet32_private *lp = netdev_priv(dev);
38690 - struct pcnet32_access *a = &lp->a; /* access to registers */
38691 + struct pcnet32_access *a = lp->a; /* access to registers */
38692 ulong ioaddr = dev->base_addr; /* card base I/O address */
38693 struct sk_buff *skb; /* sk buff */
38694 int x, i; /* counters */
38695 @@ -906,21 +906,21 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38696 pcnet32_netif_stop(dev);
38697
38698 spin_lock_irqsave(&lp->lock, flags);
38699 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
38700 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
38701
38702 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
38703
38704 /* Reset the PCNET32 */
38705 - lp->a.reset(ioaddr);
38706 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38707 + lp->a->reset(ioaddr);
38708 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38709
38710 /* switch pcnet32 to 32bit mode */
38711 - lp->a.write_bcr(ioaddr, 20, 2);
38712 + lp->a->write_bcr(ioaddr, 20, 2);
38713
38714 /* purge & init rings but don't actually restart */
38715 pcnet32_restart(dev, 0x0000);
38716
38717 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
38718 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
38719
38720 /* Initialize Transmit buffers. */
38721 size = data_len + 15;
38722 @@ -966,10 +966,10 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38723
38724 /* set int loopback in CSR15 */
38725 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
38726 - lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
38727 + lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
38728
38729 teststatus = cpu_to_le16(0x8000);
38730 - lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
38731 + lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
38732
38733 /* Check status of descriptors */
38734 for (x = 0; x < numbuffs; x++) {
38735 @@ -990,7 +990,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38736 }
38737 }
38738
38739 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
38740 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
38741 wmb();
38742 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
38743 printk(KERN_DEBUG "%s: RX loopback packets:\n", dev->name);
38744 @@ -1039,7 +1039,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38745 pcnet32_restart(dev, CSR0_NORMAL);
38746 } else {
38747 pcnet32_purge_rx_ring(dev);
38748 - lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
38749 + lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
38750 }
38751 spin_unlock_irqrestore(&lp->lock, flags);
38752
38753 @@ -1049,7 +1049,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38754 static void pcnet32_led_blink_callback(struct net_device *dev)
38755 {
38756 struct pcnet32_private *lp = netdev_priv(dev);
38757 - struct pcnet32_access *a = &lp->a;
38758 + struct pcnet32_access *a = lp->a;
38759 ulong ioaddr = dev->base_addr;
38760 unsigned long flags;
38761 int i;
38762 @@ -1066,7 +1066,7 @@ static void pcnet32_led_blink_callback(struct net_device *dev)
38763 static int pcnet32_phys_id(struct net_device *dev, u32 data)
38764 {
38765 struct pcnet32_private *lp = netdev_priv(dev);
38766 - struct pcnet32_access *a = &lp->a;
38767 + struct pcnet32_access *a = lp->a;
38768 ulong ioaddr = dev->base_addr;
38769 unsigned long flags;
38770 int i, regs[4];
38771 @@ -1112,7 +1112,7 @@ static int pcnet32_suspend(struct net_device *dev, unsigned long *flags,
38772 {
38773 int csr5;
38774 struct pcnet32_private *lp = netdev_priv(dev);
38775 - struct pcnet32_access *a = &lp->a;
38776 + struct pcnet32_access *a = lp->a;
38777 ulong ioaddr = dev->base_addr;
38778 int ticks;
38779
38780 @@ -1388,8 +1388,8 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
38781 spin_lock_irqsave(&lp->lock, flags);
38782 if (pcnet32_tx(dev)) {
38783 /* reset the chip to clear the error condition, then restart */
38784 - lp->a.reset(ioaddr);
38785 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38786 + lp->a->reset(ioaddr);
38787 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38788 pcnet32_restart(dev, CSR0_START);
38789 netif_wake_queue(dev);
38790 }
38791 @@ -1401,12 +1401,12 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
38792 __napi_complete(napi);
38793
38794 /* clear interrupt masks */
38795 - val = lp->a.read_csr(ioaddr, CSR3);
38796 + val = lp->a->read_csr(ioaddr, CSR3);
38797 val &= 0x00ff;
38798 - lp->a.write_csr(ioaddr, CSR3, val);
38799 + lp->a->write_csr(ioaddr, CSR3, val);
38800
38801 /* Set interrupt enable. */
38802 - lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
38803 + lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
38804
38805 spin_unlock_irqrestore(&lp->lock, flags);
38806 }
38807 @@ -1429,7 +1429,7 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
38808 int i, csr0;
38809 u16 *buff = ptr;
38810 struct pcnet32_private *lp = netdev_priv(dev);
38811 - struct pcnet32_access *a = &lp->a;
38812 + struct pcnet32_access *a = lp->a;
38813 ulong ioaddr = dev->base_addr;
38814 unsigned long flags;
38815
38816 @@ -1466,9 +1466,9 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
38817 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
38818 if (lp->phymask & (1 << j)) {
38819 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
38820 - lp->a.write_bcr(ioaddr, 33,
38821 + lp->a->write_bcr(ioaddr, 33,
38822 (j << 5) | i);
38823 - *buff++ = lp->a.read_bcr(ioaddr, 34);
38824 + *buff++ = lp->a->read_bcr(ioaddr, 34);
38825 }
38826 }
38827 }
38828 @@ -1858,7 +1858,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
38829 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
38830 lp->options |= PCNET32_PORT_FD;
38831
38832 - lp->a = *a;
38833 + lp->a = a;
38834
38835 /* prior to register_netdev, dev->name is not yet correct */
38836 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
38837 @@ -1917,7 +1917,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
38838 if (lp->mii) {
38839 /* lp->phycount and lp->phymask are set to 0 by memset above */
38840
38841 - lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
38842 + lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
38843 /* scan for PHYs */
38844 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
38845 unsigned short id1, id2;
38846 @@ -1938,7 +1938,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
38847 "Found PHY %04x:%04x at address %d.\n",
38848 id1, id2, i);
38849 }
38850 - lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
38851 + lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
38852 if (lp->phycount > 1) {
38853 lp->options |= PCNET32_PORT_MII;
38854 }
38855 @@ -2109,10 +2109,10 @@ static int pcnet32_open(struct net_device *dev)
38856 }
38857
38858 /* Reset the PCNET32 */
38859 - lp->a.reset(ioaddr);
38860 + lp->a->reset(ioaddr);
38861
38862 /* switch pcnet32 to 32bit mode */
38863 - lp->a.write_bcr(ioaddr, 20, 2);
38864 + lp->a->write_bcr(ioaddr, 20, 2);
38865
38866 if (netif_msg_ifup(lp))
38867 printk(KERN_DEBUG
38868 @@ -2122,14 +2122,14 @@ static int pcnet32_open(struct net_device *dev)
38869 (u32) (lp->init_dma_addr));
38870
38871 /* set/reset autoselect bit */
38872 - val = lp->a.read_bcr(ioaddr, 2) & ~2;
38873 + val = lp->a->read_bcr(ioaddr, 2) & ~2;
38874 if (lp->options & PCNET32_PORT_ASEL)
38875 val |= 2;
38876 - lp->a.write_bcr(ioaddr, 2, val);
38877 + lp->a->write_bcr(ioaddr, 2, val);
38878
38879 /* handle full duplex setting */
38880 if (lp->mii_if.full_duplex) {
38881 - val = lp->a.read_bcr(ioaddr, 9) & ~3;
38882 + val = lp->a->read_bcr(ioaddr, 9) & ~3;
38883 if (lp->options & PCNET32_PORT_FD) {
38884 val |= 1;
38885 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
38886 @@ -2139,14 +2139,14 @@ static int pcnet32_open(struct net_device *dev)
38887 if (lp->chip_version == 0x2627)
38888 val |= 3;
38889 }
38890 - lp->a.write_bcr(ioaddr, 9, val);
38891 + lp->a->write_bcr(ioaddr, 9, val);
38892 }
38893
38894 /* set/reset GPSI bit in test register */
38895 - val = lp->a.read_csr(ioaddr, 124) & ~0x10;
38896 + val = lp->a->read_csr(ioaddr, 124) & ~0x10;
38897 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
38898 val |= 0x10;
38899 - lp->a.write_csr(ioaddr, 124, val);
38900 + lp->a->write_csr(ioaddr, 124, val);
38901
38902 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
38903 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
38904 @@ -2167,24 +2167,24 @@ static int pcnet32_open(struct net_device *dev)
38905 * duplex, and/or enable auto negotiation, and clear DANAS
38906 */
38907 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
38908 - lp->a.write_bcr(ioaddr, 32,
38909 - lp->a.read_bcr(ioaddr, 32) | 0x0080);
38910 + lp->a->write_bcr(ioaddr, 32,
38911 + lp->a->read_bcr(ioaddr, 32) | 0x0080);
38912 /* disable Auto Negotiation, set 10Mpbs, HD */
38913 - val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
38914 + val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
38915 if (lp->options & PCNET32_PORT_FD)
38916 val |= 0x10;
38917 if (lp->options & PCNET32_PORT_100)
38918 val |= 0x08;
38919 - lp->a.write_bcr(ioaddr, 32, val);
38920 + lp->a->write_bcr(ioaddr, 32, val);
38921 } else {
38922 if (lp->options & PCNET32_PORT_ASEL) {
38923 - lp->a.write_bcr(ioaddr, 32,
38924 - lp->a.read_bcr(ioaddr,
38925 + lp->a->write_bcr(ioaddr, 32,
38926 + lp->a->read_bcr(ioaddr,
38927 32) | 0x0080);
38928 /* enable auto negotiate, setup, disable fd */
38929 - val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
38930 + val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
38931 val |= 0x20;
38932 - lp->a.write_bcr(ioaddr, 32, val);
38933 + lp->a->write_bcr(ioaddr, 32, val);
38934 }
38935 }
38936 } else {
38937 @@ -2197,10 +2197,10 @@ static int pcnet32_open(struct net_device *dev)
38938 * There is really no good other way to handle multiple PHYs
38939 * other than turning off all automatics
38940 */
38941 - val = lp->a.read_bcr(ioaddr, 2);
38942 - lp->a.write_bcr(ioaddr, 2, val & ~2);
38943 - val = lp->a.read_bcr(ioaddr, 32);
38944 - lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
38945 + val = lp->a->read_bcr(ioaddr, 2);
38946 + lp->a->write_bcr(ioaddr, 2, val & ~2);
38947 + val = lp->a->read_bcr(ioaddr, 32);
38948 + lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
38949
38950 if (!(lp->options & PCNET32_PORT_ASEL)) {
38951 /* setup ecmd */
38952 @@ -2210,7 +2210,7 @@ static int pcnet32_open(struct net_device *dev)
38953 ecmd.speed =
38954 lp->
38955 options & PCNET32_PORT_100 ? SPEED_100 : SPEED_10;
38956 - bcr9 = lp->a.read_bcr(ioaddr, 9);
38957 + bcr9 = lp->a->read_bcr(ioaddr, 9);
38958
38959 if (lp->options & PCNET32_PORT_FD) {
38960 ecmd.duplex = DUPLEX_FULL;
38961 @@ -2219,7 +2219,7 @@ static int pcnet32_open(struct net_device *dev)
38962 ecmd.duplex = DUPLEX_HALF;
38963 bcr9 |= ~(1 << 0);
38964 }
38965 - lp->a.write_bcr(ioaddr, 9, bcr9);
38966 + lp->a->write_bcr(ioaddr, 9, bcr9);
38967 }
38968
38969 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
38970 @@ -2252,9 +2252,9 @@ static int pcnet32_open(struct net_device *dev)
38971
38972 #ifdef DO_DXSUFLO
38973 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
38974 - val = lp->a.read_csr(ioaddr, CSR3);
38975 + val = lp->a->read_csr(ioaddr, CSR3);
38976 val |= 0x40;
38977 - lp->a.write_csr(ioaddr, CSR3, val);
38978 + lp->a->write_csr(ioaddr, CSR3, val);
38979 }
38980 #endif
38981
38982 @@ -2270,11 +2270,11 @@ static int pcnet32_open(struct net_device *dev)
38983 napi_enable(&lp->napi);
38984
38985 /* Re-initialize the PCNET32, and start it when done. */
38986 - lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
38987 - lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
38988 + lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
38989 + lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
38990
38991 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38992 - lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
38993 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38994 + lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
38995
38996 netif_start_queue(dev);
38997
38998 @@ -2286,20 +2286,20 @@ static int pcnet32_open(struct net_device *dev)
38999
39000 i = 0;
39001 while (i++ < 100)
39002 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
39003 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
39004 break;
39005 /*
39006 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
39007 * reports that doing so triggers a bug in the '974.
39008 */
39009 - lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
39010 + lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
39011
39012 if (netif_msg_ifup(lp))
39013 printk(KERN_DEBUG
39014 "%s: pcnet32 open after %d ticks, init block %#x csr0 %4.4x.\n",
39015 dev->name, i,
39016 (u32) (lp->init_dma_addr),
39017 - lp->a.read_csr(ioaddr, CSR0));
39018 + lp->a->read_csr(ioaddr, CSR0));
39019
39020 spin_unlock_irqrestore(&lp->lock, flags);
39021
39022 @@ -2313,7 +2313,7 @@ static int pcnet32_open(struct net_device *dev)
39023 * Switch back to 16bit mode to avoid problems with dumb
39024 * DOS packet driver after a warm reboot
39025 */
39026 - lp->a.write_bcr(ioaddr, 20, 4);
39027 + lp->a->write_bcr(ioaddr, 20, 4);
39028
39029 err_free_irq:
39030 spin_unlock_irqrestore(&lp->lock, flags);
39031 @@ -2420,7 +2420,7 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
39032
39033 /* wait for stop */
39034 for (i = 0; i < 100; i++)
39035 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
39036 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
39037 break;
39038
39039 if (i >= 100 && netif_msg_drv(lp))
39040 @@ -2433,13 +2433,13 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
39041 return;
39042
39043 /* ReInit Ring */
39044 - lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
39045 + lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
39046 i = 0;
39047 while (i++ < 1000)
39048 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
39049 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
39050 break;
39051
39052 - lp->a.write_csr(ioaddr, CSR0, csr0_bits);
39053 + lp->a->write_csr(ioaddr, CSR0, csr0_bits);
39054 }
39055
39056 static void pcnet32_tx_timeout(struct net_device *dev)
39057 @@ -2452,8 +2452,8 @@ static void pcnet32_tx_timeout(struct net_device *dev)
39058 if (pcnet32_debug & NETIF_MSG_DRV)
39059 printk(KERN_ERR
39060 "%s: transmit timed out, status %4.4x, resetting.\n",
39061 - dev->name, lp->a.read_csr(ioaddr, CSR0));
39062 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
39063 + dev->name, lp->a->read_csr(ioaddr, CSR0));
39064 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
39065 dev->stats.tx_errors++;
39066 if (netif_msg_tx_err(lp)) {
39067 int i;
39068 @@ -2497,7 +2497,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
39069 if (netif_msg_tx_queued(lp)) {
39070 printk(KERN_DEBUG
39071 "%s: pcnet32_start_xmit() called, csr0 %4.4x.\n",
39072 - dev->name, lp->a.read_csr(ioaddr, CSR0));
39073 + dev->name, lp->a->read_csr(ioaddr, CSR0));
39074 }
39075
39076 /* Default status -- will not enable Successful-TxDone
39077 @@ -2528,7 +2528,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
39078 dev->stats.tx_bytes += skb->len;
39079
39080 /* Trigger an immediate send poll. */
39081 - lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
39082 + lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
39083
39084 dev->trans_start = jiffies;
39085
39086 @@ -2555,18 +2555,18 @@ pcnet32_interrupt(int irq, void *dev_id)
39087
39088 spin_lock(&lp->lock);
39089
39090 - csr0 = lp->a.read_csr(ioaddr, CSR0);
39091 + csr0 = lp->a->read_csr(ioaddr, CSR0);
39092 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
39093 if (csr0 == 0xffff) {
39094 break; /* PCMCIA remove happened */
39095 }
39096 /* Acknowledge all of the current interrupt sources ASAP. */
39097 - lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
39098 + lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
39099
39100 if (netif_msg_intr(lp))
39101 printk(KERN_DEBUG
39102 "%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
39103 - dev->name, csr0, lp->a.read_csr(ioaddr, CSR0));
39104 + dev->name, csr0, lp->a->read_csr(ioaddr, CSR0));
39105
39106 /* Log misc errors. */
39107 if (csr0 & 0x4000)
39108 @@ -2595,19 +2595,19 @@ pcnet32_interrupt(int irq, void *dev_id)
39109 if (napi_schedule_prep(&lp->napi)) {
39110 u16 val;
39111 /* set interrupt masks */
39112 - val = lp->a.read_csr(ioaddr, CSR3);
39113 + val = lp->a->read_csr(ioaddr, CSR3);
39114 val |= 0x5f00;
39115 - lp->a.write_csr(ioaddr, CSR3, val);
39116 + lp->a->write_csr(ioaddr, CSR3, val);
39117
39118 __napi_schedule(&lp->napi);
39119 break;
39120 }
39121 - csr0 = lp->a.read_csr(ioaddr, CSR0);
39122 + csr0 = lp->a->read_csr(ioaddr, CSR0);
39123 }
39124
39125 if (netif_msg_intr(lp))
39126 printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n",
39127 - dev->name, lp->a.read_csr(ioaddr, CSR0));
39128 + dev->name, lp->a->read_csr(ioaddr, CSR0));
39129
39130 spin_unlock(&lp->lock);
39131
39132 @@ -2627,21 +2627,21 @@ static int pcnet32_close(struct net_device *dev)
39133
39134 spin_lock_irqsave(&lp->lock, flags);
39135
39136 - dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
39137 + dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
39138
39139 if (netif_msg_ifdown(lp))
39140 printk(KERN_DEBUG
39141 "%s: Shutting down ethercard, status was %2.2x.\n",
39142 - dev->name, lp->a.read_csr(ioaddr, CSR0));
39143 + dev->name, lp->a->read_csr(ioaddr, CSR0));
39144
39145 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
39146 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
39147 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
39148
39149 /*
39150 * Switch back to 16bit mode to avoid problems with dumb
39151 * DOS packet driver after a warm reboot
39152 */
39153 - lp->a.write_bcr(ioaddr, 20, 4);
39154 + lp->a->write_bcr(ioaddr, 20, 4);
39155
39156 spin_unlock_irqrestore(&lp->lock, flags);
39157
39158 @@ -2664,7 +2664,7 @@ static struct net_device_stats *pcnet32_get_stats(struct net_device *dev)
39159 unsigned long flags;
39160
39161 spin_lock_irqsave(&lp->lock, flags);
39162 - dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
39163 + dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
39164 spin_unlock_irqrestore(&lp->lock, flags);
39165
39166 return &dev->stats;
39167 @@ -2686,10 +2686,10 @@ static void pcnet32_load_multicast(struct net_device *dev)
39168 if (dev->flags & IFF_ALLMULTI) {
39169 ib->filter[0] = cpu_to_le32(~0U);
39170 ib->filter[1] = cpu_to_le32(~0U);
39171 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
39172 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
39173 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
39174 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
39175 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
39176 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
39177 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
39178 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
39179 return;
39180 }
39181 /* clear the multicast filter */
39182 @@ -2710,7 +2710,7 @@ static void pcnet32_load_multicast(struct net_device *dev)
39183 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
39184 }
39185 for (i = 0; i < 4; i++)
39186 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
39187 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
39188 le16_to_cpu(mcast_table[i]));
39189 return;
39190 }
39191 @@ -2726,7 +2726,7 @@ static void pcnet32_set_multicast_list(struct net_device *dev)
39192
39193 spin_lock_irqsave(&lp->lock, flags);
39194 suspended = pcnet32_suspend(dev, &flags, 0);
39195 - csr15 = lp->a.read_csr(ioaddr, CSR15);
39196 + csr15 = lp->a->read_csr(ioaddr, CSR15);
39197 if (dev->flags & IFF_PROMISC) {
39198 /* Log any net taps. */
39199 if (netif_msg_hw(lp))
39200 @@ -2735,21 +2735,21 @@ static void pcnet32_set_multicast_list(struct net_device *dev)
39201 lp->init_block->mode =
39202 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
39203 7);
39204 - lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
39205 + lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
39206 } else {
39207 lp->init_block->mode =
39208 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
39209 - lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
39210 + lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
39211 pcnet32_load_multicast(dev);
39212 }
39213
39214 if (suspended) {
39215 int csr5;
39216 /* clear SUSPEND (SPND) - CSR5 bit 0 */
39217 - csr5 = lp->a.read_csr(ioaddr, CSR5);
39218 - lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
39219 + csr5 = lp->a->read_csr(ioaddr, CSR5);
39220 + lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
39221 } else {
39222 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
39223 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
39224 pcnet32_restart(dev, CSR0_NORMAL);
39225 netif_wake_queue(dev);
39226 }
39227 @@ -2767,8 +2767,8 @@ static int mdio_read(struct net_device *dev, int phy_id, int reg_num)
39228 if (!lp->mii)
39229 return 0;
39230
39231 - lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
39232 - val_out = lp->a.read_bcr(ioaddr, 34);
39233 + lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
39234 + val_out = lp->a->read_bcr(ioaddr, 34);
39235
39236 return val_out;
39237 }
39238 @@ -2782,8 +2782,8 @@ static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val)
39239 if (!lp->mii)
39240 return;
39241
39242 - lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
39243 - lp->a.write_bcr(ioaddr, 34, val);
39244 + lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
39245 + lp->a->write_bcr(ioaddr, 34, val);
39246 }
39247
39248 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
39249 @@ -2862,7 +2862,7 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
39250 curr_link = mii_link_ok(&lp->mii_if);
39251 } else {
39252 ulong ioaddr = dev->base_addr; /* card base I/O address */
39253 - curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
39254 + curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
39255 }
39256 if (!curr_link) {
39257 if (prev_link || verbose) {
39258 @@ -2887,13 +2887,13 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
39259 (ecmd.duplex ==
39260 DUPLEX_FULL) ? "full" : "half");
39261 }
39262 - bcr9 = lp->a.read_bcr(dev->base_addr, 9);
39263 + bcr9 = lp->a->read_bcr(dev->base_addr, 9);
39264 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
39265 if (lp->mii_if.full_duplex)
39266 bcr9 |= (1 << 0);
39267 else
39268 bcr9 &= ~(1 << 0);
39269 - lp->a.write_bcr(dev->base_addr, 9, bcr9);
39270 + lp->a->write_bcr(dev->base_addr, 9, bcr9);
39271 }
39272 } else {
39273 if (netif_msg_link(lp))
39274 diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
39275 index 7cc9898..6eb50d3 100644
39276 --- a/drivers/net/sis190.c
39277 +++ b/drivers/net/sis190.c
39278 @@ -1598,7 +1598,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
39279 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
39280 struct net_device *dev)
39281 {
39282 - static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
39283 + static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
39284 struct sis190_private *tp = netdev_priv(dev);
39285 struct pci_dev *isa_bridge;
39286 u8 reg, tmp8;
39287 diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
39288 index e13685a..60c948c 100644
39289 --- a/drivers/net/sundance.c
39290 +++ b/drivers/net/sundance.c
39291 @@ -225,7 +225,7 @@ enum {
39292 struct pci_id_info {
39293 const char *name;
39294 };
39295 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
39296 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
39297 {"D-Link DFE-550TX FAST Ethernet Adapter"},
39298 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
39299 {"D-Link DFE-580TX 4 port Server Adapter"},
39300 diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
39301 index 529f55a..cccaa18 100644
39302 --- a/drivers/net/tg3.h
39303 +++ b/drivers/net/tg3.h
39304 @@ -95,6 +95,7 @@
39305 #define CHIPREV_ID_5750_A0 0x4000
39306 #define CHIPREV_ID_5750_A1 0x4001
39307 #define CHIPREV_ID_5750_A3 0x4003
39308 +#define CHIPREV_ID_5750_C1 0x4201
39309 #define CHIPREV_ID_5750_C2 0x4202
39310 #define CHIPREV_ID_5752_A0_HW 0x5000
39311 #define CHIPREV_ID_5752_A0 0x6000
39312 diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
39313 index b9db1b5..720f9ce 100644
39314 --- a/drivers/net/tokenring/abyss.c
39315 +++ b/drivers/net/tokenring/abyss.c
39316 @@ -451,10 +451,12 @@ static struct pci_driver abyss_driver = {
39317
39318 static int __init abyss_init (void)
39319 {
39320 - abyss_netdev_ops = tms380tr_netdev_ops;
39321 + pax_open_kernel();
39322 + memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
39323
39324 - abyss_netdev_ops.ndo_open = abyss_open;
39325 - abyss_netdev_ops.ndo_stop = abyss_close;
39326 + *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
39327 + *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
39328 + pax_close_kernel();
39329
39330 return pci_register_driver(&abyss_driver);
39331 }
39332 diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
39333 index 456f8bf..373e56d 100644
39334 --- a/drivers/net/tokenring/madgemc.c
39335 +++ b/drivers/net/tokenring/madgemc.c
39336 @@ -755,9 +755,11 @@ static struct mca_driver madgemc_driver = {
39337
39338 static int __init madgemc_init (void)
39339 {
39340 - madgemc_netdev_ops = tms380tr_netdev_ops;
39341 - madgemc_netdev_ops.ndo_open = madgemc_open;
39342 - madgemc_netdev_ops.ndo_stop = madgemc_close;
39343 + pax_open_kernel();
39344 + memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
39345 + *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
39346 + *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
39347 + pax_close_kernel();
39348
39349 return mca_register_driver (&madgemc_driver);
39350 }
39351 diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
39352 index 16e8783..925bd49 100644
39353 --- a/drivers/net/tokenring/proteon.c
39354 +++ b/drivers/net/tokenring/proteon.c
39355 @@ -353,9 +353,11 @@ static int __init proteon_init(void)
39356 struct platform_device *pdev;
39357 int i, num = 0, err = 0;
39358
39359 - proteon_netdev_ops = tms380tr_netdev_ops;
39360 - proteon_netdev_ops.ndo_open = proteon_open;
39361 - proteon_netdev_ops.ndo_stop = tms380tr_close;
39362 + pax_open_kernel();
39363 + memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
39364 + *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
39365 + *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
39366 + pax_close_kernel();
39367
39368 err = platform_driver_register(&proteon_driver);
39369 if (err)
39370 diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
39371 index 46db5c5..37c1536 100644
39372 --- a/drivers/net/tokenring/skisa.c
39373 +++ b/drivers/net/tokenring/skisa.c
39374 @@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
39375 struct platform_device *pdev;
39376 int i, num = 0, err = 0;
39377
39378 - sk_isa_netdev_ops = tms380tr_netdev_ops;
39379 - sk_isa_netdev_ops.ndo_open = sk_isa_open;
39380 - sk_isa_netdev_ops.ndo_stop = tms380tr_close;
39381 + pax_open_kernel();
39382 + memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
39383 + *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
39384 + *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
39385 + pax_close_kernel();
39386
39387 err = platform_driver_register(&sk_isa_driver);
39388 if (err)
39389 diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
39390 index 74e5ba4..5cf6bc9 100644
39391 --- a/drivers/net/tulip/de2104x.c
39392 +++ b/drivers/net/tulip/de2104x.c
39393 @@ -1785,6 +1785,8 @@ static void __devinit de21041_get_srom_info (struct de_private *de)
39394 struct de_srom_info_leaf *il;
39395 void *bufp;
39396
39397 + pax_track_stack();
39398 +
39399 /* download entire eeprom */
39400 for (i = 0; i < DE_EEPROM_WORDS; i++)
39401 ((__le16 *)ee_data)[i] =
39402 diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
39403 index a8349b7..90f9dfe 100644
39404 --- a/drivers/net/tulip/de4x5.c
39405 +++ b/drivers/net/tulip/de4x5.c
39406 @@ -5472,7 +5472,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
39407 for (i=0; i<ETH_ALEN; i++) {
39408 tmp.addr[i] = dev->dev_addr[i];
39409 }
39410 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
39411 + if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
39412 break;
39413
39414 case DE4X5_SET_HWADDR: /* Set the hardware address */
39415 @@ -5512,7 +5512,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
39416 spin_lock_irqsave(&lp->lock, flags);
39417 memcpy(&statbuf, &lp->pktStats, ioc->len);
39418 spin_unlock_irqrestore(&lp->lock, flags);
39419 - if (copy_to_user(ioc->data, &statbuf, ioc->len))
39420 + if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
39421 return -EFAULT;
39422 break;
39423 }
39424 diff --git a/drivers/net/tulip/eeprom.c b/drivers/net/tulip/eeprom.c
39425 index 391acd3..56d11cd 100644
39426 --- a/drivers/net/tulip/eeprom.c
39427 +++ b/drivers/net/tulip/eeprom.c
39428 @@ -80,7 +80,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
39429 {NULL}};
39430
39431
39432 -static const char *block_name[] __devinitdata = {
39433 +static const char *block_name[] __devinitconst = {
39434 "21140 non-MII",
39435 "21140 MII PHY",
39436 "21142 Serial PHY",
39437 diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
39438 index b38d3b7..b1cff23 100644
39439 --- a/drivers/net/tulip/winbond-840.c
39440 +++ b/drivers/net/tulip/winbond-840.c
39441 @@ -235,7 +235,7 @@ struct pci_id_info {
39442 int drv_flags; /* Driver use, intended as capability flags. */
39443 };
39444
39445 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
39446 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
39447 { /* Sometime a Level-One switch card. */
39448 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
39449 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
39450 diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
39451 index f450bc9..2b747c8 100644
39452 --- a/drivers/net/usb/hso.c
39453 +++ b/drivers/net/usb/hso.c
39454 @@ -71,7 +71,7 @@
39455 #include <asm/byteorder.h>
39456 #include <linux/serial_core.h>
39457 #include <linux/serial.h>
39458 -
39459 +#include <asm/local.h>
39460
39461 #define DRIVER_VERSION "1.2"
39462 #define MOD_AUTHOR "Option Wireless"
39463 @@ -258,7 +258,7 @@ struct hso_serial {
39464
39465 /* from usb_serial_port */
39466 struct tty_struct *tty;
39467 - int open_count;
39468 + local_t open_count;
39469 spinlock_t serial_lock;
39470
39471 int (*write_data) (struct hso_serial *serial);
39472 @@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
39473 struct urb *urb;
39474
39475 urb = serial->rx_urb[0];
39476 - if (serial->open_count > 0) {
39477 + if (local_read(&serial->open_count) > 0) {
39478 count = put_rxbuf_data(urb, serial);
39479 if (count == -1)
39480 return;
39481 @@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
39482 DUMP1(urb->transfer_buffer, urb->actual_length);
39483
39484 /* Anyone listening? */
39485 - if (serial->open_count == 0)
39486 + if (local_read(&serial->open_count) == 0)
39487 return;
39488
39489 if (status == 0) {
39490 @@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
39491 spin_unlock_irq(&serial->serial_lock);
39492
39493 /* check for port already opened, if not set the termios */
39494 - serial->open_count++;
39495 - if (serial->open_count == 1) {
39496 + if (local_inc_return(&serial->open_count) == 1) {
39497 tty->low_latency = 1;
39498 serial->rx_state = RX_IDLE;
39499 /* Force default termio settings */
39500 @@ -1325,7 +1324,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
39501 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
39502 if (result) {
39503 hso_stop_serial_device(serial->parent);
39504 - serial->open_count--;
39505 + local_dec(&serial->open_count);
39506 kref_put(&serial->parent->ref, hso_serial_ref_free);
39507 }
39508 } else {
39509 @@ -1362,10 +1361,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
39510
39511 /* reset the rts and dtr */
39512 /* do the actual close */
39513 - serial->open_count--;
39514 + local_dec(&serial->open_count);
39515
39516 - if (serial->open_count <= 0) {
39517 - serial->open_count = 0;
39518 + if (local_read(&serial->open_count) <= 0) {
39519 + local_set(&serial->open_count, 0);
39520 spin_lock_irq(&serial->serial_lock);
39521 if (serial->tty == tty) {
39522 serial->tty->driver_data = NULL;
39523 @@ -1447,7 +1446,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
39524
39525 /* the actual setup */
39526 spin_lock_irqsave(&serial->serial_lock, flags);
39527 - if (serial->open_count)
39528 + if (local_read(&serial->open_count))
39529 _hso_serial_set_termios(tty, old);
39530 else
39531 tty->termios = old;
39532 @@ -3097,7 +3096,7 @@ static int hso_resume(struct usb_interface *iface)
39533 /* Start all serial ports */
39534 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
39535 if (serial_table[i] && (serial_table[i]->interface == iface)) {
39536 - if (dev2ser(serial_table[i])->open_count) {
39537 + if (local_read(&dev2ser(serial_table[i])->open_count)) {
39538 result =
39539 hso_start_serial_device(serial_table[i], GFP_NOIO);
39540 hso_kick_transmit(dev2ser(serial_table[i]));
39541 diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
39542 index 3e94f0c..ffdd926 100644
39543 --- a/drivers/net/vxge/vxge-config.h
39544 +++ b/drivers/net/vxge/vxge-config.h
39545 @@ -474,7 +474,7 @@ struct vxge_hw_uld_cbs {
39546 void (*link_down)(struct __vxge_hw_device *devh);
39547 void (*crit_err)(struct __vxge_hw_device *devh,
39548 enum vxge_hw_event type, u64 ext_data);
39549 -};
39550 +} __no_const;
39551
39552 /*
39553 * struct __vxge_hw_blockpool_entry - Block private data structure
39554 diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
39555 index 068d7a9..35293de 100644
39556 --- a/drivers/net/vxge/vxge-main.c
39557 +++ b/drivers/net/vxge/vxge-main.c
39558 @@ -93,6 +93,8 @@ static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
39559 struct sk_buff *completed[NR_SKB_COMPLETED];
39560 int more;
39561
39562 + pax_track_stack();
39563 +
39564 do {
39565 more = 0;
39566 skb_ptr = completed;
39567 @@ -1779,6 +1781,8 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
39568 u8 mtable[256] = {0}; /* CPU to vpath mapping */
39569 int index;
39570
39571 + pax_track_stack();
39572 +
39573 /*
39574 * Filling
39575 * - itable with bucket numbers
39576 diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
39577 index 461742b..81be42e 100644
39578 --- a/drivers/net/vxge/vxge-traffic.h
39579 +++ b/drivers/net/vxge/vxge-traffic.h
39580 @@ -2123,7 +2123,7 @@ struct vxge_hw_mempool_cbs {
39581 struct vxge_hw_mempool_dma *dma_object,
39582 u32 index,
39583 u32 is_last);
39584 -};
39585 +} __no_const;
39586
39587 void
39588 __vxge_hw_mempool_destroy(
39589 diff --git a/drivers/net/wan/cycx_x25.c b/drivers/net/wan/cycx_x25.c
39590 index cd8cb95..4153b79 100644
39591 --- a/drivers/net/wan/cycx_x25.c
39592 +++ b/drivers/net/wan/cycx_x25.c
39593 @@ -1017,6 +1017,8 @@ static void hex_dump(char *msg, unsigned char *p, int len)
39594 unsigned char hex[1024],
39595 * phex = hex;
39596
39597 + pax_track_stack();
39598 +
39599 if (len >= (sizeof(hex) / 2))
39600 len = (sizeof(hex) / 2) - 1;
39601
39602 diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
39603 index aa9248f..a4e3c3b 100644
39604 --- a/drivers/net/wan/hdlc_x25.c
39605 +++ b/drivers/net/wan/hdlc_x25.c
39606 @@ -136,16 +136,16 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
39607
39608 static int x25_open(struct net_device *dev)
39609 {
39610 - struct lapb_register_struct cb;
39611 + static struct lapb_register_struct cb = {
39612 + .connect_confirmation = x25_connected,
39613 + .connect_indication = x25_connected,
39614 + .disconnect_confirmation = x25_disconnected,
39615 + .disconnect_indication = x25_disconnected,
39616 + .data_indication = x25_data_indication,
39617 + .data_transmit = x25_data_transmit
39618 + };
39619 int result;
39620
39621 - cb.connect_confirmation = x25_connected;
39622 - cb.connect_indication = x25_connected;
39623 - cb.disconnect_confirmation = x25_disconnected;
39624 - cb.disconnect_indication = x25_disconnected;
39625 - cb.data_indication = x25_data_indication;
39626 - cb.data_transmit = x25_data_transmit;
39627 -
39628 result = lapb_register(dev, &cb);
39629 if (result != LAPB_OK)
39630 return result;
39631 diff --git a/drivers/net/wimax/i2400m/usb-fw.c b/drivers/net/wimax/i2400m/usb-fw.c
39632 index 5ad287c..783b020 100644
39633 --- a/drivers/net/wimax/i2400m/usb-fw.c
39634 +++ b/drivers/net/wimax/i2400m/usb-fw.c
39635 @@ -263,6 +263,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(struct i2400m *i2400m,
39636 int do_autopm = 1;
39637 DECLARE_COMPLETION_ONSTACK(notif_completion);
39638
39639 + pax_track_stack();
39640 +
39641 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
39642 i2400m, ack, ack_size);
39643 BUG_ON(_ack == i2400m->bm_ack_buf);
39644 diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
39645 index 6c26840..62c97c3 100644
39646 --- a/drivers/net/wireless/airo.c
39647 +++ b/drivers/net/wireless/airo.c
39648 @@ -3003,6 +3003,8 @@ static void airo_process_scan_results (struct airo_info *ai) {
39649 BSSListElement * loop_net;
39650 BSSListElement * tmp_net;
39651
39652 + pax_track_stack();
39653 +
39654 /* Blow away current list of scan results */
39655 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
39656 list_move_tail (&loop_net->list, &ai->network_free_list);
39657 @@ -3783,6 +3785,8 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
39658 WepKeyRid wkr;
39659 int rc;
39660
39661 + pax_track_stack();
39662 +
39663 memset( &mySsid, 0, sizeof( mySsid ) );
39664 kfree (ai->flash);
39665 ai->flash = NULL;
39666 @@ -4758,6 +4762,8 @@ static int proc_stats_rid_open( struct inode *inode,
39667 __le32 *vals = stats.vals;
39668 int len;
39669
39670 + pax_track_stack();
39671 +
39672 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
39673 return -ENOMEM;
39674 data = (struct proc_data *)file->private_data;
39675 @@ -5487,6 +5493,8 @@ static int proc_BSSList_open( struct inode *inode, struct file *file ) {
39676 /* If doLoseSync is not 1, we won't do a Lose Sync */
39677 int doLoseSync = -1;
39678
39679 + pax_track_stack();
39680 +
39681 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
39682 return -ENOMEM;
39683 data = (struct proc_data *)file->private_data;
39684 @@ -7193,6 +7201,8 @@ static int airo_get_aplist(struct net_device *dev,
39685 int i;
39686 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
39687
39688 + pax_track_stack();
39689 +
39690 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
39691 if (!qual)
39692 return -ENOMEM;
39693 @@ -7753,6 +7763,8 @@ static void airo_read_wireless_stats(struct airo_info *local)
39694 CapabilityRid cap_rid;
39695 __le32 *vals = stats_rid.vals;
39696
39697 + pax_track_stack();
39698 +
39699 /* Get stats out of the card */
39700 clear_bit(JOB_WSTATS, &local->jobs);
39701 if (local->power.event) {
39702 diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
39703 index 747508c..82e965d 100644
39704 --- a/drivers/net/wireless/ath/ath5k/debug.c
39705 +++ b/drivers/net/wireless/ath/ath5k/debug.c
39706 @@ -205,6 +205,8 @@ static ssize_t read_file_beacon(struct file *file, char __user *user_buf,
39707 unsigned int v;
39708 u64 tsf;
39709
39710 + pax_track_stack();
39711 +
39712 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
39713 len += snprintf(buf+len, sizeof(buf)-len,
39714 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
39715 @@ -318,6 +320,8 @@ static ssize_t read_file_debug(struct file *file, char __user *user_buf,
39716 unsigned int len = 0;
39717 unsigned int i;
39718
39719 + pax_track_stack();
39720 +
39721 len += snprintf(buf+len, sizeof(buf)-len,
39722 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
39723
39724 diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
39725 index 2be4c22..593b1eb 100644
39726 --- a/drivers/net/wireless/ath/ath9k/debug.c
39727 +++ b/drivers/net/wireless/ath/ath9k/debug.c
39728 @@ -220,6 +220,8 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
39729 char buf[512];
39730 unsigned int len = 0;
39731
39732 + pax_track_stack();
39733 +
39734 len += snprintf(buf + len, sizeof(buf) - len,
39735 "%8s: %10u\n", "RX", sc->debug.stats.istats.rxok);
39736 len += snprintf(buf + len, sizeof(buf) - len,
39737 @@ -360,6 +362,8 @@ static ssize_t read_file_wiphy(struct file *file, char __user *user_buf,
39738 int i;
39739 u8 addr[ETH_ALEN];
39740
39741 + pax_track_stack();
39742 +
39743 len += snprintf(buf + len, sizeof(buf) - len,
39744 "primary: %s (%s chan=%d ht=%d)\n",
39745 wiphy_name(sc->pri_wiphy->hw->wiphy),
39746 diff --git a/drivers/net/wireless/b43/debugfs.c b/drivers/net/wireless/b43/debugfs.c
39747 index 80b19a4..dab3a45 100644
39748 --- a/drivers/net/wireless/b43/debugfs.c
39749 +++ b/drivers/net/wireless/b43/debugfs.c
39750 @@ -43,7 +43,7 @@ static struct dentry *rootdir;
39751 struct b43_debugfs_fops {
39752 ssize_t (*read)(struct b43_wldev *dev, char *buf, size_t bufsize);
39753 int (*write)(struct b43_wldev *dev, const char *buf, size_t count);
39754 - struct file_operations fops;
39755 + const struct file_operations fops;
39756 /* Offset of struct b43_dfs_file in struct b43_dfsentry */
39757 size_t file_struct_offset;
39758 };
39759 diff --git a/drivers/net/wireless/b43legacy/debugfs.c b/drivers/net/wireless/b43legacy/debugfs.c
39760 index 1f85ac5..c99b4b4 100644
39761 --- a/drivers/net/wireless/b43legacy/debugfs.c
39762 +++ b/drivers/net/wireless/b43legacy/debugfs.c
39763 @@ -44,7 +44,7 @@ static struct dentry *rootdir;
39764 struct b43legacy_debugfs_fops {
39765 ssize_t (*read)(struct b43legacy_wldev *dev, char *buf, size_t bufsize);
39766 int (*write)(struct b43legacy_wldev *dev, const char *buf, size_t count);
39767 - struct file_operations fops;
39768 + const struct file_operations fops;
39769 /* Offset of struct b43legacy_dfs_file in struct b43legacy_dfsentry */
39770 size_t file_struct_offset;
39771 /* Take wl->irq_lock before calling read/write? */
39772 diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
39773 index 43102bf..3b569c3 100644
39774 --- a/drivers/net/wireless/ipw2x00/ipw2100.c
39775 +++ b/drivers/net/wireless/ipw2x00/ipw2100.c
39776 @@ -2014,6 +2014,8 @@ static int ipw2100_set_essid(struct ipw2100_priv *priv, char *essid,
39777 int err;
39778 DECLARE_SSID_BUF(ssid);
39779
39780 + pax_track_stack();
39781 +
39782 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
39783
39784 if (ssid_len)
39785 @@ -5380,6 +5382,8 @@ static int ipw2100_set_key(struct ipw2100_priv *priv,
39786 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
39787 int err;
39788
39789 + pax_track_stack();
39790 +
39791 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
39792 idx, keylen, len);
39793
39794 diff --git a/drivers/net/wireless/ipw2x00/libipw_rx.c b/drivers/net/wireless/ipw2x00/libipw_rx.c
39795 index 282b1f7..169f0cf 100644
39796 --- a/drivers/net/wireless/ipw2x00/libipw_rx.c
39797 +++ b/drivers/net/wireless/ipw2x00/libipw_rx.c
39798 @@ -1566,6 +1566,8 @@ static void libipw_process_probe_response(struct libipw_device
39799 unsigned long flags;
39800 DECLARE_SSID_BUF(ssid);
39801
39802 + pax_track_stack();
39803 +
39804 LIBIPW_DEBUG_SCAN("'%s' (%pM"
39805 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
39806 print_ssid(ssid, info_element->data, info_element->len),
39807 diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
39808 index 950267a..80d5fd2 100644
39809 --- a/drivers/net/wireless/iwlwifi/iwl-1000.c
39810 +++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
39811 @@ -137,7 +137,7 @@ static struct iwl_lib_ops iwl1000_lib = {
39812 },
39813 };
39814
39815 -static struct iwl_ops iwl1000_ops = {
39816 +static const struct iwl_ops iwl1000_ops = {
39817 .ucode = &iwl5000_ucode,
39818 .lib = &iwl1000_lib,
39819 .hcmd = &iwl5000_hcmd,
39820 diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
39821 index 56bfcc3..b348020 100644
39822 --- a/drivers/net/wireless/iwlwifi/iwl-3945.c
39823 +++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
39824 @@ -2874,7 +2874,7 @@ static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
39825 .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
39826 };
39827
39828 -static struct iwl_ops iwl3945_ops = {
39829 +static const struct iwl_ops iwl3945_ops = {
39830 .ucode = &iwl3945_ucode,
39831 .lib = &iwl3945_lib,
39832 .hcmd = &iwl3945_hcmd,
39833 diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
39834 index 585b8d4..e142963 100644
39835 --- a/drivers/net/wireless/iwlwifi/iwl-4965.c
39836 +++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
39837 @@ -2345,7 +2345,7 @@ static struct iwl_lib_ops iwl4965_lib = {
39838 },
39839 };
39840
39841 -static struct iwl_ops iwl4965_ops = {
39842 +static const struct iwl_ops iwl4965_ops = {
39843 .ucode = &iwl4965_ucode,
39844 .lib = &iwl4965_lib,
39845 .hcmd = &iwl4965_hcmd,
39846 diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
39847 index 1f423f2..e37c192 100644
39848 --- a/drivers/net/wireless/iwlwifi/iwl-5000.c
39849 +++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
39850 @@ -1633,14 +1633,14 @@ static struct iwl_lib_ops iwl5150_lib = {
39851 },
39852 };
39853
39854 -struct iwl_ops iwl5000_ops = {
39855 +const struct iwl_ops iwl5000_ops = {
39856 .ucode = &iwl5000_ucode,
39857 .lib = &iwl5000_lib,
39858 .hcmd = &iwl5000_hcmd,
39859 .utils = &iwl5000_hcmd_utils,
39860 };
39861
39862 -static struct iwl_ops iwl5150_ops = {
39863 +static const struct iwl_ops iwl5150_ops = {
39864 .ucode = &iwl5000_ucode,
39865 .lib = &iwl5150_lib,
39866 .hcmd = &iwl5000_hcmd,
39867 diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
39868 index 1473452..f07d5e1 100644
39869 --- a/drivers/net/wireless/iwlwifi/iwl-6000.c
39870 +++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
39871 @@ -146,7 +146,7 @@ static struct iwl_hcmd_utils_ops iwl6000_hcmd_utils = {
39872 .calc_rssi = iwl5000_calc_rssi,
39873 };
39874
39875 -static struct iwl_ops iwl6000_ops = {
39876 +static const struct iwl_ops iwl6000_ops = {
39877 .ucode = &iwl5000_ucode,
39878 .lib = &iwl6000_lib,
39879 .hcmd = &iwl5000_hcmd,
39880 diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
39881 index 1a3dfa2..b3e0a61 100644
39882 --- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
39883 +++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
39884 @@ -857,6 +857,8 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
39885 u8 active_index = 0;
39886 s32 tpt = 0;
39887
39888 + pax_track_stack();
39889 +
39890 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
39891
39892 if (!ieee80211_is_data(hdr->frame_control) ||
39893 @@ -2722,6 +2724,8 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
39894 u8 valid_tx_ant = 0;
39895 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
39896
39897 + pax_track_stack();
39898 +
39899 /* Override starting rate (index 0) if needed for debug purposes */
39900 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
39901
39902 diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
39903 index 0e56d78..6a3c107 100644
39904 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c
39905 +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
39906 @@ -2911,7 +2911,9 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
39907 if (iwl_debug_level & IWL_DL_INFO)
39908 dev_printk(KERN_DEBUG, &(pdev->dev),
39909 "Disabling hw_scan\n");
39910 - iwl_hw_ops.hw_scan = NULL;
39911 + pax_open_kernel();
39912 + *(void **)&iwl_hw_ops.hw_scan = NULL;
39913 + pax_close_kernel();
39914 }
39915
39916 hw = iwl_alloc_all(cfg, &iwl_hw_ops);
39917 diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
39918 index cbc6290..eb323d7 100644
39919 --- a/drivers/net/wireless/iwlwifi/iwl-debug.h
39920 +++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
39921 @@ -118,8 +118,8 @@ void iwl_dbgfs_unregister(struct iwl_priv *priv);
39922 #endif
39923
39924 #else
39925 -#define IWL_DEBUG(__priv, level, fmt, args...)
39926 -#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
39927 +#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
39928 +#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
39929 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
39930 void *p, u32 len)
39931 {}
39932 diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
39933 index a198bcf..8e68233 100644
39934 --- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
39935 +++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
39936 @@ -524,6 +524,8 @@ static ssize_t iwl_dbgfs_status_read(struct file *file,
39937 int pos = 0;
39938 const size_t bufsz = sizeof(buf);
39939
39940 + pax_track_stack();
39941 +
39942 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
39943 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
39944 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_SYNC_ACTIVE: %d\n",
39945 @@ -658,6 +660,8 @@ static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
39946 const size_t bufsz = sizeof(buf);
39947 ssize_t ret;
39948
39949 + pax_track_stack();
39950 +
39951 for (i = 0; i < AC_NUM; i++) {
39952 pos += scnprintf(buf + pos, bufsz - pos,
39953 "\tcw_min\tcw_max\taifsn\ttxop\n");
39954 diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
39955 index 3539ea4..b174bfa 100644
39956 --- a/drivers/net/wireless/iwlwifi/iwl-dev.h
39957 +++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
39958 @@ -68,7 +68,7 @@ struct iwl_tx_queue;
39959
39960 /* shared structures from iwl-5000.c */
39961 extern struct iwl_mod_params iwl50_mod_params;
39962 -extern struct iwl_ops iwl5000_ops;
39963 +extern const struct iwl_ops iwl5000_ops;
39964 extern struct iwl_ucode_ops iwl5000_ucode;
39965 extern struct iwl_lib_ops iwl5000_lib;
39966 extern struct iwl_hcmd_ops iwl5000_hcmd;
39967 diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
39968 index 619590d..69235ee 100644
39969 --- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
39970 +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
39971 @@ -3927,7 +3927,9 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
39972 */
39973 if (iwl3945_mod_params.disable_hw_scan) {
39974 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
39975 - iwl3945_hw_ops.hw_scan = NULL;
39976 + pax_open_kernel();
39977 + *(void **)&iwl3945_hw_ops.hw_scan = NULL;
39978 + pax_close_kernel();
39979 }
39980
39981
39982 diff --git a/drivers/net/wireless/iwmc3200wifi/debugfs.c b/drivers/net/wireless/iwmc3200wifi/debugfs.c
39983 index 1465379..fe4d78b 100644
39984 --- a/drivers/net/wireless/iwmc3200wifi/debugfs.c
39985 +++ b/drivers/net/wireless/iwmc3200wifi/debugfs.c
39986 @@ -299,6 +299,8 @@ static ssize_t iwm_debugfs_fw_err_read(struct file *filp,
39987 int buf_len = 512;
39988 size_t len = 0;
39989
39990 + pax_track_stack();
39991 +
39992 if (*ppos != 0)
39993 return 0;
39994 if (count < sizeof(buf))
39995 diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c
39996 index 893a55c..7f66a50 100644
39997 --- a/drivers/net/wireless/libertas/debugfs.c
39998 +++ b/drivers/net/wireless/libertas/debugfs.c
39999 @@ -708,7 +708,7 @@ out_unlock:
40000 struct lbs_debugfs_files {
40001 const char *name;
40002 int perm;
40003 - struct file_operations fops;
40004 + const struct file_operations fops;
40005 };
40006
40007 static const struct lbs_debugfs_files debugfs_files[] = {
40008 diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
40009 index 2ecbedb..42704f0 100644
40010 --- a/drivers/net/wireless/rndis_wlan.c
40011 +++ b/drivers/net/wireless/rndis_wlan.c
40012 @@ -1176,7 +1176,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
40013
40014 devdbg(usbdev, "set_rts_threshold %i", rts_threshold);
40015
40016 - if (rts_threshold < 0 || rts_threshold > 2347)
40017 + if (rts_threshold > 2347)
40018 rts_threshold = 2347;
40019
40020 tmp = cpu_to_le32(rts_threshold);
40021 diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
40022 index 334ccd6..47f8944 100644
40023 --- a/drivers/oprofile/buffer_sync.c
40024 +++ b/drivers/oprofile/buffer_sync.c
40025 @@ -342,7 +342,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
40026 if (cookie == NO_COOKIE)
40027 offset = pc;
40028 if (cookie == INVALID_COOKIE) {
40029 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
40030 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
40031 offset = pc;
40032 }
40033 if (cookie != last_cookie) {
40034 @@ -386,14 +386,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
40035 /* add userspace sample */
40036
40037 if (!mm) {
40038 - atomic_inc(&oprofile_stats.sample_lost_no_mm);
40039 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
40040 return 0;
40041 }
40042
40043 cookie = lookup_dcookie(mm, s->eip, &offset);
40044
40045 if (cookie == INVALID_COOKIE) {
40046 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
40047 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
40048 return 0;
40049 }
40050
40051 @@ -562,7 +562,7 @@ void sync_buffer(int cpu)
40052 /* ignore backtraces if failed to add a sample */
40053 if (state == sb_bt_start) {
40054 state = sb_bt_ignore;
40055 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
40056 + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
40057 }
40058 }
40059 release_mm(mm);
40060 diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
40061 index 5df60a6..72f5c1c 100644
40062 --- a/drivers/oprofile/event_buffer.c
40063 +++ b/drivers/oprofile/event_buffer.c
40064 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
40065 }
40066
40067 if (buffer_pos == buffer_size) {
40068 - atomic_inc(&oprofile_stats.event_lost_overflow);
40069 + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
40070 return;
40071 }
40072
40073 diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
40074 index dc8a042..fe5f315 100644
40075 --- a/drivers/oprofile/oprof.c
40076 +++ b/drivers/oprofile/oprof.c
40077 @@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
40078 if (oprofile_ops.switch_events())
40079 return;
40080
40081 - atomic_inc(&oprofile_stats.multiplex_counter);
40082 + atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
40083 start_switch_worker();
40084 }
40085
40086 diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
40087 index 61689e8..387f7f8 100644
40088 --- a/drivers/oprofile/oprofile_stats.c
40089 +++ b/drivers/oprofile/oprofile_stats.c
40090 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
40091 cpu_buf->sample_invalid_eip = 0;
40092 }
40093
40094 - atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
40095 - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
40096 - atomic_set(&oprofile_stats.event_lost_overflow, 0);
40097 - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
40098 - atomic_set(&oprofile_stats.multiplex_counter, 0);
40099 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
40100 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
40101 + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
40102 + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
40103 + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
40104 }
40105
40106
40107 diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
40108 index 0b54e46..a37c527 100644
40109 --- a/drivers/oprofile/oprofile_stats.h
40110 +++ b/drivers/oprofile/oprofile_stats.h
40111 @@ -13,11 +13,11 @@
40112 #include <asm/atomic.h>
40113
40114 struct oprofile_stat_struct {
40115 - atomic_t sample_lost_no_mm;
40116 - atomic_t sample_lost_no_mapping;
40117 - atomic_t bt_lost_no_mapping;
40118 - atomic_t event_lost_overflow;
40119 - atomic_t multiplex_counter;
40120 + atomic_unchecked_t sample_lost_no_mm;
40121 + atomic_unchecked_t sample_lost_no_mapping;
40122 + atomic_unchecked_t bt_lost_no_mapping;
40123 + atomic_unchecked_t event_lost_overflow;
40124 + atomic_unchecked_t multiplex_counter;
40125 };
40126
40127 extern struct oprofile_stat_struct oprofile_stats;
40128 diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
40129 index 2766a6d..80c77e2 100644
40130 --- a/drivers/oprofile/oprofilefs.c
40131 +++ b/drivers/oprofile/oprofilefs.c
40132 @@ -187,7 +187,7 @@ static const struct file_operations atomic_ro_fops = {
40133
40134
40135 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
40136 - char const *name, atomic_t *val)
40137 + char const *name, atomic_unchecked_t *val)
40138 {
40139 struct dentry *d = __oprofilefs_create_file(sb, root, name,
40140 &atomic_ro_fops, 0444);
40141 diff --git a/drivers/parisc/pdc_stable.c b/drivers/parisc/pdc_stable.c
40142 index 13a64bc..ad62835 100644
40143 --- a/drivers/parisc/pdc_stable.c
40144 +++ b/drivers/parisc/pdc_stable.c
40145 @@ -481,7 +481,7 @@ pdcspath_attr_store(struct kobject *kobj, struct attribute *attr,
40146 return ret;
40147 }
40148
40149 -static struct sysfs_ops pdcspath_attr_ops = {
40150 +static const struct sysfs_ops pdcspath_attr_ops = {
40151 .show = pdcspath_attr_show,
40152 .store = pdcspath_attr_store,
40153 };
40154 diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
40155 index 8eefe56..40751a7 100644
40156 --- a/drivers/parport/procfs.c
40157 +++ b/drivers/parport/procfs.c
40158 @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
40159
40160 *ppos += len;
40161
40162 - return copy_to_user(result, buffer, len) ? -EFAULT : 0;
40163 + return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
40164 }
40165
40166 #ifdef CONFIG_PARPORT_1284
40167 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
40168
40169 *ppos += len;
40170
40171 - return copy_to_user (result, buffer, len) ? -EFAULT : 0;
40172 + return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
40173 }
40174 #endif /* IEEE1284.3 support. */
40175
40176 diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
40177 index 73e7d8e..c80f3d2 100644
40178 --- a/drivers/pci/hotplug/acpiphp_glue.c
40179 +++ b/drivers/pci/hotplug/acpiphp_glue.c
40180 @@ -111,7 +111,7 @@ static int post_dock_fixups(struct notifier_block *nb, unsigned long val,
40181 }
40182
40183
40184 -static struct acpi_dock_ops acpiphp_dock_ops = {
40185 +static const struct acpi_dock_ops acpiphp_dock_ops = {
40186 .handler = handle_hotplug_event_func,
40187 };
40188
40189 diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
40190 index 9fff878..ad0ad53 100644
40191 --- a/drivers/pci/hotplug/cpci_hotplug.h
40192 +++ b/drivers/pci/hotplug/cpci_hotplug.h
40193 @@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
40194 int (*hardware_test) (struct slot* slot, u32 value);
40195 u8 (*get_power) (struct slot* slot);
40196 int (*set_power) (struct slot* slot, int value);
40197 -};
40198 +} __no_const;
40199
40200 struct cpci_hp_controller {
40201 unsigned int irq;
40202 diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
40203 index 76ba8a1..20ca857 100644
40204 --- a/drivers/pci/hotplug/cpqphp_nvram.c
40205 +++ b/drivers/pci/hotplug/cpqphp_nvram.c
40206 @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
40207
40208 void compaq_nvram_init (void __iomem *rom_start)
40209 {
40210 +
40211 +#ifndef CONFIG_PAX_KERNEXEC
40212 if (rom_start) {
40213 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
40214 }
40215 +#endif
40216 +
40217 dbg("int15 entry = %p\n", compaq_int15_entry_point);
40218
40219 /* initialize our int15 lock */
40220 diff --git a/drivers/pci/hotplug/fakephp.c b/drivers/pci/hotplug/fakephp.c
40221 index 6151389..0a894ef 100644
40222 --- a/drivers/pci/hotplug/fakephp.c
40223 +++ b/drivers/pci/hotplug/fakephp.c
40224 @@ -73,7 +73,7 @@ static void legacy_release(struct kobject *kobj)
40225 }
40226
40227 static struct kobj_type legacy_ktype = {
40228 - .sysfs_ops = &(struct sysfs_ops){
40229 + .sysfs_ops = &(const struct sysfs_ops){
40230 .store = legacy_store, .show = legacy_show
40231 },
40232 .release = &legacy_release,
40233 diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
40234 index 5b680df..fe05b7e 100644
40235 --- a/drivers/pci/intel-iommu.c
40236 +++ b/drivers/pci/intel-iommu.c
40237 @@ -2643,7 +2643,7 @@ error:
40238 return 0;
40239 }
40240
40241 -static dma_addr_t intel_map_page(struct device *dev, struct page *page,
40242 +dma_addr_t intel_map_page(struct device *dev, struct page *page,
40243 unsigned long offset, size_t size,
40244 enum dma_data_direction dir,
40245 struct dma_attrs *attrs)
40246 @@ -2719,7 +2719,7 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova)
40247 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
40248 }
40249
40250 -static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
40251 +void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
40252 size_t size, enum dma_data_direction dir,
40253 struct dma_attrs *attrs)
40254 {
40255 @@ -2768,7 +2768,7 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
40256 }
40257 }
40258
40259 -static void *intel_alloc_coherent(struct device *hwdev, size_t size,
40260 +void *intel_alloc_coherent(struct device *hwdev, size_t size,
40261 dma_addr_t *dma_handle, gfp_t flags)
40262 {
40263 void *vaddr;
40264 @@ -2800,7 +2800,7 @@ static void *intel_alloc_coherent(struct device *hwdev, size_t size,
40265 return NULL;
40266 }
40267
40268 -static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
40269 +void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
40270 dma_addr_t dma_handle)
40271 {
40272 int order;
40273 @@ -2812,7 +2812,7 @@ static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
40274 free_pages((unsigned long)vaddr, order);
40275 }
40276
40277 -static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
40278 +void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
40279 int nelems, enum dma_data_direction dir,
40280 struct dma_attrs *attrs)
40281 {
40282 @@ -2872,7 +2872,7 @@ static int intel_nontranslate_map_sg(struct device *hddev,
40283 return nelems;
40284 }
40285
40286 -static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
40287 +int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
40288 enum dma_data_direction dir, struct dma_attrs *attrs)
40289 {
40290 int i;
40291 @@ -2941,12 +2941,12 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
40292 return nelems;
40293 }
40294
40295 -static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
40296 +int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
40297 {
40298 return !dma_addr;
40299 }
40300
40301 -struct dma_map_ops intel_dma_ops = {
40302 +const struct dma_map_ops intel_dma_ops = {
40303 .alloc_coherent = intel_alloc_coherent,
40304 .free_coherent = intel_free_coherent,
40305 .map_sg = intel_map_sg,
40306 diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
40307 index 5b7056c..607bc94 100644
40308 --- a/drivers/pci/pcie/aspm.c
40309 +++ b/drivers/pci/pcie/aspm.c
40310 @@ -27,9 +27,9 @@
40311 #define MODULE_PARAM_PREFIX "pcie_aspm."
40312
40313 /* Note: those are not register definitions */
40314 -#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
40315 -#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
40316 -#define ASPM_STATE_L1 (4) /* L1 state */
40317 +#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
40318 +#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
40319 +#define ASPM_STATE_L1 (4U) /* L1 state */
40320 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
40321 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
40322
40323 diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
40324 index 8105e32..ca10419 100644
40325 --- a/drivers/pci/probe.c
40326 +++ b/drivers/pci/probe.c
40327 @@ -62,14 +62,14 @@ static ssize_t pci_bus_show_cpuaffinity(struct device *dev,
40328 return ret;
40329 }
40330
40331 -static ssize_t inline pci_bus_show_cpumaskaffinity(struct device *dev,
40332 +static inline ssize_t pci_bus_show_cpumaskaffinity(struct device *dev,
40333 struct device_attribute *attr,
40334 char *buf)
40335 {
40336 return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
40337 }
40338
40339 -static ssize_t inline pci_bus_show_cpulistaffinity(struct device *dev,
40340 +static inline ssize_t pci_bus_show_cpulistaffinity(struct device *dev,
40341 struct device_attribute *attr,
40342 char *buf)
40343 {
40344 diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
40345 index a03ad8c..024b0da 100644
40346 --- a/drivers/pci/proc.c
40347 +++ b/drivers/pci/proc.c
40348 @@ -480,7 +480,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
40349 static int __init pci_proc_init(void)
40350 {
40351 struct pci_dev *dev = NULL;
40352 +
40353 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
40354 +#ifdef CONFIG_GRKERNSEC_PROC_USER
40355 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
40356 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40357 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
40358 +#endif
40359 +#else
40360 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
40361 +#endif
40362 proc_create("devices", 0, proc_bus_pci_dir,
40363 &proc_bus_pci_dev_operations);
40364 proc_initialized = 1;
40365 diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
40366 index 8c02b6c..5584d8e 100644
40367 --- a/drivers/pci/slot.c
40368 +++ b/drivers/pci/slot.c
40369 @@ -29,7 +29,7 @@ static ssize_t pci_slot_attr_store(struct kobject *kobj,
40370 return attribute->store ? attribute->store(slot, buf, len) : -EIO;
40371 }
40372
40373 -static struct sysfs_ops pci_slot_sysfs_ops = {
40374 +static const struct sysfs_ops pci_slot_sysfs_ops = {
40375 .show = pci_slot_attr_show,
40376 .store = pci_slot_attr_store,
40377 };
40378 diff --git a/drivers/pcmcia/pcmcia_ioctl.c b/drivers/pcmcia/pcmcia_ioctl.c
40379 index 30cf71d2..50938f1 100644
40380 --- a/drivers/pcmcia/pcmcia_ioctl.c
40381 +++ b/drivers/pcmcia/pcmcia_ioctl.c
40382 @@ -819,7 +819,7 @@ static int ds_ioctl(struct inode * inode, struct file * file,
40383 return -EFAULT;
40384 }
40385 }
40386 - buf = kmalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
40387 + buf = kzalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
40388 if (!buf)
40389 return -ENOMEM;
40390
40391 diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
40392 index 52183c4..b224c69 100644
40393 --- a/drivers/platform/x86/acer-wmi.c
40394 +++ b/drivers/platform/x86/acer-wmi.c
40395 @@ -918,7 +918,7 @@ static int update_bl_status(struct backlight_device *bd)
40396 return 0;
40397 }
40398
40399 -static struct backlight_ops acer_bl_ops = {
40400 +static const struct backlight_ops acer_bl_ops = {
40401 .get_brightness = read_brightness,
40402 .update_status = update_bl_status,
40403 };
40404 diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
40405 index 767cb61..a87380b 100644
40406 --- a/drivers/platform/x86/asus-laptop.c
40407 +++ b/drivers/platform/x86/asus-laptop.c
40408 @@ -250,7 +250,7 @@ static struct backlight_device *asus_backlight_device;
40409 */
40410 static int read_brightness(struct backlight_device *bd);
40411 static int update_bl_status(struct backlight_device *bd);
40412 -static struct backlight_ops asusbl_ops = {
40413 +static const struct backlight_ops asusbl_ops = {
40414 .get_brightness = read_brightness,
40415 .update_status = update_bl_status,
40416 };
40417 diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c
40418 index d66c07a..a4abaac 100644
40419 --- a/drivers/platform/x86/asus_acpi.c
40420 +++ b/drivers/platform/x86/asus_acpi.c
40421 @@ -1396,7 +1396,7 @@ static int asus_hotk_remove(struct acpi_device *device, int type)
40422 return 0;
40423 }
40424
40425 -static struct backlight_ops asus_backlight_data = {
40426 +static const struct backlight_ops asus_backlight_data = {
40427 .get_brightness = read_brightness,
40428 .update_status = set_brightness_status,
40429 };
40430 diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
40431 index 11003bb..550ff1b 100644
40432 --- a/drivers/platform/x86/compal-laptop.c
40433 +++ b/drivers/platform/x86/compal-laptop.c
40434 @@ -163,7 +163,7 @@ static int bl_update_status(struct backlight_device *b)
40435 return set_lcd_level(b->props.brightness);
40436 }
40437
40438 -static struct backlight_ops compalbl_ops = {
40439 +static const struct backlight_ops compalbl_ops = {
40440 .get_brightness = bl_get_brightness,
40441 .update_status = bl_update_status,
40442 };
40443 diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
40444 index 07a74da..9dc99fa 100644
40445 --- a/drivers/platform/x86/dell-laptop.c
40446 +++ b/drivers/platform/x86/dell-laptop.c
40447 @@ -318,7 +318,7 @@ static int dell_get_intensity(struct backlight_device *bd)
40448 return buffer.output[1];
40449 }
40450
40451 -static struct backlight_ops dell_ops = {
40452 +static const struct backlight_ops dell_ops = {
40453 .get_brightness = dell_get_intensity,
40454 .update_status = dell_send_intensity,
40455 };
40456 diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
40457 index c533b1c..5c81f22 100644
40458 --- a/drivers/platform/x86/eeepc-laptop.c
40459 +++ b/drivers/platform/x86/eeepc-laptop.c
40460 @@ -245,7 +245,7 @@ static struct device *eeepc_hwmon_device;
40461 */
40462 static int read_brightness(struct backlight_device *bd);
40463 static int update_bl_status(struct backlight_device *bd);
40464 -static struct backlight_ops eeepcbl_ops = {
40465 +static const struct backlight_ops eeepcbl_ops = {
40466 .get_brightness = read_brightness,
40467 .update_status = update_bl_status,
40468 };
40469 diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
40470 index bcd4ba8..a249b35 100644
40471 --- a/drivers/platform/x86/fujitsu-laptop.c
40472 +++ b/drivers/platform/x86/fujitsu-laptop.c
40473 @@ -436,7 +436,7 @@ static int bl_update_status(struct backlight_device *b)
40474 return ret;
40475 }
40476
40477 -static struct backlight_ops fujitsubl_ops = {
40478 +static const struct backlight_ops fujitsubl_ops = {
40479 .get_brightness = bl_get_brightness,
40480 .update_status = bl_update_status,
40481 };
40482 diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
40483 index 759763d..1093ba2 100644
40484 --- a/drivers/platform/x86/msi-laptop.c
40485 +++ b/drivers/platform/x86/msi-laptop.c
40486 @@ -161,7 +161,7 @@ static int bl_update_status(struct backlight_device *b)
40487 return set_lcd_level(b->props.brightness);
40488 }
40489
40490 -static struct backlight_ops msibl_ops = {
40491 +static const struct backlight_ops msibl_ops = {
40492 .get_brightness = bl_get_brightness,
40493 .update_status = bl_update_status,
40494 };
40495 diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
40496 index fe7cf01..9012d8d 100644
40497 --- a/drivers/platform/x86/panasonic-laptop.c
40498 +++ b/drivers/platform/x86/panasonic-laptop.c
40499 @@ -352,7 +352,7 @@ static int bl_set_status(struct backlight_device *bd)
40500 return acpi_pcc_write_sset(pcc, SINF_DC_CUR_BRIGHT, bright);
40501 }
40502
40503 -static struct backlight_ops pcc_backlight_ops = {
40504 +static const struct backlight_ops pcc_backlight_ops = {
40505 .get_brightness = bl_get,
40506 .update_status = bl_set_status,
40507 };
40508 diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
40509 index a2a742c..b37e25e 100644
40510 --- a/drivers/platform/x86/sony-laptop.c
40511 +++ b/drivers/platform/x86/sony-laptop.c
40512 @@ -850,7 +850,7 @@ static int sony_backlight_get_brightness(struct backlight_device *bd)
40513 }
40514
40515 static struct backlight_device *sony_backlight_device;
40516 -static struct backlight_ops sony_backlight_ops = {
40517 +static const struct backlight_ops sony_backlight_ops = {
40518 .update_status = sony_backlight_update_status,
40519 .get_brightness = sony_backlight_get_brightness,
40520 };
40521 diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
40522 index 68271ae..5e8fb10 100644
40523 --- a/drivers/platform/x86/thinkpad_acpi.c
40524 +++ b/drivers/platform/x86/thinkpad_acpi.c
40525 @@ -2139,7 +2139,7 @@ static int hotkey_mask_get(void)
40526 return 0;
40527 }
40528
40529 -void static hotkey_mask_warn_incomplete_mask(void)
40530 +static void hotkey_mask_warn_incomplete_mask(void)
40531 {
40532 /* log only what the user can fix... */
40533 const u32 wantedmask = hotkey_driver_mask &
40534 @@ -6125,7 +6125,7 @@ static void tpacpi_brightness_notify_change(void)
40535 BACKLIGHT_UPDATE_HOTKEY);
40536 }
40537
40538 -static struct backlight_ops ibm_backlight_data = {
40539 +static const struct backlight_ops ibm_backlight_data = {
40540 .get_brightness = brightness_get,
40541 .update_status = brightness_update_status,
40542 };
40543 diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
40544 index 51c0a8b..0786629 100644
40545 --- a/drivers/platform/x86/toshiba_acpi.c
40546 +++ b/drivers/platform/x86/toshiba_acpi.c
40547 @@ -671,7 +671,7 @@ static acpi_status remove_device(void)
40548 return AE_OK;
40549 }
40550
40551 -static struct backlight_ops toshiba_backlight_data = {
40552 +static const struct backlight_ops toshiba_backlight_data = {
40553 .get_brightness = get_lcd,
40554 .update_status = set_lcd_status,
40555 };
40556 diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
40557 index fc83783c..cf370d7 100644
40558 --- a/drivers/pnp/pnpbios/bioscalls.c
40559 +++ b/drivers/pnp/pnpbios/bioscalls.c
40560 @@ -60,7 +60,7 @@ do { \
40561 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
40562 } while(0)
40563
40564 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
40565 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
40566 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
40567
40568 /*
40569 @@ -97,7 +97,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
40570
40571 cpu = get_cpu();
40572 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
40573 +
40574 + pax_open_kernel();
40575 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
40576 + pax_close_kernel();
40577
40578 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
40579 spin_lock_irqsave(&pnp_bios_lock, flags);
40580 @@ -135,7 +138,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
40581 :"memory");
40582 spin_unlock_irqrestore(&pnp_bios_lock, flags);
40583
40584 + pax_open_kernel();
40585 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
40586 + pax_close_kernel();
40587 +
40588 put_cpu();
40589
40590 /* If we get here and this is set then the PnP BIOS faulted on us. */
40591 @@ -469,7 +475,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
40592 return status;
40593 }
40594
40595 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
40596 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
40597 {
40598 int i;
40599
40600 @@ -477,6 +483,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
40601 pnp_bios_callpoint.offset = header->fields.pm16offset;
40602 pnp_bios_callpoint.segment = PNP_CS16;
40603
40604 + pax_open_kernel();
40605 +
40606 for_each_possible_cpu(i) {
40607 struct desc_struct *gdt = get_cpu_gdt_table(i);
40608 if (!gdt)
40609 @@ -488,4 +496,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
40610 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
40611 (unsigned long)__va(header->fields.pm16dseg));
40612 }
40613 +
40614 + pax_close_kernel();
40615 }
40616 diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
40617 index ba97654..66b99d4 100644
40618 --- a/drivers/pnp/resource.c
40619 +++ b/drivers/pnp/resource.c
40620 @@ -355,7 +355,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
40621 return 1;
40622
40623 /* check if the resource is valid */
40624 - if (*irq < 0 || *irq > 15)
40625 + if (*irq > 15)
40626 return 0;
40627
40628 /* check if the resource is reserved */
40629 @@ -419,7 +419,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
40630 return 1;
40631
40632 /* check if the resource is valid */
40633 - if (*dma < 0 || *dma == 4 || *dma > 7)
40634 + if (*dma == 4 || *dma > 7)
40635 return 0;
40636
40637 /* check if the resource is reserved */
40638 diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
40639 index 62bb981..24a2dc9 100644
40640 --- a/drivers/power/bq27x00_battery.c
40641 +++ b/drivers/power/bq27x00_battery.c
40642 @@ -44,7 +44,7 @@ struct bq27x00_device_info;
40643 struct bq27x00_access_methods {
40644 int (*read)(u8 reg, int *rt_value, int b_single,
40645 struct bq27x00_device_info *di);
40646 -};
40647 +} __no_const;
40648
40649 struct bq27x00_device_info {
40650 struct device *dev;
40651 diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
40652 index 62227cd..b5b538b 100644
40653 --- a/drivers/rtc/rtc-dev.c
40654 +++ b/drivers/rtc/rtc-dev.c
40655 @@ -14,6 +14,7 @@
40656 #include <linux/module.h>
40657 #include <linux/rtc.h>
40658 #include <linux/sched.h>
40659 +#include <linux/grsecurity.h>
40660 #include "rtc-core.h"
40661
40662 static dev_t rtc_devt;
40663 @@ -357,6 +358,8 @@ static long rtc_dev_ioctl(struct file *file,
40664 if (copy_from_user(&tm, uarg, sizeof(tm)))
40665 return -EFAULT;
40666
40667 + gr_log_timechange();
40668 +
40669 return rtc_set_time(rtc, &tm);
40670
40671 case RTC_PIE_ON:
40672 diff --git a/drivers/s390/cio/qdio_perf.c b/drivers/s390/cio/qdio_perf.c
40673 index 968e3c7..fbc637a 100644
40674 --- a/drivers/s390/cio/qdio_perf.c
40675 +++ b/drivers/s390/cio/qdio_perf.c
40676 @@ -31,51 +31,51 @@ static struct proc_dir_entry *qdio_perf_pde;
40677 static int qdio_perf_proc_show(struct seq_file *m, void *v)
40678 {
40679 seq_printf(m, "Number of qdio interrupts\t\t\t: %li\n",
40680 - (long)atomic_long_read(&perf_stats.qdio_int));
40681 + (long)atomic_long_read_unchecked(&perf_stats.qdio_int));
40682 seq_printf(m, "Number of PCI interrupts\t\t\t: %li\n",
40683 - (long)atomic_long_read(&perf_stats.pci_int));
40684 + (long)atomic_long_read_unchecked(&perf_stats.pci_int));
40685 seq_printf(m, "Number of adapter interrupts\t\t\t: %li\n",
40686 - (long)atomic_long_read(&perf_stats.thin_int));
40687 + (long)atomic_long_read_unchecked(&perf_stats.thin_int));
40688 seq_printf(m, "\n");
40689 seq_printf(m, "Inbound tasklet runs\t\t\t\t: %li\n",
40690 - (long)atomic_long_read(&perf_stats.tasklet_inbound));
40691 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_inbound));
40692 seq_printf(m, "Outbound tasklet runs\t\t\t\t: %li\n",
40693 - (long)atomic_long_read(&perf_stats.tasklet_outbound));
40694 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_outbound));
40695 seq_printf(m, "Adapter interrupt tasklet runs/loops\t\t: %li/%li\n",
40696 - (long)atomic_long_read(&perf_stats.tasklet_thinint),
40697 - (long)atomic_long_read(&perf_stats.tasklet_thinint_loop));
40698 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint),
40699 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint_loop));
40700 seq_printf(m, "Adapter interrupt inbound tasklet runs/loops\t: %li/%li\n",
40701 - (long)atomic_long_read(&perf_stats.thinint_inbound),
40702 - (long)atomic_long_read(&perf_stats.thinint_inbound_loop));
40703 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound),
40704 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop));
40705 seq_printf(m, "\n");
40706 seq_printf(m, "Number of SIGA In issued\t\t\t: %li\n",
40707 - (long)atomic_long_read(&perf_stats.siga_in));
40708 + (long)atomic_long_read_unchecked(&perf_stats.siga_in));
40709 seq_printf(m, "Number of SIGA Out issued\t\t\t: %li\n",
40710 - (long)atomic_long_read(&perf_stats.siga_out));
40711 + (long)atomic_long_read_unchecked(&perf_stats.siga_out));
40712 seq_printf(m, "Number of SIGA Sync issued\t\t\t: %li\n",
40713 - (long)atomic_long_read(&perf_stats.siga_sync));
40714 + (long)atomic_long_read_unchecked(&perf_stats.siga_sync));
40715 seq_printf(m, "\n");
40716 seq_printf(m, "Number of inbound transfers\t\t\t: %li\n",
40717 - (long)atomic_long_read(&perf_stats.inbound_handler));
40718 + (long)atomic_long_read_unchecked(&perf_stats.inbound_handler));
40719 seq_printf(m, "Number of outbound transfers\t\t\t: %li\n",
40720 - (long)atomic_long_read(&perf_stats.outbound_handler));
40721 + (long)atomic_long_read_unchecked(&perf_stats.outbound_handler));
40722 seq_printf(m, "\n");
40723 seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n",
40724 - (long)atomic_long_read(&perf_stats.fast_requeue));
40725 + (long)atomic_long_read_unchecked(&perf_stats.fast_requeue));
40726 seq_printf(m, "Number of outbound target full condition\t: %li\n",
40727 - (long)atomic_long_read(&perf_stats.outbound_target_full));
40728 + (long)atomic_long_read_unchecked(&perf_stats.outbound_target_full));
40729 seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n",
40730 - (long)atomic_long_read(&perf_stats.debug_tl_out_timer));
40731 + (long)atomic_long_read_unchecked(&perf_stats.debug_tl_out_timer));
40732 seq_printf(m, "Number of stop polling calls\t\t\t: %li\n",
40733 - (long)atomic_long_read(&perf_stats.debug_stop_polling));
40734 + (long)atomic_long_read_unchecked(&perf_stats.debug_stop_polling));
40735 seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n",
40736 - (long)atomic_long_read(&perf_stats.thinint_inbound_loop2));
40737 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop2));
40738 seq_printf(m, "QEBSM EQBS total/incomplete\t\t\t: %li/%li\n",
40739 - (long)atomic_long_read(&perf_stats.debug_eqbs_all),
40740 - (long)atomic_long_read(&perf_stats.debug_eqbs_incomplete));
40741 + (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_all),
40742 + (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_incomplete));
40743 seq_printf(m, "QEBSM SQBS total/incomplete\t\t\t: %li/%li\n",
40744 - (long)atomic_long_read(&perf_stats.debug_sqbs_all),
40745 - (long)atomic_long_read(&perf_stats.debug_sqbs_incomplete));
40746 + (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_all),
40747 + (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_incomplete));
40748 seq_printf(m, "\n");
40749 return 0;
40750 }
40751 diff --git a/drivers/s390/cio/qdio_perf.h b/drivers/s390/cio/qdio_perf.h
40752 index ff4504c..b3604c3 100644
40753 --- a/drivers/s390/cio/qdio_perf.h
40754 +++ b/drivers/s390/cio/qdio_perf.h
40755 @@ -13,46 +13,46 @@
40756
40757 struct qdio_perf_stats {
40758 /* interrupt handler calls */
40759 - atomic_long_t qdio_int;
40760 - atomic_long_t pci_int;
40761 - atomic_long_t thin_int;
40762 + atomic_long_unchecked_t qdio_int;
40763 + atomic_long_unchecked_t pci_int;
40764 + atomic_long_unchecked_t thin_int;
40765
40766 /* tasklet runs */
40767 - atomic_long_t tasklet_inbound;
40768 - atomic_long_t tasklet_outbound;
40769 - atomic_long_t tasklet_thinint;
40770 - atomic_long_t tasklet_thinint_loop;
40771 - atomic_long_t thinint_inbound;
40772 - atomic_long_t thinint_inbound_loop;
40773 - atomic_long_t thinint_inbound_loop2;
40774 + atomic_long_unchecked_t tasklet_inbound;
40775 + atomic_long_unchecked_t tasklet_outbound;
40776 + atomic_long_unchecked_t tasklet_thinint;
40777 + atomic_long_unchecked_t tasklet_thinint_loop;
40778 + atomic_long_unchecked_t thinint_inbound;
40779 + atomic_long_unchecked_t thinint_inbound_loop;
40780 + atomic_long_unchecked_t thinint_inbound_loop2;
40781
40782 /* signal adapter calls */
40783 - atomic_long_t siga_out;
40784 - atomic_long_t siga_in;
40785 - atomic_long_t siga_sync;
40786 + atomic_long_unchecked_t siga_out;
40787 + atomic_long_unchecked_t siga_in;
40788 + atomic_long_unchecked_t siga_sync;
40789
40790 /* misc */
40791 - atomic_long_t inbound_handler;
40792 - atomic_long_t outbound_handler;
40793 - atomic_long_t fast_requeue;
40794 - atomic_long_t outbound_target_full;
40795 + atomic_long_unchecked_t inbound_handler;
40796 + atomic_long_unchecked_t outbound_handler;
40797 + atomic_long_unchecked_t fast_requeue;
40798 + atomic_long_unchecked_t outbound_target_full;
40799
40800 /* for debugging */
40801 - atomic_long_t debug_tl_out_timer;
40802 - atomic_long_t debug_stop_polling;
40803 - atomic_long_t debug_eqbs_all;
40804 - atomic_long_t debug_eqbs_incomplete;
40805 - atomic_long_t debug_sqbs_all;
40806 - atomic_long_t debug_sqbs_incomplete;
40807 + atomic_long_unchecked_t debug_tl_out_timer;
40808 + atomic_long_unchecked_t debug_stop_polling;
40809 + atomic_long_unchecked_t debug_eqbs_all;
40810 + atomic_long_unchecked_t debug_eqbs_incomplete;
40811 + atomic_long_unchecked_t debug_sqbs_all;
40812 + atomic_long_unchecked_t debug_sqbs_incomplete;
40813 };
40814
40815 extern struct qdio_perf_stats perf_stats;
40816 extern int qdio_performance_stats;
40817
40818 -static inline void qdio_perf_stat_inc(atomic_long_t *count)
40819 +static inline void qdio_perf_stat_inc(atomic_long_unchecked_t *count)
40820 {
40821 if (qdio_performance_stats)
40822 - atomic_long_inc(count);
40823 + atomic_long_inc_unchecked(count);
40824 }
40825
40826 int qdio_setup_perf_stats(void);
40827 diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
40828 index 1ddcf40..a85f062 100644
40829 --- a/drivers/scsi/BusLogic.c
40830 +++ b/drivers/scsi/BusLogic.c
40831 @@ -961,6 +961,8 @@ static int __init BusLogic_InitializeFlashPointProbeInfo(struct BusLogic_HostAda
40832 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
40833 *PrototypeHostAdapter)
40834 {
40835 + pax_track_stack();
40836 +
40837 /*
40838 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
40839 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
40840 diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
40841 index cdbdec9..b7d560b 100644
40842 --- a/drivers/scsi/aacraid/aacraid.h
40843 +++ b/drivers/scsi/aacraid/aacraid.h
40844 @@ -471,7 +471,7 @@ struct adapter_ops
40845 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
40846 /* Administrative operations */
40847 int (*adapter_comm)(struct aac_dev * dev, int comm);
40848 -};
40849 +} __no_const;
40850
40851 /*
40852 * Define which interrupt handler needs to be installed
40853 diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
40854 index a5b8e7b..a6a0e43 100644
40855 --- a/drivers/scsi/aacraid/commctrl.c
40856 +++ b/drivers/scsi/aacraid/commctrl.c
40857 @@ -481,6 +481,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
40858 u32 actual_fibsize64, actual_fibsize = 0;
40859 int i;
40860
40861 + pax_track_stack();
40862
40863 if (dev->in_reset) {
40864 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
40865 diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
40866 index 9b97c3e..f099725 100644
40867 --- a/drivers/scsi/aacraid/linit.c
40868 +++ b/drivers/scsi/aacraid/linit.c
40869 @@ -91,7 +91,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
40870 #elif defined(__devinitconst)
40871 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
40872 #else
40873 -static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
40874 +static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
40875 #endif
40876 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
40877 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
40878 diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
40879 index 996f722..9127845 100644
40880 --- a/drivers/scsi/aic94xx/aic94xx_init.c
40881 +++ b/drivers/scsi/aic94xx/aic94xx_init.c
40882 @@ -485,7 +485,7 @@ static ssize_t asd_show_update_bios(struct device *dev,
40883 flash_error_table[i].reason);
40884 }
40885
40886 -static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUGO,
40887 +static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR,
40888 asd_show_update_bios, asd_store_update_bios);
40889
40890 static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
40891 @@ -1011,7 +1011,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
40892 .lldd_control_phy = asd_control_phy,
40893 };
40894
40895 -static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
40896 +static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
40897 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
40898 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
40899 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
40900 diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
40901 index 58efd4b..cb48dc7 100644
40902 --- a/drivers/scsi/bfa/bfa_ioc.h
40903 +++ b/drivers/scsi/bfa/bfa_ioc.h
40904 @@ -127,7 +127,7 @@ struct bfa_ioc_cbfn_s {
40905 bfa_ioc_disable_cbfn_t disable_cbfn;
40906 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
40907 bfa_ioc_reset_cbfn_t reset_cbfn;
40908 -};
40909 +} __no_const;
40910
40911 /**
40912 * Heartbeat failure notification queue element.
40913 diff --git a/drivers/scsi/bfa/bfa_iocfc.h b/drivers/scsi/bfa/bfa_iocfc.h
40914 index 7ad177e..5503586 100644
40915 --- a/drivers/scsi/bfa/bfa_iocfc.h
40916 +++ b/drivers/scsi/bfa/bfa_iocfc.h
40917 @@ -61,7 +61,7 @@ struct bfa_hwif_s {
40918 void (*hw_isr_mode_set)(struct bfa_s *bfa, bfa_boolean_t msix);
40919 void (*hw_msix_getvecs)(struct bfa_s *bfa, u32 *vecmap,
40920 u32 *nvecs, u32 *maxvec);
40921 -};
40922 +} __no_const;
40923 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
40924
40925 struct bfa_iocfc_s {
40926 diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
40927 index 4967643..cbec06b 100644
40928 --- a/drivers/scsi/dpt_i2o.c
40929 +++ b/drivers/scsi/dpt_i2o.c
40930 @@ -1804,6 +1804,8 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
40931 dma_addr_t addr;
40932 ulong flags = 0;
40933
40934 + pax_track_stack();
40935 +
40936 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
40937 // get user msg size in u32s
40938 if(get_user(size, &user_msg[0])){
40939 @@ -2297,6 +2299,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
40940 s32 rcode;
40941 dma_addr_t addr;
40942
40943 + pax_track_stack();
40944 +
40945 memset(msg, 0 , sizeof(msg));
40946 len = scsi_bufflen(cmd);
40947 direction = 0x00000000;
40948 diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
40949 index c7076ce..e20c67c 100644
40950 --- a/drivers/scsi/eata.c
40951 +++ b/drivers/scsi/eata.c
40952 @@ -1087,6 +1087,8 @@ static int port_detect(unsigned long port_base, unsigned int j,
40953 struct hostdata *ha;
40954 char name[16];
40955
40956 + pax_track_stack();
40957 +
40958 sprintf(name, "%s%d", driver_name, j);
40959
40960 if (!request_region(port_base, REGION_SIZE, driver_name)) {
40961 diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c
40962 index 11ae5c9..891daec 100644
40963 --- a/drivers/scsi/fcoe/libfcoe.c
40964 +++ b/drivers/scsi/fcoe/libfcoe.c
40965 @@ -809,6 +809,8 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
40966 size_t rlen;
40967 size_t dlen;
40968
40969 + pax_track_stack();
40970 +
40971 fiph = (struct fip_header *)skb->data;
40972 sub = fiph->fip_subcode;
40973 if (sub != FIP_SC_REQ && sub != FIP_SC_REP)
40974 diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
40975 index 71c7bbe..e93088a 100644
40976 --- a/drivers/scsi/fnic/fnic_main.c
40977 +++ b/drivers/scsi/fnic/fnic_main.c
40978 @@ -669,7 +669,7 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
40979 /* Start local port initiatialization */
40980
40981 lp->link_up = 0;
40982 - lp->tt = fnic_transport_template;
40983 + memcpy((void *)&lp->tt, &fnic_transport_template, sizeof(fnic_transport_template));
40984
40985 lp->max_retry_count = fnic->config.flogi_retries;
40986 lp->max_rport_retry_count = fnic->config.plogi_retries;
40987 diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
40988 index bb96d74..9ec3ce4 100644
40989 --- a/drivers/scsi/gdth.c
40990 +++ b/drivers/scsi/gdth.c
40991 @@ -4102,6 +4102,8 @@ static int ioc_lockdrv(void __user *arg)
40992 ulong flags;
40993 gdth_ha_str *ha;
40994
40995 + pax_track_stack();
40996 +
40997 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
40998 return -EFAULT;
40999 ha = gdth_find_ha(ldrv.ionode);
41000 @@ -4134,6 +4136,8 @@ static int ioc_resetdrv(void __user *arg, char *cmnd)
41001 gdth_ha_str *ha;
41002 int rval;
41003
41004 + pax_track_stack();
41005 +
41006 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
41007 res.number >= MAX_HDRIVES)
41008 return -EFAULT;
41009 @@ -4169,6 +4173,8 @@ static int ioc_general(void __user *arg, char *cmnd)
41010 gdth_ha_str *ha;
41011 int rval;
41012
41013 + pax_track_stack();
41014 +
41015 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
41016 return -EFAULT;
41017 ha = gdth_find_ha(gen.ionode);
41018 @@ -4625,6 +4631,9 @@ static void gdth_flush(gdth_ha_str *ha)
41019 int i;
41020 gdth_cmd_str gdtcmd;
41021 char cmnd[MAX_COMMAND_SIZE];
41022 +
41023 + pax_track_stack();
41024 +
41025 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
41026
41027 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
41028 diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
41029 index 1258da3..20d8ae6 100644
41030 --- a/drivers/scsi/gdth_proc.c
41031 +++ b/drivers/scsi/gdth_proc.c
41032 @@ -46,6 +46,9 @@ static int gdth_set_asc_info(struct Scsi_Host *host, char *buffer,
41033 ulong64 paddr;
41034
41035 char cmnd[MAX_COMMAND_SIZE];
41036 +
41037 + pax_track_stack();
41038 +
41039 memset(cmnd, 0xff, 12);
41040 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
41041
41042 @@ -174,6 +177,8 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
41043 gdth_hget_str *phg;
41044 char cmnd[MAX_COMMAND_SIZE];
41045
41046 + pax_track_stack();
41047 +
41048 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
41049 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
41050 if (!gdtcmd || !estr)
41051 diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
41052 index d03a926..f324286 100644
41053 --- a/drivers/scsi/hosts.c
41054 +++ b/drivers/scsi/hosts.c
41055 @@ -40,7 +40,7 @@
41056 #include "scsi_logging.h"
41057
41058
41059 -static atomic_t scsi_host_next_hn; /* host_no for next new host */
41060 +static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
41061
41062
41063 static void scsi_host_cls_release(struct device *dev)
41064 @@ -347,7 +347,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
41065 * subtract one because we increment first then return, but we need to
41066 * know what the next host number was before increment
41067 */
41068 - shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
41069 + shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
41070 shost->dma_channel = 0xff;
41071
41072 /* These three are default values which can be overridden */
41073 diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
41074 index a601159..55e19d2 100644
41075 --- a/drivers/scsi/ipr.c
41076 +++ b/drivers/scsi/ipr.c
41077 @@ -5286,7 +5286,7 @@ static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
41078 return true;
41079 }
41080
41081 -static struct ata_port_operations ipr_sata_ops = {
41082 +static const struct ata_port_operations ipr_sata_ops = {
41083 .phy_reset = ipr_ata_phy_reset,
41084 .hardreset = ipr_sata_reset,
41085 .post_internal_cmd = ipr_ata_post_internal,
41086 diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
41087 index 4e49fbc..97907ff 100644
41088 --- a/drivers/scsi/ips.h
41089 +++ b/drivers/scsi/ips.h
41090 @@ -1027,7 +1027,7 @@ typedef struct {
41091 int (*intr)(struct ips_ha *);
41092 void (*enableint)(struct ips_ha *);
41093 uint32_t (*statupd)(struct ips_ha *);
41094 -} ips_hw_func_t;
41095 +} __no_const ips_hw_func_t;
41096
41097 typedef struct ips_ha {
41098 uint8_t ha_id[IPS_MAX_CHANNELS+1];
41099 diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
41100 index c1c1574..a9c9348 100644
41101 --- a/drivers/scsi/libfc/fc_exch.c
41102 +++ b/drivers/scsi/libfc/fc_exch.c
41103 @@ -86,12 +86,12 @@ struct fc_exch_mgr {
41104 * all together if not used XXX
41105 */
41106 struct {
41107 - atomic_t no_free_exch;
41108 - atomic_t no_free_exch_xid;
41109 - atomic_t xid_not_found;
41110 - atomic_t xid_busy;
41111 - atomic_t seq_not_found;
41112 - atomic_t non_bls_resp;
41113 + atomic_unchecked_t no_free_exch;
41114 + atomic_unchecked_t no_free_exch_xid;
41115 + atomic_unchecked_t xid_not_found;
41116 + atomic_unchecked_t xid_busy;
41117 + atomic_unchecked_t seq_not_found;
41118 + atomic_unchecked_t non_bls_resp;
41119 } stats;
41120 };
41121 #define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
41122 @@ -510,7 +510,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
41123 /* allocate memory for exchange */
41124 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
41125 if (!ep) {
41126 - atomic_inc(&mp->stats.no_free_exch);
41127 + atomic_inc_unchecked(&mp->stats.no_free_exch);
41128 goto out;
41129 }
41130 memset(ep, 0, sizeof(*ep));
41131 @@ -557,7 +557,7 @@ out:
41132 return ep;
41133 err:
41134 spin_unlock_bh(&pool->lock);
41135 - atomic_inc(&mp->stats.no_free_exch_xid);
41136 + atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
41137 mempool_free(ep, mp->ep_pool);
41138 return NULL;
41139 }
41140 @@ -690,7 +690,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
41141 xid = ntohs(fh->fh_ox_id); /* we originated exch */
41142 ep = fc_exch_find(mp, xid);
41143 if (!ep) {
41144 - atomic_inc(&mp->stats.xid_not_found);
41145 + atomic_inc_unchecked(&mp->stats.xid_not_found);
41146 reject = FC_RJT_OX_ID;
41147 goto out;
41148 }
41149 @@ -720,7 +720,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
41150 ep = fc_exch_find(mp, xid);
41151 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
41152 if (ep) {
41153 - atomic_inc(&mp->stats.xid_busy);
41154 + atomic_inc_unchecked(&mp->stats.xid_busy);
41155 reject = FC_RJT_RX_ID;
41156 goto rel;
41157 }
41158 @@ -731,7 +731,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
41159 }
41160 xid = ep->xid; /* get our XID */
41161 } else if (!ep) {
41162 - atomic_inc(&mp->stats.xid_not_found);
41163 + atomic_inc_unchecked(&mp->stats.xid_not_found);
41164 reject = FC_RJT_RX_ID; /* XID not found */
41165 goto out;
41166 }
41167 @@ -752,7 +752,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
41168 } else {
41169 sp = &ep->seq;
41170 if (sp->id != fh->fh_seq_id) {
41171 - atomic_inc(&mp->stats.seq_not_found);
41172 + atomic_inc_unchecked(&mp->stats.seq_not_found);
41173 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
41174 goto rel;
41175 }
41176 @@ -1163,22 +1163,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
41177
41178 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
41179 if (!ep) {
41180 - atomic_inc(&mp->stats.xid_not_found);
41181 + atomic_inc_unchecked(&mp->stats.xid_not_found);
41182 goto out;
41183 }
41184 if (ep->esb_stat & ESB_ST_COMPLETE) {
41185 - atomic_inc(&mp->stats.xid_not_found);
41186 + atomic_inc_unchecked(&mp->stats.xid_not_found);
41187 goto out;
41188 }
41189 if (ep->rxid == FC_XID_UNKNOWN)
41190 ep->rxid = ntohs(fh->fh_rx_id);
41191 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
41192 - atomic_inc(&mp->stats.xid_not_found);
41193 + atomic_inc_unchecked(&mp->stats.xid_not_found);
41194 goto rel;
41195 }
41196 if (ep->did != ntoh24(fh->fh_s_id) &&
41197 ep->did != FC_FID_FLOGI) {
41198 - atomic_inc(&mp->stats.xid_not_found);
41199 + atomic_inc_unchecked(&mp->stats.xid_not_found);
41200 goto rel;
41201 }
41202 sof = fr_sof(fp);
41203 @@ -1189,7 +1189,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
41204 } else {
41205 sp = &ep->seq;
41206 if (sp->id != fh->fh_seq_id) {
41207 - atomic_inc(&mp->stats.seq_not_found);
41208 + atomic_inc_unchecked(&mp->stats.seq_not_found);
41209 goto rel;
41210 }
41211 }
41212 @@ -1249,9 +1249,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
41213 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
41214
41215 if (!sp)
41216 - atomic_inc(&mp->stats.xid_not_found);
41217 + atomic_inc_unchecked(&mp->stats.xid_not_found);
41218 else
41219 - atomic_inc(&mp->stats.non_bls_resp);
41220 + atomic_inc_unchecked(&mp->stats.non_bls_resp);
41221
41222 fc_frame_free(fp);
41223 }
41224 diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
41225 index 0ee989f..a582241 100644
41226 --- a/drivers/scsi/libsas/sas_ata.c
41227 +++ b/drivers/scsi/libsas/sas_ata.c
41228 @@ -343,7 +343,7 @@ static int sas_ata_scr_read(struct ata_link *link, unsigned int sc_reg_in,
41229 }
41230 }
41231
41232 -static struct ata_port_operations sas_sata_ops = {
41233 +static const struct ata_port_operations sas_sata_ops = {
41234 .phy_reset = sas_ata_phy_reset,
41235 .post_internal_cmd = sas_ata_post_internal,
41236 .qc_defer = ata_std_qc_defer,
41237 diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
41238 index aa10f79..5cc79e4 100644
41239 --- a/drivers/scsi/lpfc/lpfc.h
41240 +++ b/drivers/scsi/lpfc/lpfc.h
41241 @@ -400,7 +400,7 @@ struct lpfc_vport {
41242 struct dentry *debug_nodelist;
41243 struct dentry *vport_debugfs_root;
41244 struct lpfc_debugfs_trc *disc_trc;
41245 - atomic_t disc_trc_cnt;
41246 + atomic_unchecked_t disc_trc_cnt;
41247 #endif
41248 uint8_t stat_data_enabled;
41249 uint8_t stat_data_blocked;
41250 @@ -725,8 +725,8 @@ struct lpfc_hba {
41251 struct timer_list fabric_block_timer;
41252 unsigned long bit_flags;
41253 #define FABRIC_COMANDS_BLOCKED 0
41254 - atomic_t num_rsrc_err;
41255 - atomic_t num_cmd_success;
41256 + atomic_unchecked_t num_rsrc_err;
41257 + atomic_unchecked_t num_cmd_success;
41258 unsigned long last_rsrc_error_time;
41259 unsigned long last_ramp_down_time;
41260 unsigned long last_ramp_up_time;
41261 @@ -740,7 +740,7 @@ struct lpfc_hba {
41262 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
41263 struct dentry *debug_slow_ring_trc;
41264 struct lpfc_debugfs_trc *slow_ring_trc;
41265 - atomic_t slow_ring_trc_cnt;
41266 + atomic_unchecked_t slow_ring_trc_cnt;
41267 #endif
41268
41269 /* Used for deferred freeing of ELS data buffers */
41270 diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
41271 index 8d0f0de..7c77a62 100644
41272 --- a/drivers/scsi/lpfc/lpfc_debugfs.c
41273 +++ b/drivers/scsi/lpfc/lpfc_debugfs.c
41274 @@ -124,7 +124,7 @@ struct lpfc_debug {
41275 int len;
41276 };
41277
41278 -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
41279 +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
41280 static unsigned long lpfc_debugfs_start_time = 0L;
41281
41282 /**
41283 @@ -158,7 +158,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
41284 lpfc_debugfs_enable = 0;
41285
41286 len = 0;
41287 - index = (atomic_read(&vport->disc_trc_cnt) + 1) &
41288 + index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
41289 (lpfc_debugfs_max_disc_trc - 1);
41290 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
41291 dtp = vport->disc_trc + i;
41292 @@ -219,7 +219,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
41293 lpfc_debugfs_enable = 0;
41294
41295 len = 0;
41296 - index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
41297 + index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
41298 (lpfc_debugfs_max_slow_ring_trc - 1);
41299 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
41300 dtp = phba->slow_ring_trc + i;
41301 @@ -397,6 +397,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size)
41302 uint32_t *ptr;
41303 char buffer[1024];
41304
41305 + pax_track_stack();
41306 +
41307 off = 0;
41308 spin_lock_irq(&phba->hbalock);
41309
41310 @@ -634,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
41311 !vport || !vport->disc_trc)
41312 return;
41313
41314 - index = atomic_inc_return(&vport->disc_trc_cnt) &
41315 + index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
41316 (lpfc_debugfs_max_disc_trc - 1);
41317 dtp = vport->disc_trc + index;
41318 dtp->fmt = fmt;
41319 dtp->data1 = data1;
41320 dtp->data2 = data2;
41321 dtp->data3 = data3;
41322 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
41323 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
41324 dtp->jif = jiffies;
41325 #endif
41326 return;
41327 @@ -672,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
41328 !phba || !phba->slow_ring_trc)
41329 return;
41330
41331 - index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
41332 + index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
41333 (lpfc_debugfs_max_slow_ring_trc - 1);
41334 dtp = phba->slow_ring_trc + index;
41335 dtp->fmt = fmt;
41336 dtp->data1 = data1;
41337 dtp->data2 = data2;
41338 dtp->data3 = data3;
41339 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
41340 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
41341 dtp->jif = jiffies;
41342 #endif
41343 return;
41344 @@ -1364,7 +1366,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
41345 "slow_ring buffer\n");
41346 goto debug_failed;
41347 }
41348 - atomic_set(&phba->slow_ring_trc_cnt, 0);
41349 + atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
41350 memset(phba->slow_ring_trc, 0,
41351 (sizeof(struct lpfc_debugfs_trc) *
41352 lpfc_debugfs_max_slow_ring_trc));
41353 @@ -1410,7 +1412,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
41354 "buffer\n");
41355 goto debug_failed;
41356 }
41357 - atomic_set(&vport->disc_trc_cnt, 0);
41358 + atomic_set_unchecked(&vport->disc_trc_cnt, 0);
41359
41360 snprintf(name, sizeof(name), "discovery_trace");
41361 vport->debug_disc_trc =
41362 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
41363 index 549bc7d..8189dbb 100644
41364 --- a/drivers/scsi/lpfc/lpfc_init.c
41365 +++ b/drivers/scsi/lpfc/lpfc_init.c
41366 @@ -8021,8 +8021,10 @@ lpfc_init(void)
41367 printk(LPFC_COPYRIGHT "\n");
41368
41369 if (lpfc_enable_npiv) {
41370 - lpfc_transport_functions.vport_create = lpfc_vport_create;
41371 - lpfc_transport_functions.vport_delete = lpfc_vport_delete;
41372 + pax_open_kernel();
41373 + *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
41374 + *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
41375 + pax_close_kernel();
41376 }
41377 lpfc_transport_template =
41378 fc_attach_transport(&lpfc_transport_functions);
41379 diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
41380 index c88f59f..ff2a42f 100644
41381 --- a/drivers/scsi/lpfc/lpfc_scsi.c
41382 +++ b/drivers/scsi/lpfc/lpfc_scsi.c
41383 @@ -259,7 +259,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
41384 uint32_t evt_posted;
41385
41386 spin_lock_irqsave(&phba->hbalock, flags);
41387 - atomic_inc(&phba->num_rsrc_err);
41388 + atomic_inc_unchecked(&phba->num_rsrc_err);
41389 phba->last_rsrc_error_time = jiffies;
41390
41391 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
41392 @@ -300,7 +300,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
41393 unsigned long flags;
41394 struct lpfc_hba *phba = vport->phba;
41395 uint32_t evt_posted;
41396 - atomic_inc(&phba->num_cmd_success);
41397 + atomic_inc_unchecked(&phba->num_cmd_success);
41398
41399 if (vport->cfg_lun_queue_depth <= queue_depth)
41400 return;
41401 @@ -343,8 +343,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
41402 int i;
41403 struct lpfc_rport_data *rdata;
41404
41405 - num_rsrc_err = atomic_read(&phba->num_rsrc_err);
41406 - num_cmd_success = atomic_read(&phba->num_cmd_success);
41407 + num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
41408 + num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
41409
41410 vports = lpfc_create_vport_work_array(phba);
41411 if (vports != NULL)
41412 @@ -378,8 +378,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
41413 }
41414 }
41415 lpfc_destroy_vport_work_array(phba, vports);
41416 - atomic_set(&phba->num_rsrc_err, 0);
41417 - atomic_set(&phba->num_cmd_success, 0);
41418 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
41419 + atomic_set_unchecked(&phba->num_cmd_success, 0);
41420 }
41421
41422 /**
41423 @@ -427,8 +427,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
41424 }
41425 }
41426 lpfc_destroy_vport_work_array(phba, vports);
41427 - atomic_set(&phba->num_rsrc_err, 0);
41428 - atomic_set(&phba->num_cmd_success, 0);
41429 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
41430 + atomic_set_unchecked(&phba->num_cmd_success, 0);
41431 }
41432
41433 /**
41434 diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
41435 index 234f0b7..3020aea 100644
41436 --- a/drivers/scsi/megaraid/megaraid_mbox.c
41437 +++ b/drivers/scsi/megaraid/megaraid_mbox.c
41438 @@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter)
41439 int rval;
41440 int i;
41441
41442 + pax_track_stack();
41443 +
41444 // Allocate memory for the base list of scb for management module.
41445 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
41446
41447 diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
41448 index 7a117c1..ee01e9e 100644
41449 --- a/drivers/scsi/osd/osd_initiator.c
41450 +++ b/drivers/scsi/osd/osd_initiator.c
41451 @@ -94,6 +94,8 @@ static int _osd_print_system_info(struct osd_dev *od, void *caps)
41452 int nelem = ARRAY_SIZE(get_attrs), a = 0;
41453 int ret;
41454
41455 + pax_track_stack();
41456 +
41457 or = osd_start_request(od, GFP_KERNEL);
41458 if (!or)
41459 return -ENOMEM;
41460 diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
41461 index 9ab8c86..9425ad3 100644
41462 --- a/drivers/scsi/pmcraid.c
41463 +++ b/drivers/scsi/pmcraid.c
41464 @@ -189,8 +189,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
41465 res->scsi_dev = scsi_dev;
41466 scsi_dev->hostdata = res;
41467 res->change_detected = 0;
41468 - atomic_set(&res->read_failures, 0);
41469 - atomic_set(&res->write_failures, 0);
41470 + atomic_set_unchecked(&res->read_failures, 0);
41471 + atomic_set_unchecked(&res->write_failures, 0);
41472 rc = 0;
41473 }
41474 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
41475 @@ -2396,9 +2396,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
41476
41477 /* If this was a SCSI read/write command keep count of errors */
41478 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
41479 - atomic_inc(&res->read_failures);
41480 + atomic_inc_unchecked(&res->read_failures);
41481 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
41482 - atomic_inc(&res->write_failures);
41483 + atomic_inc_unchecked(&res->write_failures);
41484
41485 if (!RES_IS_GSCSI(res->cfg_entry) &&
41486 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
41487 @@ -4116,7 +4116,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
41488
41489 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
41490 /* add resources only after host is added into system */
41491 - if (!atomic_read(&pinstance->expose_resources))
41492 + if (!atomic_read_unchecked(&pinstance->expose_resources))
41493 return;
41494
41495 spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
41496 @@ -4850,7 +4850,7 @@ static int __devinit pmcraid_init_instance(
41497 init_waitqueue_head(&pinstance->reset_wait_q);
41498
41499 atomic_set(&pinstance->outstanding_cmds, 0);
41500 - atomic_set(&pinstance->expose_resources, 0);
41501 + atomic_set_unchecked(&pinstance->expose_resources, 0);
41502
41503 INIT_LIST_HEAD(&pinstance->free_res_q);
41504 INIT_LIST_HEAD(&pinstance->used_res_q);
41505 @@ -5502,7 +5502,7 @@ static int __devinit pmcraid_probe(
41506 /* Schedule worker thread to handle CCN and take care of adding and
41507 * removing devices to OS
41508 */
41509 - atomic_set(&pinstance->expose_resources, 1);
41510 + atomic_set_unchecked(&pinstance->expose_resources, 1);
41511 schedule_work(&pinstance->worker_q);
41512 return rc;
41513
41514 diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
41515 index 3441b3f..6cbe8f7 100644
41516 --- a/drivers/scsi/pmcraid.h
41517 +++ b/drivers/scsi/pmcraid.h
41518 @@ -690,7 +690,7 @@ struct pmcraid_instance {
41519 atomic_t outstanding_cmds;
41520
41521 /* should add/delete resources to mid-layer now ?*/
41522 - atomic_t expose_resources;
41523 + atomic_unchecked_t expose_resources;
41524
41525 /* Tasklet to handle deferred processing */
41526 struct tasklet_struct isr_tasklet[PMCRAID_NUM_MSIX_VECTORS];
41527 @@ -727,8 +727,8 @@ struct pmcraid_resource_entry {
41528 struct list_head queue; /* link to "to be exposed" resources */
41529 struct pmcraid_config_table_entry cfg_entry;
41530 struct scsi_device *scsi_dev; /* Link scsi_device structure */
41531 - atomic_t read_failures; /* count of failed READ commands */
41532 - atomic_t write_failures; /* count of failed WRITE commands */
41533 + atomic_unchecked_t read_failures; /* count of failed READ commands */
41534 + atomic_unchecked_t write_failures; /* count of failed WRITE commands */
41535
41536 /* To indicate add/delete/modify during CCN */
41537 u8 change_detected;
41538 diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
41539 index 2150618..7034215 100644
41540 --- a/drivers/scsi/qla2xxx/qla_def.h
41541 +++ b/drivers/scsi/qla2xxx/qla_def.h
41542 @@ -2089,7 +2089,7 @@ struct isp_operations {
41543
41544 int (*get_flash_version) (struct scsi_qla_host *, void *);
41545 int (*start_scsi) (srb_t *);
41546 -};
41547 +} __no_const;
41548
41549 /* MSI-X Support *************************************************************/
41550
41551 diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
41552 index 81b5f29..2ae1fad 100644
41553 --- a/drivers/scsi/qla4xxx/ql4_def.h
41554 +++ b/drivers/scsi/qla4xxx/ql4_def.h
41555 @@ -240,7 +240,7 @@ struct ddb_entry {
41556 atomic_t retry_relogin_timer; /* Min Time between relogins
41557 * (4000 only) */
41558 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
41559 - atomic_t relogin_retry_count; /* Num of times relogin has been
41560 + atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
41561 * retried */
41562
41563 uint16_t port;
41564 diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
41565 index af8c323..515dd51 100644
41566 --- a/drivers/scsi/qla4xxx/ql4_init.c
41567 +++ b/drivers/scsi/qla4xxx/ql4_init.c
41568 @@ -482,7 +482,7 @@ static struct ddb_entry * qla4xxx_alloc_ddb(struct scsi_qla_host *ha,
41569 atomic_set(&ddb_entry->port_down_timer, ha->port_down_retry_count);
41570 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
41571 atomic_set(&ddb_entry->relogin_timer, 0);
41572 - atomic_set(&ddb_entry->relogin_retry_count, 0);
41573 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
41574 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
41575 list_add_tail(&ddb_entry->list, &ha->ddb_list);
41576 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
41577 @@ -1308,7 +1308,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha,
41578 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
41579 atomic_set(&ddb_entry->port_down_timer,
41580 ha->port_down_retry_count);
41581 - atomic_set(&ddb_entry->relogin_retry_count, 0);
41582 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
41583 atomic_set(&ddb_entry->relogin_timer, 0);
41584 clear_bit(DF_RELOGIN, &ddb_entry->flags);
41585 clear_bit(DF_NO_RELOGIN, &ddb_entry->flags);
41586 diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
41587 index 83c8b5e..a82b348 100644
41588 --- a/drivers/scsi/qla4xxx/ql4_os.c
41589 +++ b/drivers/scsi/qla4xxx/ql4_os.c
41590 @@ -641,13 +641,13 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
41591 ddb_entry->fw_ddb_device_state ==
41592 DDB_DS_SESSION_FAILED) {
41593 /* Reset retry relogin timer */
41594 - atomic_inc(&ddb_entry->relogin_retry_count);
41595 + atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
41596 DEBUG2(printk("scsi%ld: index[%d] relogin"
41597 " timed out-retrying"
41598 " relogin (%d)\n",
41599 ha->host_no,
41600 ddb_entry->fw_ddb_index,
41601 - atomic_read(&ddb_entry->
41602 + atomic_read_unchecked(&ddb_entry->
41603 relogin_retry_count))
41604 );
41605 start_dpc++;
41606 diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
41607 index dd098ca..686ce01 100644
41608 --- a/drivers/scsi/scsi.c
41609 +++ b/drivers/scsi/scsi.c
41610 @@ -652,7 +652,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
41611 unsigned long timeout;
41612 int rtn = 0;
41613
41614 - atomic_inc(&cmd->device->iorequest_cnt);
41615 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
41616
41617 /* check if the device is still usable */
41618 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
41619 diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
41620 index bc3e363..e1a8e50 100644
41621 --- a/drivers/scsi/scsi_debug.c
41622 +++ b/drivers/scsi/scsi_debug.c
41623 @@ -1395,6 +1395,8 @@ static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
41624 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
41625 unsigned char *cmd = (unsigned char *)scp->cmnd;
41626
41627 + pax_track_stack();
41628 +
41629 if ((errsts = check_readiness(scp, 1, devip)))
41630 return errsts;
41631 memset(arr, 0, sizeof(arr));
41632 @@ -1492,6 +1494,8 @@ static int resp_log_sense(struct scsi_cmnd * scp,
41633 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
41634 unsigned char *cmd = (unsigned char *)scp->cmnd;
41635
41636 + pax_track_stack();
41637 +
41638 if ((errsts = check_readiness(scp, 1, devip)))
41639 return errsts;
41640 memset(arr, 0, sizeof(arr));
41641 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
41642 index 8df12522..c4c1472 100644
41643 --- a/drivers/scsi/scsi_lib.c
41644 +++ b/drivers/scsi/scsi_lib.c
41645 @@ -1389,7 +1389,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
41646 shost = sdev->host;
41647 scsi_init_cmd_errh(cmd);
41648 cmd->result = DID_NO_CONNECT << 16;
41649 - atomic_inc(&cmd->device->iorequest_cnt);
41650 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
41651
41652 /*
41653 * SCSI request completion path will do scsi_device_unbusy(),
41654 @@ -1420,9 +1420,9 @@ static void scsi_softirq_done(struct request *rq)
41655 */
41656 cmd->serial_number = 0;
41657
41658 - atomic_inc(&cmd->device->iodone_cnt);
41659 + atomic_inc_unchecked(&cmd->device->iodone_cnt);
41660 if (cmd->result)
41661 - atomic_inc(&cmd->device->ioerr_cnt);
41662 + atomic_inc_unchecked(&cmd->device->ioerr_cnt);
41663
41664 disposition = scsi_decide_disposition(cmd);
41665 if (disposition != SUCCESS &&
41666 diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
41667 index 91a93e0..eae0fe3 100644
41668 --- a/drivers/scsi/scsi_sysfs.c
41669 +++ b/drivers/scsi/scsi_sysfs.c
41670 @@ -662,7 +662,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
41671 char *buf) \
41672 { \
41673 struct scsi_device *sdev = to_scsi_device(dev); \
41674 - unsigned long long count = atomic_read(&sdev->field); \
41675 + unsigned long long count = atomic_read_unchecked(&sdev->field); \
41676 return snprintf(buf, 20, "0x%llx\n", count); \
41677 } \
41678 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
41679 diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
41680 index 1030327..f91fd30 100644
41681 --- a/drivers/scsi/scsi_tgt_lib.c
41682 +++ b/drivers/scsi/scsi_tgt_lib.c
41683 @@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
41684 int err;
41685
41686 dprintk("%lx %u\n", uaddr, len);
41687 - err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
41688 + err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
41689 if (err) {
41690 /*
41691 * TODO: need to fixup sg_tablesize, max_segment_size,
41692 diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
41693 index db02e31..1b42ea9 100644
41694 --- a/drivers/scsi/scsi_transport_fc.c
41695 +++ b/drivers/scsi/scsi_transport_fc.c
41696 @@ -480,7 +480,7 @@ MODULE_PARM_DESC(dev_loss_tmo,
41697 * Netlink Infrastructure
41698 */
41699
41700 -static atomic_t fc_event_seq;
41701 +static atomic_unchecked_t fc_event_seq;
41702
41703 /**
41704 * fc_get_event_number - Obtain the next sequential FC event number
41705 @@ -493,7 +493,7 @@ static atomic_t fc_event_seq;
41706 u32
41707 fc_get_event_number(void)
41708 {
41709 - return atomic_add_return(1, &fc_event_seq);
41710 + return atomic_add_return_unchecked(1, &fc_event_seq);
41711 }
41712 EXPORT_SYMBOL(fc_get_event_number);
41713
41714 @@ -641,7 +641,7 @@ static __init int fc_transport_init(void)
41715 {
41716 int error;
41717
41718 - atomic_set(&fc_event_seq, 0);
41719 + atomic_set_unchecked(&fc_event_seq, 0);
41720
41721 error = transport_class_register(&fc_host_class);
41722 if (error)
41723 diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
41724 index de2f8c4..63c5278 100644
41725 --- a/drivers/scsi/scsi_transport_iscsi.c
41726 +++ b/drivers/scsi/scsi_transport_iscsi.c
41727 @@ -81,7 +81,7 @@ struct iscsi_internal {
41728 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
41729 };
41730
41731 -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
41732 +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
41733 static struct workqueue_struct *iscsi_eh_timer_workq;
41734
41735 /*
41736 @@ -728,7 +728,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
41737 int err;
41738
41739 ihost = shost->shost_data;
41740 - session->sid = atomic_add_return(1, &iscsi_session_nr);
41741 + session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
41742
41743 if (id == ISCSI_MAX_TARGET) {
41744 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
41745 @@ -2060,7 +2060,7 @@ static __init int iscsi_transport_init(void)
41746 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
41747 ISCSI_TRANSPORT_VERSION);
41748
41749 - atomic_set(&iscsi_session_nr, 0);
41750 + atomic_set_unchecked(&iscsi_session_nr, 0);
41751
41752 err = class_register(&iscsi_transport_class);
41753 if (err)
41754 diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
41755 index 21a045e..ec89e03 100644
41756 --- a/drivers/scsi/scsi_transport_srp.c
41757 +++ b/drivers/scsi/scsi_transport_srp.c
41758 @@ -33,7 +33,7 @@
41759 #include "scsi_transport_srp_internal.h"
41760
41761 struct srp_host_attrs {
41762 - atomic_t next_port_id;
41763 + atomic_unchecked_t next_port_id;
41764 };
41765 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
41766
41767 @@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
41768 struct Scsi_Host *shost = dev_to_shost(dev);
41769 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
41770
41771 - atomic_set(&srp_host->next_port_id, 0);
41772 + atomic_set_unchecked(&srp_host->next_port_id, 0);
41773 return 0;
41774 }
41775
41776 @@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
41777 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
41778 rport->roles = ids->roles;
41779
41780 - id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
41781 + id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
41782 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
41783
41784 transport_setup_device(&rport->dev);
41785 diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
41786 index 040f751..98a5ed2 100644
41787 --- a/drivers/scsi/sg.c
41788 +++ b/drivers/scsi/sg.c
41789 @@ -1064,7 +1064,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
41790 sdp->disk->disk_name,
41791 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
41792 NULL,
41793 - (char *)arg);
41794 + (char __user *)arg);
41795 case BLKTRACESTART:
41796 return blk_trace_startstop(sdp->device->request_queue, 1);
41797 case BLKTRACESTOP:
41798 @@ -2292,7 +2292,7 @@ struct sg_proc_leaf {
41799 const struct file_operations * fops;
41800 };
41801
41802 -static struct sg_proc_leaf sg_proc_leaf_arr[] = {
41803 +static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
41804 {"allow_dio", &adio_fops},
41805 {"debug", &debug_fops},
41806 {"def_reserved_size", &dressz_fops},
41807 @@ -2307,7 +2307,7 @@ sg_proc_init(void)
41808 {
41809 int k, mask;
41810 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
41811 - struct sg_proc_leaf * leaf;
41812 + const struct sg_proc_leaf * leaf;
41813
41814 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
41815 if (!sg_proc_sgp)
41816 diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
41817 index c19ca5e..3eb5959 100644
41818 --- a/drivers/scsi/sym53c8xx_2/sym_glue.c
41819 +++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
41820 @@ -1758,6 +1758,8 @@ static int __devinit sym2_probe(struct pci_dev *pdev,
41821 int do_iounmap = 0;
41822 int do_disable_device = 1;
41823
41824 + pax_track_stack();
41825 +
41826 memset(&sym_dev, 0, sizeof(sym_dev));
41827 memset(&nvram, 0, sizeof(nvram));
41828 sym_dev.pdev = pdev;
41829 diff --git a/drivers/serial/kgdboc.c b/drivers/serial/kgdboc.c
41830 index eadc1ab..2d81457 100644
41831 --- a/drivers/serial/kgdboc.c
41832 +++ b/drivers/serial/kgdboc.c
41833 @@ -18,7 +18,7 @@
41834
41835 #define MAX_CONFIG_LEN 40
41836
41837 -static struct kgdb_io kgdboc_io_ops;
41838 +static const struct kgdb_io kgdboc_io_ops;
41839
41840 /* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
41841 static int configured = -1;
41842 @@ -154,7 +154,7 @@ static void kgdboc_post_exp_handler(void)
41843 module_put(THIS_MODULE);
41844 }
41845
41846 -static struct kgdb_io kgdboc_io_ops = {
41847 +static const struct kgdb_io kgdboc_io_ops = {
41848 .name = "kgdboc",
41849 .read_char = kgdboc_get_char,
41850 .write_char = kgdboc_put_char,
41851 diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
41852 index b76f246..7f41af7 100644
41853 --- a/drivers/spi/spi.c
41854 +++ b/drivers/spi/spi.c
41855 @@ -774,7 +774,7 @@ int spi_sync(struct spi_device *spi, struct spi_message *message)
41856 EXPORT_SYMBOL_GPL(spi_sync);
41857
41858 /* portable code must never pass more than 32 bytes */
41859 -#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
41860 +#define SPI_BUFSIZ max(32U,SMP_CACHE_BYTES)
41861
41862 static u8 *buf;
41863
41864 diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
41865 index b9b37ff..19dfa23 100644
41866 --- a/drivers/staging/android/binder.c
41867 +++ b/drivers/staging/android/binder.c
41868 @@ -2761,7 +2761,7 @@ static void binder_vma_close(struct vm_area_struct *vma)
41869 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
41870 }
41871
41872 -static struct vm_operations_struct binder_vm_ops = {
41873 +static const struct vm_operations_struct binder_vm_ops = {
41874 .open = binder_vma_open,
41875 .close = binder_vma_close,
41876 };
41877 diff --git a/drivers/staging/b3dfg/b3dfg.c b/drivers/staging/b3dfg/b3dfg.c
41878 index cda26bb..39fed3f 100644
41879 --- a/drivers/staging/b3dfg/b3dfg.c
41880 +++ b/drivers/staging/b3dfg/b3dfg.c
41881 @@ -455,7 +455,7 @@ static int b3dfg_vma_fault(struct vm_area_struct *vma,
41882 return VM_FAULT_NOPAGE;
41883 }
41884
41885 -static struct vm_operations_struct b3dfg_vm_ops = {
41886 +static const struct vm_operations_struct b3dfg_vm_ops = {
41887 .fault = b3dfg_vma_fault,
41888 };
41889
41890 @@ -848,7 +848,7 @@ static int b3dfg_mmap(struct file *filp, struct vm_area_struct *vma)
41891 return r;
41892 }
41893
41894 -static struct file_operations b3dfg_fops = {
41895 +static const struct file_operations b3dfg_fops = {
41896 .owner = THIS_MODULE,
41897 .open = b3dfg_open,
41898 .release = b3dfg_release,
41899 diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
41900 index 908f25a..c9a579b 100644
41901 --- a/drivers/staging/comedi/comedi_fops.c
41902 +++ b/drivers/staging/comedi/comedi_fops.c
41903 @@ -1389,7 +1389,7 @@ void comedi_unmap(struct vm_area_struct *area)
41904 mutex_unlock(&dev->mutex);
41905 }
41906
41907 -static struct vm_operations_struct comedi_vm_ops = {
41908 +static const struct vm_operations_struct comedi_vm_ops = {
41909 .close = comedi_unmap,
41910 };
41911
41912 diff --git a/drivers/staging/dream/qdsp5/adsp_driver.c b/drivers/staging/dream/qdsp5/adsp_driver.c
41913 index e55a0db..577b776 100644
41914 --- a/drivers/staging/dream/qdsp5/adsp_driver.c
41915 +++ b/drivers/staging/dream/qdsp5/adsp_driver.c
41916 @@ -576,7 +576,7 @@ static struct adsp_device *inode_to_device(struct inode *inode)
41917 static dev_t adsp_devno;
41918 static struct class *adsp_class;
41919
41920 -static struct file_operations adsp_fops = {
41921 +static const struct file_operations adsp_fops = {
41922 .owner = THIS_MODULE,
41923 .open = adsp_open,
41924 .unlocked_ioctl = adsp_ioctl,
41925 diff --git a/drivers/staging/dream/qdsp5/audio_aac.c b/drivers/staging/dream/qdsp5/audio_aac.c
41926 index ad2390f..4116ee8 100644
41927 --- a/drivers/staging/dream/qdsp5/audio_aac.c
41928 +++ b/drivers/staging/dream/qdsp5/audio_aac.c
41929 @@ -1022,7 +1022,7 @@ done:
41930 return rc;
41931 }
41932
41933 -static struct file_operations audio_aac_fops = {
41934 +static const struct file_operations audio_aac_fops = {
41935 .owner = THIS_MODULE,
41936 .open = audio_open,
41937 .release = audio_release,
41938 diff --git a/drivers/staging/dream/qdsp5/audio_amrnb.c b/drivers/staging/dream/qdsp5/audio_amrnb.c
41939 index cd818a5..870b37b 100644
41940 --- a/drivers/staging/dream/qdsp5/audio_amrnb.c
41941 +++ b/drivers/staging/dream/qdsp5/audio_amrnb.c
41942 @@ -833,7 +833,7 @@ done:
41943 return rc;
41944 }
41945
41946 -static struct file_operations audio_amrnb_fops = {
41947 +static const struct file_operations audio_amrnb_fops = {
41948 .owner = THIS_MODULE,
41949 .open = audamrnb_open,
41950 .release = audamrnb_release,
41951 diff --git a/drivers/staging/dream/qdsp5/audio_evrc.c b/drivers/staging/dream/qdsp5/audio_evrc.c
41952 index 4b43e18..cedafda 100644
41953 --- a/drivers/staging/dream/qdsp5/audio_evrc.c
41954 +++ b/drivers/staging/dream/qdsp5/audio_evrc.c
41955 @@ -805,7 +805,7 @@ dma_fail:
41956 return rc;
41957 }
41958
41959 -static struct file_operations audio_evrc_fops = {
41960 +static const struct file_operations audio_evrc_fops = {
41961 .owner = THIS_MODULE,
41962 .open = audevrc_open,
41963 .release = audevrc_release,
41964 diff --git a/drivers/staging/dream/qdsp5/audio_in.c b/drivers/staging/dream/qdsp5/audio_in.c
41965 index 3d950a2..9431118 100644
41966 --- a/drivers/staging/dream/qdsp5/audio_in.c
41967 +++ b/drivers/staging/dream/qdsp5/audio_in.c
41968 @@ -913,7 +913,7 @@ static int audpre_open(struct inode *inode, struct file *file)
41969 return 0;
41970 }
41971
41972 -static struct file_operations audio_fops = {
41973 +static const struct file_operations audio_fops = {
41974 .owner = THIS_MODULE,
41975 .open = audio_in_open,
41976 .release = audio_in_release,
41977 @@ -922,7 +922,7 @@ static struct file_operations audio_fops = {
41978 .unlocked_ioctl = audio_in_ioctl,
41979 };
41980
41981 -static struct file_operations audpre_fops = {
41982 +static const struct file_operations audpre_fops = {
41983 .owner = THIS_MODULE,
41984 .open = audpre_open,
41985 .unlocked_ioctl = audpre_ioctl,
41986 diff --git a/drivers/staging/dream/qdsp5/audio_mp3.c b/drivers/staging/dream/qdsp5/audio_mp3.c
41987 index b95574f..286c2f4 100644
41988 --- a/drivers/staging/dream/qdsp5/audio_mp3.c
41989 +++ b/drivers/staging/dream/qdsp5/audio_mp3.c
41990 @@ -941,7 +941,7 @@ done:
41991 return rc;
41992 }
41993
41994 -static struct file_operations audio_mp3_fops = {
41995 +static const struct file_operations audio_mp3_fops = {
41996 .owner = THIS_MODULE,
41997 .open = audio_open,
41998 .release = audio_release,
41999 diff --git a/drivers/staging/dream/qdsp5/audio_out.c b/drivers/staging/dream/qdsp5/audio_out.c
42000 index d1adcf6..f8f9833 100644
42001 --- a/drivers/staging/dream/qdsp5/audio_out.c
42002 +++ b/drivers/staging/dream/qdsp5/audio_out.c
42003 @@ -810,7 +810,7 @@ static int audpp_open(struct inode *inode, struct file *file)
42004 return 0;
42005 }
42006
42007 -static struct file_operations audio_fops = {
42008 +static const struct file_operations audio_fops = {
42009 .owner = THIS_MODULE,
42010 .open = audio_open,
42011 .release = audio_release,
42012 @@ -819,7 +819,7 @@ static struct file_operations audio_fops = {
42013 .unlocked_ioctl = audio_ioctl,
42014 };
42015
42016 -static struct file_operations audpp_fops = {
42017 +static const struct file_operations audpp_fops = {
42018 .owner = THIS_MODULE,
42019 .open = audpp_open,
42020 .unlocked_ioctl = audpp_ioctl,
42021 diff --git a/drivers/staging/dream/qdsp5/audio_qcelp.c b/drivers/staging/dream/qdsp5/audio_qcelp.c
42022 index f0f50e3..f6b9dbc 100644
42023 --- a/drivers/staging/dream/qdsp5/audio_qcelp.c
42024 +++ b/drivers/staging/dream/qdsp5/audio_qcelp.c
42025 @@ -816,7 +816,7 @@ err:
42026 return rc;
42027 }
42028
42029 -static struct file_operations audio_qcelp_fops = {
42030 +static const struct file_operations audio_qcelp_fops = {
42031 .owner = THIS_MODULE,
42032 .open = audqcelp_open,
42033 .release = audqcelp_release,
42034 diff --git a/drivers/staging/dream/qdsp5/snd.c b/drivers/staging/dream/qdsp5/snd.c
42035 index 037d7ff..5469ec3 100644
42036 --- a/drivers/staging/dream/qdsp5/snd.c
42037 +++ b/drivers/staging/dream/qdsp5/snd.c
42038 @@ -242,7 +242,7 @@ err:
42039 return rc;
42040 }
42041
42042 -static struct file_operations snd_fops = {
42043 +static const struct file_operations snd_fops = {
42044 .owner = THIS_MODULE,
42045 .open = snd_open,
42046 .release = snd_release,
42047 diff --git a/drivers/staging/dream/smd/smd_qmi.c b/drivers/staging/dream/smd/smd_qmi.c
42048 index d4e7d88..0ea632a 100644
42049 --- a/drivers/staging/dream/smd/smd_qmi.c
42050 +++ b/drivers/staging/dream/smd/smd_qmi.c
42051 @@ -793,7 +793,7 @@ static int qmi_release(struct inode *ip, struct file *fp)
42052 return 0;
42053 }
42054
42055 -static struct file_operations qmi_fops = {
42056 +static const struct file_operations qmi_fops = {
42057 .owner = THIS_MODULE,
42058 .read = qmi_read,
42059 .write = qmi_write,
42060 diff --git a/drivers/staging/dream/smd/smd_rpcrouter_device.c b/drivers/staging/dream/smd/smd_rpcrouter_device.c
42061 index cd3910b..ff053d3 100644
42062 --- a/drivers/staging/dream/smd/smd_rpcrouter_device.c
42063 +++ b/drivers/staging/dream/smd/smd_rpcrouter_device.c
42064 @@ -214,7 +214,7 @@ static long rpcrouter_ioctl(struct file *filp, unsigned int cmd,
42065 return rc;
42066 }
42067
42068 -static struct file_operations rpcrouter_server_fops = {
42069 +static const struct file_operations rpcrouter_server_fops = {
42070 .owner = THIS_MODULE,
42071 .open = rpcrouter_open,
42072 .release = rpcrouter_release,
42073 @@ -224,7 +224,7 @@ static struct file_operations rpcrouter_server_fops = {
42074 .unlocked_ioctl = rpcrouter_ioctl,
42075 };
42076
42077 -static struct file_operations rpcrouter_router_fops = {
42078 +static const struct file_operations rpcrouter_router_fops = {
42079 .owner = THIS_MODULE,
42080 .open = rpcrouter_open,
42081 .release = rpcrouter_release,
42082 diff --git a/drivers/staging/dst/dcore.c b/drivers/staging/dst/dcore.c
42083 index c24e4e0..07665be 100644
42084 --- a/drivers/staging/dst/dcore.c
42085 +++ b/drivers/staging/dst/dcore.c
42086 @@ -149,7 +149,7 @@ static int dst_bdev_release(struct gendisk *disk, fmode_t mode)
42087 return 0;
42088 }
42089
42090 -static struct block_device_operations dst_blk_ops = {
42091 +static const struct block_device_operations dst_blk_ops = {
42092 .open = dst_bdev_open,
42093 .release = dst_bdev_release,
42094 .owner = THIS_MODULE,
42095 @@ -588,7 +588,7 @@ static struct dst_node *dst_alloc_node(struct dst_ctl *ctl,
42096 n->size = ctl->size;
42097
42098 atomic_set(&n->refcnt, 1);
42099 - atomic_long_set(&n->gen, 0);
42100 + atomic_long_set_unchecked(&n->gen, 0);
42101 snprintf(n->name, sizeof(n->name), "%s", ctl->name);
42102
42103 err = dst_node_sysfs_init(n);
42104 diff --git a/drivers/staging/dst/trans.c b/drivers/staging/dst/trans.c
42105 index 557d372..8d84422 100644
42106 --- a/drivers/staging/dst/trans.c
42107 +++ b/drivers/staging/dst/trans.c
42108 @@ -169,7 +169,7 @@ int dst_process_bio(struct dst_node *n, struct bio *bio)
42109 t->error = 0;
42110 t->retries = 0;
42111 atomic_set(&t->refcnt, 1);
42112 - t->gen = atomic_long_inc_return(&n->gen);
42113 + t->gen = atomic_long_inc_return_unchecked(&n->gen);
42114
42115 t->enc = bio_data_dir(bio);
42116 dst_bio_to_cmd(bio, &t->cmd, DST_IO, t->gen);
42117 diff --git a/drivers/staging/et131x/et1310_tx.c b/drivers/staging/et131x/et1310_tx.c
42118 index 94f7752..d051514 100644
42119 --- a/drivers/staging/et131x/et1310_tx.c
42120 +++ b/drivers/staging/et131x/et1310_tx.c
42121 @@ -710,11 +710,11 @@ inline void et131x_free_send_packet(struct et131x_adapter *etdev,
42122 struct net_device_stats *stats = &etdev->net_stats;
42123
42124 if (pMpTcb->Flags & fMP_DEST_BROAD)
42125 - atomic_inc(&etdev->Stats.brdcstxmt);
42126 + atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
42127 else if (pMpTcb->Flags & fMP_DEST_MULTI)
42128 - atomic_inc(&etdev->Stats.multixmt);
42129 + atomic_inc_unchecked(&etdev->Stats.multixmt);
42130 else
42131 - atomic_inc(&etdev->Stats.unixmt);
42132 + atomic_inc_unchecked(&etdev->Stats.unixmt);
42133
42134 if (pMpTcb->Packet) {
42135 stats->tx_bytes += pMpTcb->Packet->len;
42136 diff --git a/drivers/staging/et131x/et131x_adapter.h b/drivers/staging/et131x/et131x_adapter.h
42137 index 1dfe06f..f469b4d 100644
42138 --- a/drivers/staging/et131x/et131x_adapter.h
42139 +++ b/drivers/staging/et131x/et131x_adapter.h
42140 @@ -145,11 +145,11 @@ typedef struct _ce_stats_t {
42141 * operations
42142 */
42143 u32 unircv; /* # multicast packets received */
42144 - atomic_t unixmt; /* # multicast packets for Tx */
42145 + atomic_unchecked_t unixmt; /* # multicast packets for Tx */
42146 u32 multircv; /* # multicast packets received */
42147 - atomic_t multixmt; /* # multicast packets for Tx */
42148 + atomic_unchecked_t multixmt; /* # multicast packets for Tx */
42149 u32 brdcstrcv; /* # broadcast packets received */
42150 - atomic_t brdcstxmt; /* # broadcast packets for Tx */
42151 + atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
42152 u32 norcvbuf; /* # Rx packets discarded */
42153 u32 noxmtbuf; /* # Tx packets discarded */
42154
42155 diff --git a/drivers/staging/go7007/go7007-v4l2.c b/drivers/staging/go7007/go7007-v4l2.c
42156 index 4bd353a..e28f455 100644
42157 --- a/drivers/staging/go7007/go7007-v4l2.c
42158 +++ b/drivers/staging/go7007/go7007-v4l2.c
42159 @@ -1700,7 +1700,7 @@ static int go7007_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
42160 return 0;
42161 }
42162
42163 -static struct vm_operations_struct go7007_vm_ops = {
42164 +static const struct vm_operations_struct go7007_vm_ops = {
42165 .open = go7007_vm_open,
42166 .close = go7007_vm_close,
42167 .fault = go7007_vm_fault,
42168 diff --git a/drivers/staging/hv/Channel.c b/drivers/staging/hv/Channel.c
42169 index 366dc95..b974d87 100644
42170 --- a/drivers/staging/hv/Channel.c
42171 +++ b/drivers/staging/hv/Channel.c
42172 @@ -464,8 +464,8 @@ int VmbusChannelEstablishGpadl(struct vmbus_channel *Channel, void *Kbuffer,
42173
42174 DPRINT_ENTER(VMBUS);
42175
42176 - nextGpadlHandle = atomic_read(&gVmbusConnection.NextGpadlHandle);
42177 - atomic_inc(&gVmbusConnection.NextGpadlHandle);
42178 + nextGpadlHandle = atomic_read_unchecked(&gVmbusConnection.NextGpadlHandle);
42179 + atomic_inc_unchecked(&gVmbusConnection.NextGpadlHandle);
42180
42181 VmbusChannelCreateGpadlHeader(Kbuffer, Size, &msgInfo, &msgCount);
42182 ASSERT(msgInfo != NULL);
42183 diff --git a/drivers/staging/hv/Hv.c b/drivers/staging/hv/Hv.c
42184 index b12237f..01ae28a 100644
42185 --- a/drivers/staging/hv/Hv.c
42186 +++ b/drivers/staging/hv/Hv.c
42187 @@ -161,7 +161,7 @@ static u64 HvDoHypercall(u64 Control, void *Input, void *Output)
42188 u64 outputAddress = (Output) ? virt_to_phys(Output) : 0;
42189 u32 outputAddressHi = outputAddress >> 32;
42190 u32 outputAddressLo = outputAddress & 0xFFFFFFFF;
42191 - volatile void *hypercallPage = gHvContext.HypercallPage;
42192 + volatile void *hypercallPage = ktva_ktla(gHvContext.HypercallPage);
42193
42194 DPRINT_DBG(VMBUS, "Hypercall <control %llx input %p output %p>",
42195 Control, Input, Output);
42196 diff --git a/drivers/staging/hv/VmbusApi.h b/drivers/staging/hv/VmbusApi.h
42197 index d089bb1..2ebc158 100644
42198 --- a/drivers/staging/hv/VmbusApi.h
42199 +++ b/drivers/staging/hv/VmbusApi.h
42200 @@ -109,7 +109,7 @@ struct vmbus_channel_interface {
42201 u32 *GpadlHandle);
42202 int (*TeardownGpadl)(struct hv_device *device, u32 GpadlHandle);
42203 void (*GetInfo)(struct hv_device *dev, struct hv_device_info *devinfo);
42204 -};
42205 +} __no_const;
42206
42207 /* Base driver object */
42208 struct hv_driver {
42209 diff --git a/drivers/staging/hv/VmbusPrivate.h b/drivers/staging/hv/VmbusPrivate.h
42210 index 5a37cce..6ecc88c 100644
42211 --- a/drivers/staging/hv/VmbusPrivate.h
42212 +++ b/drivers/staging/hv/VmbusPrivate.h
42213 @@ -59,7 +59,7 @@ enum VMBUS_CONNECT_STATE {
42214 struct VMBUS_CONNECTION {
42215 enum VMBUS_CONNECT_STATE ConnectState;
42216
42217 - atomic_t NextGpadlHandle;
42218 + atomic_unchecked_t NextGpadlHandle;
42219
42220 /*
42221 * Represents channel interrupts. Each bit position represents a
42222 diff --git a/drivers/staging/hv/blkvsc_drv.c b/drivers/staging/hv/blkvsc_drv.c
42223 index 871a202..ca50ddf 100644
42224 --- a/drivers/staging/hv/blkvsc_drv.c
42225 +++ b/drivers/staging/hv/blkvsc_drv.c
42226 @@ -153,7 +153,7 @@ static int blkvsc_ringbuffer_size = BLKVSC_RING_BUFFER_SIZE;
42227 /* The one and only one */
42228 static struct blkvsc_driver_context g_blkvsc_drv;
42229
42230 -static struct block_device_operations block_ops = {
42231 +static const struct block_device_operations block_ops = {
42232 .owner = THIS_MODULE,
42233 .open = blkvsc_open,
42234 .release = blkvsc_release,
42235 diff --git a/drivers/staging/hv/vmbus_drv.c b/drivers/staging/hv/vmbus_drv.c
42236 index 6acc49a..fbc8d46 100644
42237 --- a/drivers/staging/hv/vmbus_drv.c
42238 +++ b/drivers/staging/hv/vmbus_drv.c
42239 @@ -532,7 +532,7 @@ static int vmbus_child_device_register(struct hv_device *root_device_obj,
42240 to_device_context(root_device_obj);
42241 struct device_context *child_device_ctx =
42242 to_device_context(child_device_obj);
42243 - static atomic_t device_num = ATOMIC_INIT(0);
42244 + static atomic_unchecked_t device_num = ATOMIC_INIT(0);
42245
42246 DPRINT_ENTER(VMBUS_DRV);
42247
42248 @@ -541,7 +541,7 @@ static int vmbus_child_device_register(struct hv_device *root_device_obj,
42249
42250 /* Set the device name. Otherwise, device_register() will fail. */
42251 dev_set_name(&child_device_ctx->device, "vmbus_0_%d",
42252 - atomic_inc_return(&device_num));
42253 + atomic_inc_return_unchecked(&device_num));
42254
42255 /* The new device belongs to this bus */
42256 child_device_ctx->device.bus = &g_vmbus_drv.bus; /* device->dev.bus; */
42257 diff --git a/drivers/staging/iio/ring_generic.h b/drivers/staging/iio/ring_generic.h
42258 index d926189..17b19fd 100644
42259 --- a/drivers/staging/iio/ring_generic.h
42260 +++ b/drivers/staging/iio/ring_generic.h
42261 @@ -87,7 +87,7 @@ struct iio_ring_access_funcs {
42262
42263 int (*is_enabled)(struct iio_ring_buffer *ring);
42264 int (*enable)(struct iio_ring_buffer *ring);
42265 -};
42266 +} __no_const;
42267
42268 /**
42269 * struct iio_ring_buffer - general ring buffer structure
42270 diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
42271 index 1b237b7..88c624e 100644
42272 --- a/drivers/staging/octeon/ethernet-rx.c
42273 +++ b/drivers/staging/octeon/ethernet-rx.c
42274 @@ -406,11 +406,11 @@ void cvm_oct_tasklet_rx(unsigned long unused)
42275 /* Increment RX stats for virtual ports */
42276 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
42277 #ifdef CONFIG_64BIT
42278 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
42279 - atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
42280 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
42281 + atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
42282 #else
42283 - atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
42284 - atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
42285 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
42286 + atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
42287 #endif
42288 }
42289 netif_receive_skb(skb);
42290 @@ -424,9 +424,9 @@ void cvm_oct_tasklet_rx(unsigned long unused)
42291 dev->name);
42292 */
42293 #ifdef CONFIG_64BIT
42294 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
42295 + atomic64_add_unchecked(1, (atomic64_t *)&priv->stats.rx_dropped);
42296 #else
42297 - atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
42298 + atomic_add_unchecked(1, (atomic_t *)&priv->stats.rx_dropped);
42299 #endif
42300 dev_kfree_skb_irq(skb);
42301 }
42302 diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
42303 index 492c502..d9909f1 100644
42304 --- a/drivers/staging/octeon/ethernet.c
42305 +++ b/drivers/staging/octeon/ethernet.c
42306 @@ -294,11 +294,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
42307 * since the RX tasklet also increments it.
42308 */
42309 #ifdef CONFIG_64BIT
42310 - atomic64_add(rx_status.dropped_packets,
42311 - (atomic64_t *)&priv->stats.rx_dropped);
42312 + atomic64_add_unchecked(rx_status.dropped_packets,
42313 + (atomic64_unchecked_t *)&priv->stats.rx_dropped);
42314 #else
42315 - atomic_add(rx_status.dropped_packets,
42316 - (atomic_t *)&priv->stats.rx_dropped);
42317 + atomic_add_unchecked(rx_status.dropped_packets,
42318 + (atomic_unchecked_t *)&priv->stats.rx_dropped);
42319 #endif
42320 }
42321
42322 diff --git a/drivers/staging/otus/80211core/pub_zfi.h b/drivers/staging/otus/80211core/pub_zfi.h
42323 index a35bd5d..28fff45 100644
42324 --- a/drivers/staging/otus/80211core/pub_zfi.h
42325 +++ b/drivers/staging/otus/80211core/pub_zfi.h
42326 @@ -531,7 +531,7 @@ struct zsCbFuncTbl
42327 u8_t (*zfcbClassifyTxPacket)(zdev_t* dev, zbuf_t* buf);
42328
42329 void (*zfcbHwWatchDogNotify)(zdev_t* dev);
42330 -};
42331 +} __no_const;
42332
42333 extern void zfZeroMemory(u8_t* va, u16_t length);
42334 #define ZM_INIT_CB_FUNC_TABLE(p) zfZeroMemory((u8_t *)p, sizeof(struct zsCbFuncTbl));
42335 diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
42336 index c39a25f..696f5aa 100644
42337 --- a/drivers/staging/panel/panel.c
42338 +++ b/drivers/staging/panel/panel.c
42339 @@ -1305,7 +1305,7 @@ static int lcd_release(struct inode *inode, struct file *file)
42340 return 0;
42341 }
42342
42343 -static struct file_operations lcd_fops = {
42344 +static const struct file_operations lcd_fops = {
42345 .write = lcd_write,
42346 .open = lcd_open,
42347 .release = lcd_release,
42348 @@ -1565,7 +1565,7 @@ static int keypad_release(struct inode *inode, struct file *file)
42349 return 0;
42350 }
42351
42352 -static struct file_operations keypad_fops = {
42353 +static const struct file_operations keypad_fops = {
42354 .read = keypad_read, /* read */
42355 .open = keypad_open, /* open */
42356 .release = keypad_release, /* close */
42357 diff --git a/drivers/staging/phison/phison.c b/drivers/staging/phison/phison.c
42358 index 270ebcb..37e46af 100644
42359 --- a/drivers/staging/phison/phison.c
42360 +++ b/drivers/staging/phison/phison.c
42361 @@ -43,7 +43,7 @@ static struct scsi_host_template phison_sht = {
42362 ATA_BMDMA_SHT(DRV_NAME),
42363 };
42364
42365 -static struct ata_port_operations phison_ops = {
42366 +static const struct ata_port_operations phison_ops = {
42367 .inherits = &ata_bmdma_port_ops,
42368 .prereset = phison_pre_reset,
42369 };
42370 diff --git a/drivers/staging/poch/poch.c b/drivers/staging/poch/poch.c
42371 index 2eb8e3d..57616a7 100644
42372 --- a/drivers/staging/poch/poch.c
42373 +++ b/drivers/staging/poch/poch.c
42374 @@ -1057,7 +1057,7 @@ static int poch_ioctl(struct inode *inode, struct file *filp,
42375 return 0;
42376 }
42377
42378 -static struct file_operations poch_fops = {
42379 +static const struct file_operations poch_fops = {
42380 .owner = THIS_MODULE,
42381 .open = poch_open,
42382 .release = poch_release,
42383 diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c
42384 index c94de31..19402bc 100644
42385 --- a/drivers/staging/pohmelfs/inode.c
42386 +++ b/drivers/staging/pohmelfs/inode.c
42387 @@ -1850,7 +1850,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
42388 mutex_init(&psb->mcache_lock);
42389 psb->mcache_root = RB_ROOT;
42390 psb->mcache_timeout = msecs_to_jiffies(5000);
42391 - atomic_long_set(&psb->mcache_gen, 0);
42392 + atomic_long_set_unchecked(&psb->mcache_gen, 0);
42393
42394 psb->trans_max_pages = 100;
42395
42396 @@ -1865,7 +1865,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
42397 INIT_LIST_HEAD(&psb->crypto_ready_list);
42398 INIT_LIST_HEAD(&psb->crypto_active_list);
42399
42400 - atomic_set(&psb->trans_gen, 1);
42401 + atomic_set_unchecked(&psb->trans_gen, 1);
42402 atomic_long_set(&psb->total_inodes, 0);
42403
42404 mutex_init(&psb->state_lock);
42405 diff --git a/drivers/staging/pohmelfs/mcache.c b/drivers/staging/pohmelfs/mcache.c
42406 index e22665c..a2a9390 100644
42407 --- a/drivers/staging/pohmelfs/mcache.c
42408 +++ b/drivers/staging/pohmelfs/mcache.c
42409 @@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_alloc(struct pohmelfs_sb *psb, u64 start
42410 m->data = data;
42411 m->start = start;
42412 m->size = size;
42413 - m->gen = atomic_long_inc_return(&psb->mcache_gen);
42414 + m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
42415
42416 mutex_lock(&psb->mcache_lock);
42417 err = pohmelfs_mcache_insert(psb, m);
42418 diff --git a/drivers/staging/pohmelfs/netfs.h b/drivers/staging/pohmelfs/netfs.h
42419 index 623a07d..4035c19 100644
42420 --- a/drivers/staging/pohmelfs/netfs.h
42421 +++ b/drivers/staging/pohmelfs/netfs.h
42422 @@ -570,14 +570,14 @@ struct pohmelfs_config;
42423 struct pohmelfs_sb {
42424 struct rb_root mcache_root;
42425 struct mutex mcache_lock;
42426 - atomic_long_t mcache_gen;
42427 + atomic_long_unchecked_t mcache_gen;
42428 unsigned long mcache_timeout;
42429
42430 unsigned int idx;
42431
42432 unsigned int trans_retries;
42433
42434 - atomic_t trans_gen;
42435 + atomic_unchecked_t trans_gen;
42436
42437 unsigned int crypto_attached_size;
42438 unsigned int crypto_align_size;
42439 diff --git a/drivers/staging/pohmelfs/trans.c b/drivers/staging/pohmelfs/trans.c
42440 index 36a2535..0591bf4 100644
42441 --- a/drivers/staging/pohmelfs/trans.c
42442 +++ b/drivers/staging/pohmelfs/trans.c
42443 @@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_trans *t, struct pohmelfs_sb *psb)
42444 int err;
42445 struct netfs_cmd *cmd = t->iovec.iov_base;
42446
42447 - t->gen = atomic_inc_return(&psb->trans_gen);
42448 + t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
42449
42450 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
42451 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
42452 diff --git a/drivers/staging/sep/sep_driver.c b/drivers/staging/sep/sep_driver.c
42453 index f890a16..509ece8 100644
42454 --- a/drivers/staging/sep/sep_driver.c
42455 +++ b/drivers/staging/sep/sep_driver.c
42456 @@ -2603,7 +2603,7 @@ static struct pci_driver sep_pci_driver = {
42457 static dev_t sep_devno;
42458
42459 /* the files operations structure of the driver */
42460 -static struct file_operations sep_file_operations = {
42461 +static const struct file_operations sep_file_operations = {
42462 .owner = THIS_MODULE,
42463 .ioctl = sep_ioctl,
42464 .poll = sep_poll,
42465 diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
42466 index 5e16bc3..7655b10 100644
42467 --- a/drivers/staging/usbip/usbip_common.h
42468 +++ b/drivers/staging/usbip/usbip_common.h
42469 @@ -374,7 +374,7 @@ struct usbip_device {
42470 void (*shutdown)(struct usbip_device *);
42471 void (*reset)(struct usbip_device *);
42472 void (*unusable)(struct usbip_device *);
42473 - } eh_ops;
42474 + } __no_const eh_ops;
42475 };
42476
42477
42478 diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
42479 index 57f7946..d9df23d 100644
42480 --- a/drivers/staging/usbip/vhci.h
42481 +++ b/drivers/staging/usbip/vhci.h
42482 @@ -92,7 +92,7 @@ struct vhci_hcd {
42483 unsigned resuming:1;
42484 unsigned long re_timeout;
42485
42486 - atomic_t seqnum;
42487 + atomic_unchecked_t seqnum;
42488
42489 /*
42490 * NOTE:
42491 diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
42492 index 20cd7db..c2693ff 100644
42493 --- a/drivers/staging/usbip/vhci_hcd.c
42494 +++ b/drivers/staging/usbip/vhci_hcd.c
42495 @@ -534,7 +534,7 @@ static void vhci_tx_urb(struct urb *urb)
42496 return;
42497 }
42498
42499 - priv->seqnum = atomic_inc_return(&the_controller->seqnum);
42500 + priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
42501 if (priv->seqnum == 0xffff)
42502 usbip_uinfo("seqnum max\n");
42503
42504 @@ -793,7 +793,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
42505 return -ENOMEM;
42506 }
42507
42508 - unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
42509 + unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
42510 if (unlink->seqnum == 0xffff)
42511 usbip_uinfo("seqnum max\n");
42512
42513 @@ -988,7 +988,7 @@ static int vhci_start(struct usb_hcd *hcd)
42514 vdev->rhport = rhport;
42515 }
42516
42517 - atomic_set(&vhci->seqnum, 0);
42518 + atomic_set_unchecked(&vhci->seqnum, 0);
42519 spin_lock_init(&vhci->lock);
42520
42521
42522 diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
42523 index 7fd76fe..673695a 100644
42524 --- a/drivers/staging/usbip/vhci_rx.c
42525 +++ b/drivers/staging/usbip/vhci_rx.c
42526 @@ -79,7 +79,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
42527 usbip_uerr("cannot find a urb of seqnum %u\n",
42528 pdu->base.seqnum);
42529 usbip_uinfo("max seqnum %d\n",
42530 - atomic_read(&the_controller->seqnum));
42531 + atomic_read_unchecked(&the_controller->seqnum));
42532 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
42533 return;
42534 }
42535 diff --git a/drivers/staging/vme/devices/vme_user.c b/drivers/staging/vme/devices/vme_user.c
42536 index 7891288..8e31300 100644
42537 --- a/drivers/staging/vme/devices/vme_user.c
42538 +++ b/drivers/staging/vme/devices/vme_user.c
42539 @@ -136,7 +136,7 @@ static int vme_user_ioctl(struct inode *, struct file *, unsigned int,
42540 static int __init vme_user_probe(struct device *, int, int);
42541 static int __exit vme_user_remove(struct device *, int, int);
42542
42543 -static struct file_operations vme_user_fops = {
42544 +static const struct file_operations vme_user_fops = {
42545 .open = vme_user_open,
42546 .release = vme_user_release,
42547 .read = vme_user_read,
42548 diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
42549 index 58abf44..00c1fc8 100644
42550 --- a/drivers/staging/vt6655/hostap.c
42551 +++ b/drivers/staging/vt6655/hostap.c
42552 @@ -84,7 +84,7 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
42553 PSDevice apdev_priv;
42554 struct net_device *dev = pDevice->dev;
42555 int ret;
42556 - const struct net_device_ops apdev_netdev_ops = {
42557 + net_device_ops_no_const apdev_netdev_ops = {
42558 .ndo_start_xmit = pDevice->tx_80211,
42559 };
42560
42561 diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
42562 index 0c8267a..db1f363 100644
42563 --- a/drivers/staging/vt6656/hostap.c
42564 +++ b/drivers/staging/vt6656/hostap.c
42565 @@ -86,7 +86,7 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
42566 PSDevice apdev_priv;
42567 struct net_device *dev = pDevice->dev;
42568 int ret;
42569 - const struct net_device_ops apdev_netdev_ops = {
42570 + net_device_ops_no_const apdev_netdev_ops = {
42571 .ndo_start_xmit = pDevice->tx_80211,
42572 };
42573
42574 diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
42575 index 925678b..da7f5ed 100644
42576 --- a/drivers/staging/wlan-ng/hfa384x_usb.c
42577 +++ b/drivers/staging/wlan-ng/hfa384x_usb.c
42578 @@ -205,7 +205,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
42579
42580 struct usbctlx_completor {
42581 int (*complete) (struct usbctlx_completor *);
42582 -};
42583 +} __no_const;
42584 typedef struct usbctlx_completor usbctlx_completor_t;
42585
42586 static int
42587 diff --git a/drivers/telephony/ixj.c b/drivers/telephony/ixj.c
42588 index 40de151..924f268 100644
42589 --- a/drivers/telephony/ixj.c
42590 +++ b/drivers/telephony/ixj.c
42591 @@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
42592 bool mContinue;
42593 char *pIn, *pOut;
42594
42595 + pax_track_stack();
42596 +
42597 if (!SCI_Prepare(j))
42598 return 0;
42599
42600 diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
42601 index e941367..b631f5a 100644
42602 --- a/drivers/uio/uio.c
42603 +++ b/drivers/uio/uio.c
42604 @@ -23,6 +23,7 @@
42605 #include <linux/string.h>
42606 #include <linux/kobject.h>
42607 #include <linux/uio_driver.h>
42608 +#include <asm/local.h>
42609
42610 #define UIO_MAX_DEVICES 255
42611
42612 @@ -30,10 +31,10 @@ struct uio_device {
42613 struct module *owner;
42614 struct device *dev;
42615 int minor;
42616 - atomic_t event;
42617 + atomic_unchecked_t event;
42618 struct fasync_struct *async_queue;
42619 wait_queue_head_t wait;
42620 - int vma_count;
42621 + local_t vma_count;
42622 struct uio_info *info;
42623 struct kobject *map_dir;
42624 struct kobject *portio_dir;
42625 @@ -129,7 +130,7 @@ static ssize_t map_type_show(struct kobject *kobj, struct attribute *attr,
42626 return entry->show(mem, buf);
42627 }
42628
42629 -static struct sysfs_ops map_sysfs_ops = {
42630 +static const struct sysfs_ops map_sysfs_ops = {
42631 .show = map_type_show,
42632 };
42633
42634 @@ -217,7 +218,7 @@ static ssize_t portio_type_show(struct kobject *kobj, struct attribute *attr,
42635 return entry->show(port, buf);
42636 }
42637
42638 -static struct sysfs_ops portio_sysfs_ops = {
42639 +static const struct sysfs_ops portio_sysfs_ops = {
42640 .show = portio_type_show,
42641 };
42642
42643 @@ -255,7 +256,7 @@ static ssize_t show_event(struct device *dev,
42644 struct uio_device *idev = dev_get_drvdata(dev);
42645 if (idev)
42646 return sprintf(buf, "%u\n",
42647 - (unsigned int)atomic_read(&idev->event));
42648 + (unsigned int)atomic_read_unchecked(&idev->event));
42649 else
42650 return -ENODEV;
42651 }
42652 @@ -424,7 +425,7 @@ void uio_event_notify(struct uio_info *info)
42653 {
42654 struct uio_device *idev = info->uio_dev;
42655
42656 - atomic_inc(&idev->event);
42657 + atomic_inc_unchecked(&idev->event);
42658 wake_up_interruptible(&idev->wait);
42659 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
42660 }
42661 @@ -477,7 +478,7 @@ static int uio_open(struct inode *inode, struct file *filep)
42662 }
42663
42664 listener->dev = idev;
42665 - listener->event_count = atomic_read(&idev->event);
42666 + listener->event_count = atomic_read_unchecked(&idev->event);
42667 filep->private_data = listener;
42668
42669 if (idev->info->open) {
42670 @@ -528,7 +529,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
42671 return -EIO;
42672
42673 poll_wait(filep, &idev->wait, wait);
42674 - if (listener->event_count != atomic_read(&idev->event))
42675 + if (listener->event_count != atomic_read_unchecked(&idev->event))
42676 return POLLIN | POLLRDNORM;
42677 return 0;
42678 }
42679 @@ -553,7 +554,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
42680 do {
42681 set_current_state(TASK_INTERRUPTIBLE);
42682
42683 - event_count = atomic_read(&idev->event);
42684 + event_count = atomic_read_unchecked(&idev->event);
42685 if (event_count != listener->event_count) {
42686 if (copy_to_user(buf, &event_count, count))
42687 retval = -EFAULT;
42688 @@ -624,13 +625,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
42689 static void uio_vma_open(struct vm_area_struct *vma)
42690 {
42691 struct uio_device *idev = vma->vm_private_data;
42692 - idev->vma_count++;
42693 + local_inc(&idev->vma_count);
42694 }
42695
42696 static void uio_vma_close(struct vm_area_struct *vma)
42697 {
42698 struct uio_device *idev = vma->vm_private_data;
42699 - idev->vma_count--;
42700 + local_dec(&idev->vma_count);
42701 }
42702
42703 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
42704 @@ -840,7 +841,7 @@ int __uio_register_device(struct module *owner,
42705 idev->owner = owner;
42706 idev->info = info;
42707 init_waitqueue_head(&idev->wait);
42708 - atomic_set(&idev->event, 0);
42709 + atomic_set_unchecked(&idev->event, 0);
42710
42711 ret = uio_get_minor(idev);
42712 if (ret)
42713 diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
42714 index fbea856..06efea6 100644
42715 --- a/drivers/usb/atm/usbatm.c
42716 +++ b/drivers/usb/atm/usbatm.c
42717 @@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42718 if (printk_ratelimit())
42719 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
42720 __func__, vpi, vci);
42721 - atomic_inc(&vcc->stats->rx_err);
42722 + atomic_inc_unchecked(&vcc->stats->rx_err);
42723 return;
42724 }
42725
42726 @@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42727 if (length > ATM_MAX_AAL5_PDU) {
42728 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
42729 __func__, length, vcc);
42730 - atomic_inc(&vcc->stats->rx_err);
42731 + atomic_inc_unchecked(&vcc->stats->rx_err);
42732 goto out;
42733 }
42734
42735 @@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42736 if (sarb->len < pdu_length) {
42737 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
42738 __func__, pdu_length, sarb->len, vcc);
42739 - atomic_inc(&vcc->stats->rx_err);
42740 + atomic_inc_unchecked(&vcc->stats->rx_err);
42741 goto out;
42742 }
42743
42744 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
42745 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
42746 __func__, vcc);
42747 - atomic_inc(&vcc->stats->rx_err);
42748 + atomic_inc_unchecked(&vcc->stats->rx_err);
42749 goto out;
42750 }
42751
42752 @@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42753 if (printk_ratelimit())
42754 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
42755 __func__, length);
42756 - atomic_inc(&vcc->stats->rx_drop);
42757 + atomic_inc_unchecked(&vcc->stats->rx_drop);
42758 goto out;
42759 }
42760
42761 @@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42762
42763 vcc->push(vcc, skb);
42764
42765 - atomic_inc(&vcc->stats->rx);
42766 + atomic_inc_unchecked(&vcc->stats->rx);
42767 out:
42768 skb_trim(sarb, 0);
42769 }
42770 @@ -616,7 +616,7 @@ static void usbatm_tx_process(unsigned long data)
42771 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
42772
42773 usbatm_pop(vcc, skb);
42774 - atomic_inc(&vcc->stats->tx);
42775 + atomic_inc_unchecked(&vcc->stats->tx);
42776
42777 skb = skb_dequeue(&instance->sndqueue);
42778 }
42779 @@ -775,11 +775,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
42780 if (!left--)
42781 return sprintf(page,
42782 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
42783 - atomic_read(&atm_dev->stats.aal5.tx),
42784 - atomic_read(&atm_dev->stats.aal5.tx_err),
42785 - atomic_read(&atm_dev->stats.aal5.rx),
42786 - atomic_read(&atm_dev->stats.aal5.rx_err),
42787 - atomic_read(&atm_dev->stats.aal5.rx_drop));
42788 + atomic_read_unchecked(&atm_dev->stats.aal5.tx),
42789 + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
42790 + atomic_read_unchecked(&atm_dev->stats.aal5.rx),
42791 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
42792 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
42793
42794 if (!left--) {
42795 if (instance->disconnected)
42796 diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
42797 index 24e6205..fe5a5d4 100644
42798 --- a/drivers/usb/core/hcd.c
42799 +++ b/drivers/usb/core/hcd.c
42800 @@ -2216,7 +2216,7 @@ EXPORT_SYMBOL_GPL(usb_hcd_platform_shutdown);
42801
42802 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
42803
42804 -struct usb_mon_operations *mon_ops;
42805 +const struct usb_mon_operations *mon_ops;
42806
42807 /*
42808 * The registration is unlocked.
42809 @@ -2226,7 +2226,7 @@ struct usb_mon_operations *mon_ops;
42810 * symbols from usbcore, usbcore gets referenced and cannot be unloaded first.
42811 */
42812
42813 -int usb_mon_register (struct usb_mon_operations *ops)
42814 +int usb_mon_register (const struct usb_mon_operations *ops)
42815 {
42816
42817 if (mon_ops)
42818 diff --git a/drivers/usb/core/hcd.h b/drivers/usb/core/hcd.h
42819 index bcbe104..9cfd1c6 100644
42820 --- a/drivers/usb/core/hcd.h
42821 +++ b/drivers/usb/core/hcd.h
42822 @@ -486,13 +486,13 @@ static inline void usbfs_cleanup(void) { }
42823 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
42824
42825 struct usb_mon_operations {
42826 - void (*urb_submit)(struct usb_bus *bus, struct urb *urb);
42827 - void (*urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
42828 - void (*urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
42829 + void (* const urb_submit)(struct usb_bus *bus, struct urb *urb);
42830 + void (* const urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
42831 + void (* const urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
42832 /* void (*urb_unlink)(struct usb_bus *bus, struct urb *urb); */
42833 };
42834
42835 -extern struct usb_mon_operations *mon_ops;
42836 +extern const struct usb_mon_operations *mon_ops;
42837
42838 static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb)
42839 {
42840 @@ -514,7 +514,7 @@ static inline void usbmon_urb_complete(struct usb_bus *bus, struct urb *urb,
42841 (*mon_ops->urb_complete)(bus, urb, status);
42842 }
42843
42844 -int usb_mon_register(struct usb_mon_operations *ops);
42845 +int usb_mon_register(const struct usb_mon_operations *ops);
42846 void usb_mon_deregister(void);
42847
42848 #else
42849 diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
42850 index 409cc94..a673bad 100644
42851 --- a/drivers/usb/core/message.c
42852 +++ b/drivers/usb/core/message.c
42853 @@ -914,8 +914,8 @@ char *usb_cache_string(struct usb_device *udev, int index)
42854 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
42855 if (buf) {
42856 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
42857 - if (len > 0) {
42858 - smallbuf = kmalloc(++len, GFP_NOIO);
42859 + if (len++ > 0) {
42860 + smallbuf = kmalloc(len, GFP_NOIO);
42861 if (!smallbuf)
42862 return buf;
42863 memcpy(smallbuf, buf, len);
42864 diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
42865 index 62ff5e7..530b74e 100644
42866 --- a/drivers/usb/misc/appledisplay.c
42867 +++ b/drivers/usb/misc/appledisplay.c
42868 @@ -178,7 +178,7 @@ static int appledisplay_bl_get_brightness(struct backlight_device *bd)
42869 return pdata->msgdata[1];
42870 }
42871
42872 -static struct backlight_ops appledisplay_bl_data = {
42873 +static const struct backlight_ops appledisplay_bl_data = {
42874 .get_brightness = appledisplay_bl_get_brightness,
42875 .update_status = appledisplay_bl_update_status,
42876 };
42877 diff --git a/drivers/usb/mon/mon_main.c b/drivers/usb/mon/mon_main.c
42878 index e0c2db3..bd8cb66 100644
42879 --- a/drivers/usb/mon/mon_main.c
42880 +++ b/drivers/usb/mon/mon_main.c
42881 @@ -238,7 +238,7 @@ static struct notifier_block mon_nb = {
42882 /*
42883 * Ops
42884 */
42885 -static struct usb_mon_operations mon_ops_0 = {
42886 +static const struct usb_mon_operations mon_ops_0 = {
42887 .urb_submit = mon_submit,
42888 .urb_submit_error = mon_submit_error,
42889 .urb_complete = mon_complete,
42890 diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
42891 index d6bea3e..60b250e 100644
42892 --- a/drivers/usb/wusbcore/wa-hc.h
42893 +++ b/drivers/usb/wusbcore/wa-hc.h
42894 @@ -192,7 +192,7 @@ struct wahc {
42895 struct list_head xfer_delayed_list;
42896 spinlock_t xfer_list_lock;
42897 struct work_struct xfer_work;
42898 - atomic_t xfer_id_count;
42899 + atomic_unchecked_t xfer_id_count;
42900 };
42901
42902
42903 @@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
42904 INIT_LIST_HEAD(&wa->xfer_delayed_list);
42905 spin_lock_init(&wa->xfer_list_lock);
42906 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
42907 - atomic_set(&wa->xfer_id_count, 1);
42908 + atomic_set_unchecked(&wa->xfer_id_count, 1);
42909 }
42910
42911 /**
42912 diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
42913 index 613a5fc..3174865 100644
42914 --- a/drivers/usb/wusbcore/wa-xfer.c
42915 +++ b/drivers/usb/wusbcore/wa-xfer.c
42916 @@ -293,7 +293,7 @@ out:
42917 */
42918 static void wa_xfer_id_init(struct wa_xfer *xfer)
42919 {
42920 - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
42921 + xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
42922 }
42923
42924 /*
42925 diff --git a/drivers/uwb/wlp/messages.c b/drivers/uwb/wlp/messages.c
42926 index aa42fce..f8a828c 100644
42927 --- a/drivers/uwb/wlp/messages.c
42928 +++ b/drivers/uwb/wlp/messages.c
42929 @@ -903,7 +903,7 @@ int wlp_parse_f0(struct wlp *wlp, struct sk_buff *skb)
42930 size_t len = skb->len;
42931 size_t used;
42932 ssize_t result;
42933 - struct wlp_nonce enonce, rnonce;
42934 + struct wlp_nonce enonce = {{0}}, rnonce = {{0}};
42935 enum wlp_assc_error assc_err;
42936 char enonce_buf[WLP_WSS_NONCE_STRSIZE];
42937 char rnonce_buf[WLP_WSS_NONCE_STRSIZE];
42938 diff --git a/drivers/uwb/wlp/sysfs.c b/drivers/uwb/wlp/sysfs.c
42939 index 0370399..6627c94 100644
42940 --- a/drivers/uwb/wlp/sysfs.c
42941 +++ b/drivers/uwb/wlp/sysfs.c
42942 @@ -615,8 +615,7 @@ ssize_t wlp_wss_attr_store(struct kobject *kobj, struct attribute *attr,
42943 return ret;
42944 }
42945
42946 -static
42947 -struct sysfs_ops wss_sysfs_ops = {
42948 +static const struct sysfs_ops wss_sysfs_ops = {
42949 .show = wlp_wss_attr_show,
42950 .store = wlp_wss_attr_store,
42951 };
42952 diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
42953 index 8c5e432..5ee90ea 100644
42954 --- a/drivers/video/atmel_lcdfb.c
42955 +++ b/drivers/video/atmel_lcdfb.c
42956 @@ -110,7 +110,7 @@ static int atmel_bl_get_brightness(struct backlight_device *bl)
42957 return lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL);
42958 }
42959
42960 -static struct backlight_ops atmel_lcdc_bl_ops = {
42961 +static const struct backlight_ops atmel_lcdc_bl_ops = {
42962 .update_status = atmel_bl_update_status,
42963 .get_brightness = atmel_bl_get_brightness,
42964 };
42965 diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
42966 index e4e4d43..66bcbcc 100644
42967 --- a/drivers/video/aty/aty128fb.c
42968 +++ b/drivers/video/aty/aty128fb.c
42969 @@ -149,7 +149,7 @@ enum {
42970 };
42971
42972 /* Must match above enum */
42973 -static const char *r128_family[] __devinitdata = {
42974 +static const char *r128_family[] __devinitconst = {
42975 "AGP",
42976 "PCI",
42977 "PRO AGP",
42978 @@ -1787,7 +1787,7 @@ static int aty128_bl_get_brightness(struct backlight_device *bd)
42979 return bd->props.brightness;
42980 }
42981
42982 -static struct backlight_ops aty128_bl_data = {
42983 +static const struct backlight_ops aty128_bl_data = {
42984 .get_brightness = aty128_bl_get_brightness,
42985 .update_status = aty128_bl_update_status,
42986 };
42987 diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
42988 index 913b4a4..9295a38 100644
42989 --- a/drivers/video/aty/atyfb_base.c
42990 +++ b/drivers/video/aty/atyfb_base.c
42991 @@ -2225,7 +2225,7 @@ static int aty_bl_get_brightness(struct backlight_device *bd)
42992 return bd->props.brightness;
42993 }
42994
42995 -static struct backlight_ops aty_bl_data = {
42996 +static const struct backlight_ops aty_bl_data = {
42997 .get_brightness = aty_bl_get_brightness,
42998 .update_status = aty_bl_update_status,
42999 };
43000 diff --git a/drivers/video/aty/radeon_backlight.c b/drivers/video/aty/radeon_backlight.c
43001 index 1a056ad..221bd6a 100644
43002 --- a/drivers/video/aty/radeon_backlight.c
43003 +++ b/drivers/video/aty/radeon_backlight.c
43004 @@ -127,7 +127,7 @@ static int radeon_bl_get_brightness(struct backlight_device *bd)
43005 return bd->props.brightness;
43006 }
43007
43008 -static struct backlight_ops radeon_bl_data = {
43009 +static const struct backlight_ops radeon_bl_data = {
43010 .get_brightness = radeon_bl_get_brightness,
43011 .update_status = radeon_bl_update_status,
43012 };
43013 diff --git a/drivers/video/backlight/adp5520_bl.c b/drivers/video/backlight/adp5520_bl.c
43014 index ad05da5..3cb2cb9 100644
43015 --- a/drivers/video/backlight/adp5520_bl.c
43016 +++ b/drivers/video/backlight/adp5520_bl.c
43017 @@ -84,7 +84,7 @@ static int adp5520_bl_get_brightness(struct backlight_device *bl)
43018 return error ? data->current_brightness : reg_val;
43019 }
43020
43021 -static struct backlight_ops adp5520_bl_ops = {
43022 +static const struct backlight_ops adp5520_bl_ops = {
43023 .update_status = adp5520_bl_update_status,
43024 .get_brightness = adp5520_bl_get_brightness,
43025 };
43026 diff --git a/drivers/video/backlight/adx_bl.c b/drivers/video/backlight/adx_bl.c
43027 index 2c3bdfc..d769b0b 100644
43028 --- a/drivers/video/backlight/adx_bl.c
43029 +++ b/drivers/video/backlight/adx_bl.c
43030 @@ -61,7 +61,7 @@ static int adx_backlight_check_fb(struct fb_info *fb)
43031 return 1;
43032 }
43033
43034 -static struct backlight_ops adx_backlight_ops = {
43035 +static const struct backlight_ops adx_backlight_ops = {
43036 .options = 0,
43037 .update_status = adx_backlight_update_status,
43038 .get_brightness = adx_backlight_get_brightness,
43039 diff --git a/drivers/video/backlight/atmel-pwm-bl.c b/drivers/video/backlight/atmel-pwm-bl.c
43040 index 505c082..6b6b3cc 100644
43041 --- a/drivers/video/backlight/atmel-pwm-bl.c
43042 +++ b/drivers/video/backlight/atmel-pwm-bl.c
43043 @@ -113,7 +113,7 @@ static int atmel_pwm_bl_init_pwm(struct atmel_pwm_bl *pwmbl)
43044 return pwm_channel_enable(&pwmbl->pwmc);
43045 }
43046
43047 -static struct backlight_ops atmel_pwm_bl_ops = {
43048 +static const struct backlight_ops atmel_pwm_bl_ops = {
43049 .get_brightness = atmel_pwm_bl_get_intensity,
43050 .update_status = atmel_pwm_bl_set_intensity,
43051 };
43052 diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
43053 index 5e20e6e..89025e6 100644
43054 --- a/drivers/video/backlight/backlight.c
43055 +++ b/drivers/video/backlight/backlight.c
43056 @@ -269,7 +269,7 @@ EXPORT_SYMBOL(backlight_force_update);
43057 * ERR_PTR() or a pointer to the newly allocated device.
43058 */
43059 struct backlight_device *backlight_device_register(const char *name,
43060 - struct device *parent, void *devdata, struct backlight_ops *ops)
43061 + struct device *parent, void *devdata, const struct backlight_ops *ops)
43062 {
43063 struct backlight_device *new_bd;
43064 int rc;
43065 diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c
43066 index 9677494..b4bcf80 100644
43067 --- a/drivers/video/backlight/corgi_lcd.c
43068 +++ b/drivers/video/backlight/corgi_lcd.c
43069 @@ -451,7 +451,7 @@ void corgi_lcd_limit_intensity(int limit)
43070 }
43071 EXPORT_SYMBOL(corgi_lcd_limit_intensity);
43072
43073 -static struct backlight_ops corgi_bl_ops = {
43074 +static const struct backlight_ops corgi_bl_ops = {
43075 .get_brightness = corgi_bl_get_intensity,
43076 .update_status = corgi_bl_update_status,
43077 };
43078 diff --git a/drivers/video/backlight/cr_bllcd.c b/drivers/video/backlight/cr_bllcd.c
43079 index b9fe62b..2914bf1 100644
43080 --- a/drivers/video/backlight/cr_bllcd.c
43081 +++ b/drivers/video/backlight/cr_bllcd.c
43082 @@ -108,7 +108,7 @@ static int cr_backlight_get_intensity(struct backlight_device *bd)
43083 return intensity;
43084 }
43085
43086 -static struct backlight_ops cr_backlight_ops = {
43087 +static const struct backlight_ops cr_backlight_ops = {
43088 .get_brightness = cr_backlight_get_intensity,
43089 .update_status = cr_backlight_set_intensity,
43090 };
43091 diff --git a/drivers/video/backlight/da903x_bl.c b/drivers/video/backlight/da903x_bl.c
43092 index 701a108..feacfd5 100644
43093 --- a/drivers/video/backlight/da903x_bl.c
43094 +++ b/drivers/video/backlight/da903x_bl.c
43095 @@ -94,7 +94,7 @@ static int da903x_backlight_get_brightness(struct backlight_device *bl)
43096 return data->current_brightness;
43097 }
43098
43099 -static struct backlight_ops da903x_backlight_ops = {
43100 +static const struct backlight_ops da903x_backlight_ops = {
43101 .update_status = da903x_backlight_update_status,
43102 .get_brightness = da903x_backlight_get_brightness,
43103 };
43104 diff --git a/drivers/video/backlight/generic_bl.c b/drivers/video/backlight/generic_bl.c
43105 index 6d27f62..e6d348e 100644
43106 --- a/drivers/video/backlight/generic_bl.c
43107 +++ b/drivers/video/backlight/generic_bl.c
43108 @@ -70,7 +70,7 @@ void corgibl_limit_intensity(int limit)
43109 }
43110 EXPORT_SYMBOL(corgibl_limit_intensity);
43111
43112 -static struct backlight_ops genericbl_ops = {
43113 +static const struct backlight_ops genericbl_ops = {
43114 .options = BL_CORE_SUSPENDRESUME,
43115 .get_brightness = genericbl_get_intensity,
43116 .update_status = genericbl_send_intensity,
43117 diff --git a/drivers/video/backlight/hp680_bl.c b/drivers/video/backlight/hp680_bl.c
43118 index 7fb4eef..f7cc528 100644
43119 --- a/drivers/video/backlight/hp680_bl.c
43120 +++ b/drivers/video/backlight/hp680_bl.c
43121 @@ -98,7 +98,7 @@ static int hp680bl_get_intensity(struct backlight_device *bd)
43122 return current_intensity;
43123 }
43124
43125 -static struct backlight_ops hp680bl_ops = {
43126 +static const struct backlight_ops hp680bl_ops = {
43127 .get_brightness = hp680bl_get_intensity,
43128 .update_status = hp680bl_set_intensity,
43129 };
43130 diff --git a/drivers/video/backlight/jornada720_bl.c b/drivers/video/backlight/jornada720_bl.c
43131 index 7aed256..db9071f 100644
43132 --- a/drivers/video/backlight/jornada720_bl.c
43133 +++ b/drivers/video/backlight/jornada720_bl.c
43134 @@ -93,7 +93,7 @@ out:
43135 return ret;
43136 }
43137
43138 -static struct backlight_ops jornada_bl_ops = {
43139 +static const struct backlight_ops jornada_bl_ops = {
43140 .get_brightness = jornada_bl_get_brightness,
43141 .update_status = jornada_bl_update_status,
43142 .options = BL_CORE_SUSPENDRESUME,
43143 diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
43144 index a38fda1..939e7b8 100644
43145 --- a/drivers/video/backlight/kb3886_bl.c
43146 +++ b/drivers/video/backlight/kb3886_bl.c
43147 @@ -134,7 +134,7 @@ static int kb3886bl_get_intensity(struct backlight_device *bd)
43148 return kb3886bl_intensity;
43149 }
43150
43151 -static struct backlight_ops kb3886bl_ops = {
43152 +static const struct backlight_ops kb3886bl_ops = {
43153 .get_brightness = kb3886bl_get_intensity,
43154 .update_status = kb3886bl_send_intensity,
43155 };
43156 diff --git a/drivers/video/backlight/locomolcd.c b/drivers/video/backlight/locomolcd.c
43157 index 6b488b8..00a9591 100644
43158 --- a/drivers/video/backlight/locomolcd.c
43159 +++ b/drivers/video/backlight/locomolcd.c
43160 @@ -141,7 +141,7 @@ static int locomolcd_get_intensity(struct backlight_device *bd)
43161 return current_intensity;
43162 }
43163
43164 -static struct backlight_ops locomobl_data = {
43165 +static const struct backlight_ops locomobl_data = {
43166 .get_brightness = locomolcd_get_intensity,
43167 .update_status = locomolcd_set_intensity,
43168 };
43169 diff --git a/drivers/video/backlight/mbp_nvidia_bl.c b/drivers/video/backlight/mbp_nvidia_bl.c
43170 index 99bdfa8..3dac448 100644
43171 --- a/drivers/video/backlight/mbp_nvidia_bl.c
43172 +++ b/drivers/video/backlight/mbp_nvidia_bl.c
43173 @@ -33,7 +33,7 @@ struct dmi_match_data {
43174 unsigned long iostart;
43175 unsigned long iolen;
43176 /* Backlight operations structure. */
43177 - struct backlight_ops backlight_ops;
43178 + const struct backlight_ops backlight_ops;
43179 };
43180
43181 /* Module parameters. */
43182 diff --git a/drivers/video/backlight/omap1_bl.c b/drivers/video/backlight/omap1_bl.c
43183 index cbad67e..3cf900e 100644
43184 --- a/drivers/video/backlight/omap1_bl.c
43185 +++ b/drivers/video/backlight/omap1_bl.c
43186 @@ -125,7 +125,7 @@ static int omapbl_get_intensity(struct backlight_device *dev)
43187 return bl->current_intensity;
43188 }
43189
43190 -static struct backlight_ops omapbl_ops = {
43191 +static const struct backlight_ops omapbl_ops = {
43192 .get_brightness = omapbl_get_intensity,
43193 .update_status = omapbl_update_status,
43194 };
43195 diff --git a/drivers/video/backlight/progear_bl.c b/drivers/video/backlight/progear_bl.c
43196 index 9edaf24..075786e 100644
43197 --- a/drivers/video/backlight/progear_bl.c
43198 +++ b/drivers/video/backlight/progear_bl.c
43199 @@ -54,7 +54,7 @@ static int progearbl_get_intensity(struct backlight_device *bd)
43200 return intensity - HW_LEVEL_MIN;
43201 }
43202
43203 -static struct backlight_ops progearbl_ops = {
43204 +static const struct backlight_ops progearbl_ops = {
43205 .get_brightness = progearbl_get_intensity,
43206 .update_status = progearbl_set_intensity,
43207 };
43208 diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
43209 index 8871662..df9e0b3 100644
43210 --- a/drivers/video/backlight/pwm_bl.c
43211 +++ b/drivers/video/backlight/pwm_bl.c
43212 @@ -56,7 +56,7 @@ static int pwm_backlight_get_brightness(struct backlight_device *bl)
43213 return bl->props.brightness;
43214 }
43215
43216 -static struct backlight_ops pwm_backlight_ops = {
43217 +static const struct backlight_ops pwm_backlight_ops = {
43218 .update_status = pwm_backlight_update_status,
43219 .get_brightness = pwm_backlight_get_brightness,
43220 };
43221 diff --git a/drivers/video/backlight/tosa_bl.c b/drivers/video/backlight/tosa_bl.c
43222 index 43edbad..e14ce4d 100644
43223 --- a/drivers/video/backlight/tosa_bl.c
43224 +++ b/drivers/video/backlight/tosa_bl.c
43225 @@ -72,7 +72,7 @@ static int tosa_bl_get_brightness(struct backlight_device *dev)
43226 return props->brightness;
43227 }
43228
43229 -static struct backlight_ops bl_ops = {
43230 +static const struct backlight_ops bl_ops = {
43231 .get_brightness = tosa_bl_get_brightness,
43232 .update_status = tosa_bl_update_status,
43233 };
43234 diff --git a/drivers/video/backlight/wm831x_bl.c b/drivers/video/backlight/wm831x_bl.c
43235 index 467bdb7..e32add3 100644
43236 --- a/drivers/video/backlight/wm831x_bl.c
43237 +++ b/drivers/video/backlight/wm831x_bl.c
43238 @@ -112,7 +112,7 @@ static int wm831x_backlight_get_brightness(struct backlight_device *bl)
43239 return data->current_brightness;
43240 }
43241
43242 -static struct backlight_ops wm831x_backlight_ops = {
43243 +static const struct backlight_ops wm831x_backlight_ops = {
43244 .options = BL_CORE_SUSPENDRESUME,
43245 .update_status = wm831x_backlight_update_status,
43246 .get_brightness = wm831x_backlight_get_brightness,
43247 diff --git a/drivers/video/bf54x-lq043fb.c b/drivers/video/bf54x-lq043fb.c
43248 index e49ae5e..db4e6f7 100644
43249 --- a/drivers/video/bf54x-lq043fb.c
43250 +++ b/drivers/video/bf54x-lq043fb.c
43251 @@ -463,7 +463,7 @@ static int bl_get_brightness(struct backlight_device *bd)
43252 return 0;
43253 }
43254
43255 -static struct backlight_ops bfin_lq043fb_bl_ops = {
43256 +static const struct backlight_ops bfin_lq043fb_bl_ops = {
43257 .get_brightness = bl_get_brightness,
43258 };
43259
43260 diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/bfin-t350mcqb-fb.c
43261 index 2c72a7c..d523e52 100644
43262 --- a/drivers/video/bfin-t350mcqb-fb.c
43263 +++ b/drivers/video/bfin-t350mcqb-fb.c
43264 @@ -381,7 +381,7 @@ static int bl_get_brightness(struct backlight_device *bd)
43265 return 0;
43266 }
43267
43268 -static struct backlight_ops bfin_lq043fb_bl_ops = {
43269 +static const struct backlight_ops bfin_lq043fb_bl_ops = {
43270 .get_brightness = bl_get_brightness,
43271 };
43272
43273 diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
43274 index f53b9f1..958bf4e 100644
43275 --- a/drivers/video/fbcmap.c
43276 +++ b/drivers/video/fbcmap.c
43277 @@ -266,8 +266,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
43278 rc = -ENODEV;
43279 goto out;
43280 }
43281 - if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
43282 - !info->fbops->fb_setcmap)) {
43283 + if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
43284 rc = -EINVAL;
43285 goto out1;
43286 }
43287 diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
43288 index 99bbd28..ad3829e 100644
43289 --- a/drivers/video/fbmem.c
43290 +++ b/drivers/video/fbmem.c
43291 @@ -403,7 +403,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
43292 image->dx += image->width + 8;
43293 }
43294 } else if (rotate == FB_ROTATE_UD) {
43295 - for (x = 0; x < num && image->dx >= 0; x++) {
43296 + for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
43297 info->fbops->fb_imageblit(info, image);
43298 image->dx -= image->width + 8;
43299 }
43300 @@ -415,7 +415,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
43301 image->dy += image->height + 8;
43302 }
43303 } else if (rotate == FB_ROTATE_CCW) {
43304 - for (x = 0; x < num && image->dy >= 0; x++) {
43305 + for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
43306 info->fbops->fb_imageblit(info, image);
43307 image->dy -= image->height + 8;
43308 }
43309 @@ -915,6 +915,8 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
43310 int flags = info->flags;
43311 int ret = 0;
43312
43313 + pax_track_stack();
43314 +
43315 if (var->activate & FB_ACTIVATE_INV_MODE) {
43316 struct fb_videomode mode1, mode2;
43317
43318 @@ -1040,6 +1042,8 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
43319 void __user *argp = (void __user *)arg;
43320 long ret = 0;
43321
43322 + pax_track_stack();
43323 +
43324 switch (cmd) {
43325 case FBIOGET_VSCREENINFO:
43326 if (!lock_fb_info(info))
43327 @@ -1119,7 +1123,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
43328 return -EFAULT;
43329 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
43330 return -EINVAL;
43331 - if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
43332 + if (con2fb.framebuffer >= FB_MAX)
43333 return -EINVAL;
43334 if (!registered_fb[con2fb.framebuffer])
43335 request_module("fb%d", con2fb.framebuffer);
43336 diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
43337 index f20eff8..3e4f622 100644
43338 --- a/drivers/video/geode/gx1fb_core.c
43339 +++ b/drivers/video/geode/gx1fb_core.c
43340 @@ -30,7 +30,7 @@ static int crt_option = 1;
43341 static char panel_option[32] = "";
43342
43343 /* Modes relevant to the GX1 (taken from modedb.c) */
43344 -static const struct fb_videomode __initdata gx1_modedb[] = {
43345 +static const struct fb_videomode __initconst gx1_modedb[] = {
43346 /* 640x480-60 VESA */
43347 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
43348 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
43349 diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
43350 index 896e53d..4d87d0b 100644
43351 --- a/drivers/video/gxt4500.c
43352 +++ b/drivers/video/gxt4500.c
43353 @@ -156,7 +156,7 @@ struct gxt4500_par {
43354 static char *mode_option;
43355
43356 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
43357 -static const struct fb_videomode defaultmode __devinitdata = {
43358 +static const struct fb_videomode defaultmode __devinitconst = {
43359 .refresh = 60,
43360 .xres = 1280,
43361 .yres = 1024,
43362 @@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, struct fb_info *info)
43363 return 0;
43364 }
43365
43366 -static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
43367 +static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
43368 .id = "IBM GXT4500P",
43369 .type = FB_TYPE_PACKED_PIXELS,
43370 .visual = FB_VISUAL_PSEUDOCOLOR,
43371 diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
43372 index f5bedee..28c6028 100644
43373 --- a/drivers/video/i810/i810_accel.c
43374 +++ b/drivers/video/i810/i810_accel.c
43375 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
43376 }
43377 }
43378 printk("ringbuffer lockup!!!\n");
43379 + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
43380 i810_report_error(mmio);
43381 par->dev_flags |= LOCKUP;
43382 info->pixmap.scan_align = 1;
43383 diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
43384 index 5743ea2..457f82c 100644
43385 --- a/drivers/video/i810/i810_main.c
43386 +++ b/drivers/video/i810/i810_main.c
43387 @@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info);
43388 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
43389
43390 /* PCI */
43391 -static const char *i810_pci_list[] __devinitdata = {
43392 +static const char *i810_pci_list[] __devinitconst = {
43393 "Intel(R) 810 Framebuffer Device" ,
43394 "Intel(R) 810-DC100 Framebuffer Device" ,
43395 "Intel(R) 810E Framebuffer Device" ,
43396 diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
43397 index 3c14e43..eafa544 100644
43398 --- a/drivers/video/logo/logo_linux_clut224.ppm
43399 +++ b/drivers/video/logo/logo_linux_clut224.ppm
43400 @@ -1,1604 +1,1123 @@
43401 P3
43402 -# Standard 224-color Linux logo
43403 80 80
43404 255
43405 - 0 0 0 0 0 0 0 0 0 0 0 0
43406 - 0 0 0 0 0 0 0 0 0 0 0 0
43407 - 0 0 0 0 0 0 0 0 0 0 0 0
43408 - 0 0 0 0 0 0 0 0 0 0 0 0
43409 - 0 0 0 0 0 0 0 0 0 0 0 0
43410 - 0 0 0 0 0 0 0 0 0 0 0 0
43411 - 0 0 0 0 0 0 0 0 0 0 0 0
43412 - 0 0 0 0 0 0 0 0 0 0 0 0
43413 - 0 0 0 0 0 0 0 0 0 0 0 0
43414 - 6 6 6 6 6 6 10 10 10 10 10 10
43415 - 10 10 10 6 6 6 6 6 6 6 6 6
43416 - 0 0 0 0 0 0 0 0 0 0 0 0
43417 - 0 0 0 0 0 0 0 0 0 0 0 0
43418 - 0 0 0 0 0 0 0 0 0 0 0 0
43419 - 0 0 0 0 0 0 0 0 0 0 0 0
43420 - 0 0 0 0 0 0 0 0 0 0 0 0
43421 - 0 0 0 0 0 0 0 0 0 0 0 0
43422 - 0 0 0 0 0 0 0 0 0 0 0 0
43423 - 0 0 0 0 0 0 0 0 0 0 0 0
43424 - 0 0 0 0 0 0 0 0 0 0 0 0
43425 - 0 0 0 0 0 0 0 0 0 0 0 0
43426 - 0 0 0 0 0 0 0 0 0 0 0 0
43427 - 0 0 0 0 0 0 0 0 0 0 0 0
43428 - 0 0 0 0 0 0 0 0 0 0 0 0
43429 - 0 0 0 0 0 0 0 0 0 0 0 0
43430 - 0 0 0 0 0 0 0 0 0 0 0 0
43431 - 0 0 0 0 0 0 0 0 0 0 0 0
43432 - 0 0 0 0 0 0 0 0 0 0 0 0
43433 - 0 0 0 6 6 6 10 10 10 14 14 14
43434 - 22 22 22 26 26 26 30 30 30 34 34 34
43435 - 30 30 30 30 30 30 26 26 26 18 18 18
43436 - 14 14 14 10 10 10 6 6 6 0 0 0
43437 - 0 0 0 0 0 0 0 0 0 0 0 0
43438 - 0 0 0 0 0 0 0 0 0 0 0 0
43439 - 0 0 0 0 0 0 0 0 0 0 0 0
43440 - 0 0 0 0 0 0 0 0 0 0 0 0
43441 - 0 0 0 0 0 0 0 0 0 0 0 0
43442 - 0 0 0 0 0 0 0 0 0 0 0 0
43443 - 0 0 0 0 0 0 0 0 0 0 0 0
43444 - 0 0 0 0 0 0 0 0 0 0 0 0
43445 - 0 0 0 0 0 0 0 0 0 0 0 0
43446 - 0 0 0 0 0 1 0 0 1 0 0 0
43447 - 0 0 0 0 0 0 0 0 0 0 0 0
43448 - 0 0 0 0 0 0 0 0 0 0 0 0
43449 - 0 0 0 0 0 0 0 0 0 0 0 0
43450 - 0 0 0 0 0 0 0 0 0 0 0 0
43451 - 0 0 0 0 0 0 0 0 0 0 0 0
43452 - 0 0 0 0 0 0 0 0 0 0 0 0
43453 - 6 6 6 14 14 14 26 26 26 42 42 42
43454 - 54 54 54 66 66 66 78 78 78 78 78 78
43455 - 78 78 78 74 74 74 66 66 66 54 54 54
43456 - 42 42 42 26 26 26 18 18 18 10 10 10
43457 - 6 6 6 0 0 0 0 0 0 0 0 0
43458 - 0 0 0 0 0 0 0 0 0 0 0 0
43459 - 0 0 0 0 0 0 0 0 0 0 0 0
43460 - 0 0 0 0 0 0 0 0 0 0 0 0
43461 - 0 0 0 0 0 0 0 0 0 0 0 0
43462 - 0 0 0 0 0 0 0 0 0 0 0 0
43463 - 0 0 0 0 0 0 0 0 0 0 0 0
43464 - 0 0 0 0 0 0 0 0 0 0 0 0
43465 - 0 0 0 0 0 0 0 0 0 0 0 0
43466 - 0 0 1 0 0 0 0 0 0 0 0 0
43467 - 0 0 0 0 0 0 0 0 0 0 0 0
43468 - 0 0 0 0 0 0 0 0 0 0 0 0
43469 - 0 0 0 0 0 0 0 0 0 0 0 0
43470 - 0 0 0 0 0 0 0 0 0 0 0 0
43471 - 0 0 0 0 0 0 0 0 0 0 0 0
43472 - 0 0 0 0 0 0 0 0 0 10 10 10
43473 - 22 22 22 42 42 42 66 66 66 86 86 86
43474 - 66 66 66 38 38 38 38 38 38 22 22 22
43475 - 26 26 26 34 34 34 54 54 54 66 66 66
43476 - 86 86 86 70 70 70 46 46 46 26 26 26
43477 - 14 14 14 6 6 6 0 0 0 0 0 0
43478 - 0 0 0 0 0 0 0 0 0 0 0 0
43479 - 0 0 0 0 0 0 0 0 0 0 0 0
43480 - 0 0 0 0 0 0 0 0 0 0 0 0
43481 - 0 0 0 0 0 0 0 0 0 0 0 0
43482 - 0 0 0 0 0 0 0 0 0 0 0 0
43483 - 0 0 0 0 0 0 0 0 0 0 0 0
43484 - 0 0 0 0 0 0 0 0 0 0 0 0
43485 - 0 0 0 0 0 0 0 0 0 0 0 0
43486 - 0 0 1 0 0 1 0 0 1 0 0 0
43487 - 0 0 0 0 0 0 0 0 0 0 0 0
43488 - 0 0 0 0 0 0 0 0 0 0 0 0
43489 - 0 0 0 0 0 0 0 0 0 0 0 0
43490 - 0 0 0 0 0 0 0 0 0 0 0 0
43491 - 0 0 0 0 0 0 0 0 0 0 0 0
43492 - 0 0 0 0 0 0 10 10 10 26 26 26
43493 - 50 50 50 82 82 82 58 58 58 6 6 6
43494 - 2 2 6 2 2 6 2 2 6 2 2 6
43495 - 2 2 6 2 2 6 2 2 6 2 2 6
43496 - 6 6 6 54 54 54 86 86 86 66 66 66
43497 - 38 38 38 18 18 18 6 6 6 0 0 0
43498 - 0 0 0 0 0 0 0 0 0 0 0 0
43499 - 0 0 0 0 0 0 0 0 0 0 0 0
43500 - 0 0 0 0 0 0 0 0 0 0 0 0
43501 - 0 0 0 0 0 0 0 0 0 0 0 0
43502 - 0 0 0 0 0 0 0 0 0 0 0 0
43503 - 0 0 0 0 0 0 0 0 0 0 0 0
43504 - 0 0 0 0 0 0 0 0 0 0 0 0
43505 - 0 0 0 0 0 0 0 0 0 0 0 0
43506 - 0 0 0 0 0 0 0 0 0 0 0 0
43507 - 0 0 0 0 0 0 0 0 0 0 0 0
43508 - 0 0 0 0 0 0 0 0 0 0 0 0
43509 - 0 0 0 0 0 0 0 0 0 0 0 0
43510 - 0 0 0 0 0 0 0 0 0 0 0 0
43511 - 0 0 0 0 0 0 0 0 0 0 0 0
43512 - 0 0 0 6 6 6 22 22 22 50 50 50
43513 - 78 78 78 34 34 34 2 2 6 2 2 6
43514 - 2 2 6 2 2 6 2 2 6 2 2 6
43515 - 2 2 6 2 2 6 2 2 6 2 2 6
43516 - 2 2 6 2 2 6 6 6 6 70 70 70
43517 - 78 78 78 46 46 46 22 22 22 6 6 6
43518 - 0 0 0 0 0 0 0 0 0 0 0 0
43519 - 0 0 0 0 0 0 0 0 0 0 0 0
43520 - 0 0 0 0 0 0 0 0 0 0 0 0
43521 - 0 0 0 0 0 0 0 0 0 0 0 0
43522 - 0 0 0 0 0 0 0 0 0 0 0 0
43523 - 0 0 0 0 0 0 0 0 0 0 0 0
43524 - 0 0 0 0 0 0 0 0 0 0 0 0
43525 - 0 0 0 0 0 0 0 0 0 0 0 0
43526 - 0 0 1 0 0 1 0 0 1 0 0 0
43527 - 0 0 0 0 0 0 0 0 0 0 0 0
43528 - 0 0 0 0 0 0 0 0 0 0 0 0
43529 - 0 0 0 0 0 0 0 0 0 0 0 0
43530 - 0 0 0 0 0 0 0 0 0 0 0 0
43531 - 0 0 0 0 0 0 0 0 0 0 0 0
43532 - 6 6 6 18 18 18 42 42 42 82 82 82
43533 - 26 26 26 2 2 6 2 2 6 2 2 6
43534 - 2 2 6 2 2 6 2 2 6 2 2 6
43535 - 2 2 6 2 2 6 2 2 6 14 14 14
43536 - 46 46 46 34 34 34 6 6 6 2 2 6
43537 - 42 42 42 78 78 78 42 42 42 18 18 18
43538 - 6 6 6 0 0 0 0 0 0 0 0 0
43539 - 0 0 0 0 0 0 0 0 0 0 0 0
43540 - 0 0 0 0 0 0 0 0 0 0 0 0
43541 - 0 0 0 0 0 0 0 0 0 0 0 0
43542 - 0 0 0 0 0 0 0 0 0 0 0 0
43543 - 0 0 0 0 0 0 0 0 0 0 0 0
43544 - 0 0 0 0 0 0 0 0 0 0 0 0
43545 - 0 0 0 0 0 0 0 0 0 0 0 0
43546 - 0 0 1 0 0 0 0 0 1 0 0 0
43547 - 0 0 0 0 0 0 0 0 0 0 0 0
43548 - 0 0 0 0 0 0 0 0 0 0 0 0
43549 - 0 0 0 0 0 0 0 0 0 0 0 0
43550 - 0 0 0 0 0 0 0 0 0 0 0 0
43551 - 0 0 0 0 0 0 0 0 0 0 0 0
43552 - 10 10 10 30 30 30 66 66 66 58 58 58
43553 - 2 2 6 2 2 6 2 2 6 2 2 6
43554 - 2 2 6 2 2 6 2 2 6 2 2 6
43555 - 2 2 6 2 2 6 2 2 6 26 26 26
43556 - 86 86 86 101 101 101 46 46 46 10 10 10
43557 - 2 2 6 58 58 58 70 70 70 34 34 34
43558 - 10 10 10 0 0 0 0 0 0 0 0 0
43559 - 0 0 0 0 0 0 0 0 0 0 0 0
43560 - 0 0 0 0 0 0 0 0 0 0 0 0
43561 - 0 0 0 0 0 0 0 0 0 0 0 0
43562 - 0 0 0 0 0 0 0 0 0 0 0 0
43563 - 0 0 0 0 0 0 0 0 0 0 0 0
43564 - 0 0 0 0 0 0 0 0 0 0 0 0
43565 - 0 0 0 0 0 0 0 0 0 0 0 0
43566 - 0 0 1 0 0 1 0 0 1 0 0 0
43567 - 0 0 0 0 0 0 0 0 0 0 0 0
43568 - 0 0 0 0 0 0 0 0 0 0 0 0
43569 - 0 0 0 0 0 0 0 0 0 0 0 0
43570 - 0 0 0 0 0 0 0 0 0 0 0 0
43571 - 0 0 0 0 0 0 0 0 0 0 0 0
43572 - 14 14 14 42 42 42 86 86 86 10 10 10
43573 - 2 2 6 2 2 6 2 2 6 2 2 6
43574 - 2 2 6 2 2 6 2 2 6 2 2 6
43575 - 2 2 6 2 2 6 2 2 6 30 30 30
43576 - 94 94 94 94 94 94 58 58 58 26 26 26
43577 - 2 2 6 6 6 6 78 78 78 54 54 54
43578 - 22 22 22 6 6 6 0 0 0 0 0 0
43579 - 0 0 0 0 0 0 0 0 0 0 0 0
43580 - 0 0 0 0 0 0 0 0 0 0 0 0
43581 - 0 0 0 0 0 0 0 0 0 0 0 0
43582 - 0 0 0 0 0 0 0 0 0 0 0 0
43583 - 0 0 0 0 0 0 0 0 0 0 0 0
43584 - 0 0 0 0 0 0 0 0 0 0 0 0
43585 - 0 0 0 0 0 0 0 0 0 0 0 0
43586 - 0 0 0 0 0 0 0 0 0 0 0 0
43587 - 0 0 0 0 0 0 0 0 0 0 0 0
43588 - 0 0 0 0 0 0 0 0 0 0 0 0
43589 - 0 0 0 0 0 0 0 0 0 0 0 0
43590 - 0 0 0 0 0 0 0 0 0 0 0 0
43591 - 0 0 0 0 0 0 0 0 0 6 6 6
43592 - 22 22 22 62 62 62 62 62 62 2 2 6
43593 - 2 2 6 2 2 6 2 2 6 2 2 6
43594 - 2 2 6 2 2 6 2 2 6 2 2 6
43595 - 2 2 6 2 2 6 2 2 6 26 26 26
43596 - 54 54 54 38 38 38 18 18 18 10 10 10
43597 - 2 2 6 2 2 6 34 34 34 82 82 82
43598 - 38 38 38 14 14 14 0 0 0 0 0 0
43599 - 0 0 0 0 0 0 0 0 0 0 0 0
43600 - 0 0 0 0 0 0 0 0 0 0 0 0
43601 - 0 0 0 0 0 0 0 0 0 0 0 0
43602 - 0 0 0 0 0 0 0 0 0 0 0 0
43603 - 0 0 0 0 0 0 0 0 0 0 0 0
43604 - 0 0 0 0 0 0 0 0 0 0 0 0
43605 - 0 0 0 0 0 0 0 0 0 0 0 0
43606 - 0 0 0 0 0 1 0 0 1 0 0 0
43607 - 0 0 0 0 0 0 0 0 0 0 0 0
43608 - 0 0 0 0 0 0 0 0 0 0 0 0
43609 - 0 0 0 0 0 0 0 0 0 0 0 0
43610 - 0 0 0 0 0 0 0 0 0 0 0 0
43611 - 0 0 0 0 0 0 0 0 0 6 6 6
43612 - 30 30 30 78 78 78 30 30 30 2 2 6
43613 - 2 2 6 2 2 6 2 2 6 2 2 6
43614 - 2 2 6 2 2 6 2 2 6 2 2 6
43615 - 2 2 6 2 2 6 2 2 6 10 10 10
43616 - 10 10 10 2 2 6 2 2 6 2 2 6
43617 - 2 2 6 2 2 6 2 2 6 78 78 78
43618 - 50 50 50 18 18 18 6 6 6 0 0 0
43619 - 0 0 0 0 0 0 0 0 0 0 0 0
43620 - 0 0 0 0 0 0 0 0 0 0 0 0
43621 - 0 0 0 0 0 0 0 0 0 0 0 0
43622 - 0 0 0 0 0 0 0 0 0 0 0 0
43623 - 0 0 0 0 0 0 0 0 0 0 0 0
43624 - 0 0 0 0 0 0 0 0 0 0 0 0
43625 - 0 0 0 0 0 0 0 0 0 0 0 0
43626 - 0 0 1 0 0 0 0 0 0 0 0 0
43627 - 0 0 0 0 0 0 0 0 0 0 0 0
43628 - 0 0 0 0 0 0 0 0 0 0 0 0
43629 - 0 0 0 0 0 0 0 0 0 0 0 0
43630 - 0 0 0 0 0 0 0 0 0 0 0 0
43631 - 0 0 0 0 0 0 0 0 0 10 10 10
43632 - 38 38 38 86 86 86 14 14 14 2 2 6
43633 - 2 2 6 2 2 6 2 2 6 2 2 6
43634 - 2 2 6 2 2 6 2 2 6 2 2 6
43635 - 2 2 6 2 2 6 2 2 6 2 2 6
43636 - 2 2 6 2 2 6 2 2 6 2 2 6
43637 - 2 2 6 2 2 6 2 2 6 54 54 54
43638 - 66 66 66 26 26 26 6 6 6 0 0 0
43639 - 0 0 0 0 0 0 0 0 0 0 0 0
43640 - 0 0 0 0 0 0 0 0 0 0 0 0
43641 - 0 0 0 0 0 0 0 0 0 0 0 0
43642 - 0 0 0 0 0 0 0 0 0 0 0 0
43643 - 0 0 0 0 0 0 0 0 0 0 0 0
43644 - 0 0 0 0 0 0 0 0 0 0 0 0
43645 - 0 0 0 0 0 0 0 0 0 0 0 0
43646 - 0 0 0 0 0 1 0 0 1 0 0 0
43647 - 0 0 0 0 0 0 0 0 0 0 0 0
43648 - 0 0 0 0 0 0 0 0 0 0 0 0
43649 - 0 0 0 0 0 0 0 0 0 0 0 0
43650 - 0 0 0 0 0 0 0 0 0 0 0 0
43651 - 0 0 0 0 0 0 0 0 0 14 14 14
43652 - 42 42 42 82 82 82 2 2 6 2 2 6
43653 - 2 2 6 6 6 6 10 10 10 2 2 6
43654 - 2 2 6 2 2 6 2 2 6 2 2 6
43655 - 2 2 6 2 2 6 2 2 6 6 6 6
43656 - 14 14 14 10 10 10 2 2 6 2 2 6
43657 - 2 2 6 2 2 6 2 2 6 18 18 18
43658 - 82 82 82 34 34 34 10 10 10 0 0 0
43659 - 0 0 0 0 0 0 0 0 0 0 0 0
43660 - 0 0 0 0 0 0 0 0 0 0 0 0
43661 - 0 0 0 0 0 0 0 0 0 0 0 0
43662 - 0 0 0 0 0 0 0 0 0 0 0 0
43663 - 0 0 0 0 0 0 0 0 0 0 0 0
43664 - 0 0 0 0 0 0 0 0 0 0 0 0
43665 - 0 0 0 0 0 0 0 0 0 0 0 0
43666 - 0 0 1 0 0 0 0 0 0 0 0 0
43667 - 0 0 0 0 0 0 0 0 0 0 0 0
43668 - 0 0 0 0 0 0 0 0 0 0 0 0
43669 - 0 0 0 0 0 0 0 0 0 0 0 0
43670 - 0 0 0 0 0 0 0 0 0 0 0 0
43671 - 0 0 0 0 0 0 0 0 0 14 14 14
43672 - 46 46 46 86 86 86 2 2 6 2 2 6
43673 - 6 6 6 6 6 6 22 22 22 34 34 34
43674 - 6 6 6 2 2 6 2 2 6 2 2 6
43675 - 2 2 6 2 2 6 18 18 18 34 34 34
43676 - 10 10 10 50 50 50 22 22 22 2 2 6
43677 - 2 2 6 2 2 6 2 2 6 10 10 10
43678 - 86 86 86 42 42 42 14 14 14 0 0 0
43679 - 0 0 0 0 0 0 0 0 0 0 0 0
43680 - 0 0 0 0 0 0 0 0 0 0 0 0
43681 - 0 0 0 0 0 0 0 0 0 0 0 0
43682 - 0 0 0 0 0 0 0 0 0 0 0 0
43683 - 0 0 0 0 0 0 0 0 0 0 0 0
43684 - 0 0 0 0 0 0 0 0 0 0 0 0
43685 - 0 0 0 0 0 0 0 0 0 0 0 0
43686 - 0 0 1 0 0 1 0 0 1 0 0 0
43687 - 0 0 0 0 0 0 0 0 0 0 0 0
43688 - 0 0 0 0 0 0 0 0 0 0 0 0
43689 - 0 0 0 0 0 0 0 0 0 0 0 0
43690 - 0 0 0 0 0 0 0 0 0 0 0 0
43691 - 0 0 0 0 0 0 0 0 0 14 14 14
43692 - 46 46 46 86 86 86 2 2 6 2 2 6
43693 - 38 38 38 116 116 116 94 94 94 22 22 22
43694 - 22 22 22 2 2 6 2 2 6 2 2 6
43695 - 14 14 14 86 86 86 138 138 138 162 162 162
43696 -154 154 154 38 38 38 26 26 26 6 6 6
43697 - 2 2 6 2 2 6 2 2 6 2 2 6
43698 - 86 86 86 46 46 46 14 14 14 0 0 0
43699 - 0 0 0 0 0 0 0 0 0 0 0 0
43700 - 0 0 0 0 0 0 0 0 0 0 0 0
43701 - 0 0 0 0 0 0 0 0 0 0 0 0
43702 - 0 0 0 0 0 0 0 0 0 0 0 0
43703 - 0 0 0 0 0 0 0 0 0 0 0 0
43704 - 0 0 0 0 0 0 0 0 0 0 0 0
43705 - 0 0 0 0 0 0 0 0 0 0 0 0
43706 - 0 0 0 0 0 0 0 0 0 0 0 0
43707 - 0 0 0 0 0 0 0 0 0 0 0 0
43708 - 0 0 0 0 0 0 0 0 0 0 0 0
43709 - 0 0 0 0 0 0 0 0 0 0 0 0
43710 - 0 0 0 0 0 0 0 0 0 0 0 0
43711 - 0 0 0 0 0 0 0 0 0 14 14 14
43712 - 46 46 46 86 86 86 2 2 6 14 14 14
43713 -134 134 134 198 198 198 195 195 195 116 116 116
43714 - 10 10 10 2 2 6 2 2 6 6 6 6
43715 -101 98 89 187 187 187 210 210 210 218 218 218
43716 -214 214 214 134 134 134 14 14 14 6 6 6
43717 - 2 2 6 2 2 6 2 2 6 2 2 6
43718 - 86 86 86 50 50 50 18 18 18 6 6 6
43719 - 0 0 0 0 0 0 0 0 0 0 0 0
43720 - 0 0 0 0 0 0 0 0 0 0 0 0
43721 - 0 0 0 0 0 0 0 0 0 0 0 0
43722 - 0 0 0 0 0 0 0 0 0 0 0 0
43723 - 0 0 0 0 0 0 0 0 0 0 0 0
43724 - 0 0 0 0 0 0 0 0 0 0 0 0
43725 - 0 0 0 0 0 0 0 0 1 0 0 0
43726 - 0 0 1 0 0 1 0 0 1 0 0 0
43727 - 0 0 0 0 0 0 0 0 0 0 0 0
43728 - 0 0 0 0 0 0 0 0 0 0 0 0
43729 - 0 0 0 0 0 0 0 0 0 0 0 0
43730 - 0 0 0 0 0 0 0 0 0 0 0 0
43731 - 0 0 0 0 0 0 0 0 0 14 14 14
43732 - 46 46 46 86 86 86 2 2 6 54 54 54
43733 -218 218 218 195 195 195 226 226 226 246 246 246
43734 - 58 58 58 2 2 6 2 2 6 30 30 30
43735 -210 210 210 253 253 253 174 174 174 123 123 123
43736 -221 221 221 234 234 234 74 74 74 2 2 6
43737 - 2 2 6 2 2 6 2 2 6 2 2 6
43738 - 70 70 70 58 58 58 22 22 22 6 6 6
43739 - 0 0 0 0 0 0 0 0 0 0 0 0
43740 - 0 0 0 0 0 0 0 0 0 0 0 0
43741 - 0 0 0 0 0 0 0 0 0 0 0 0
43742 - 0 0 0 0 0 0 0 0 0 0 0 0
43743 - 0 0 0 0 0 0 0 0 0 0 0 0
43744 - 0 0 0 0 0 0 0 0 0 0 0 0
43745 - 0 0 0 0 0 0 0 0 0 0 0 0
43746 - 0 0 0 0 0 0 0 0 0 0 0 0
43747 - 0 0 0 0 0 0 0 0 0 0 0 0
43748 - 0 0 0 0 0 0 0 0 0 0 0 0
43749 - 0 0 0 0 0 0 0 0 0 0 0 0
43750 - 0 0 0 0 0 0 0 0 0 0 0 0
43751 - 0 0 0 0 0 0 0 0 0 14 14 14
43752 - 46 46 46 82 82 82 2 2 6 106 106 106
43753 -170 170 170 26 26 26 86 86 86 226 226 226
43754 -123 123 123 10 10 10 14 14 14 46 46 46
43755 -231 231 231 190 190 190 6 6 6 70 70 70
43756 - 90 90 90 238 238 238 158 158 158 2 2 6
43757 - 2 2 6 2 2 6 2 2 6 2 2 6
43758 - 70 70 70 58 58 58 22 22 22 6 6 6
43759 - 0 0 0 0 0 0 0 0 0 0 0 0
43760 - 0 0 0 0 0 0 0 0 0 0 0 0
43761 - 0 0 0 0 0 0 0 0 0 0 0 0
43762 - 0 0 0 0 0 0 0 0 0 0 0 0
43763 - 0 0 0 0 0 0 0 0 0 0 0 0
43764 - 0 0 0 0 0 0 0 0 0 0 0 0
43765 - 0 0 0 0 0 0 0 0 1 0 0 0
43766 - 0 0 1 0 0 1 0 0 1 0 0 0
43767 - 0 0 0 0 0 0 0 0 0 0 0 0
43768 - 0 0 0 0 0 0 0 0 0 0 0 0
43769 - 0 0 0 0 0 0 0 0 0 0 0 0
43770 - 0 0 0 0 0 0 0 0 0 0 0 0
43771 - 0 0 0 0 0 0 0 0 0 14 14 14
43772 - 42 42 42 86 86 86 6 6 6 116 116 116
43773 -106 106 106 6 6 6 70 70 70 149 149 149
43774 -128 128 128 18 18 18 38 38 38 54 54 54
43775 -221 221 221 106 106 106 2 2 6 14 14 14
43776 - 46 46 46 190 190 190 198 198 198 2 2 6
43777 - 2 2 6 2 2 6 2 2 6 2 2 6
43778 - 74 74 74 62 62 62 22 22 22 6 6 6
43779 - 0 0 0 0 0 0 0 0 0 0 0 0
43780 - 0 0 0 0 0 0 0 0 0 0 0 0
43781 - 0 0 0 0 0 0 0 0 0 0 0 0
43782 - 0 0 0 0 0 0 0 0 0 0 0 0
43783 - 0 0 0 0 0 0 0 0 0 0 0 0
43784 - 0 0 0 0 0 0 0 0 0 0 0 0
43785 - 0 0 0 0 0 0 0 0 1 0 0 0
43786 - 0 0 1 0 0 0 0 0 1 0 0 0
43787 - 0 0 0 0 0 0 0 0 0 0 0 0
43788 - 0 0 0 0 0 0 0 0 0 0 0 0
43789 - 0 0 0 0 0 0 0 0 0 0 0 0
43790 - 0 0 0 0 0 0 0 0 0 0 0 0
43791 - 0 0 0 0 0 0 0 0 0 14 14 14
43792 - 42 42 42 94 94 94 14 14 14 101 101 101
43793 -128 128 128 2 2 6 18 18 18 116 116 116
43794 -118 98 46 121 92 8 121 92 8 98 78 10
43795 -162 162 162 106 106 106 2 2 6 2 2 6
43796 - 2 2 6 195 195 195 195 195 195 6 6 6
43797 - 2 2 6 2 2 6 2 2 6 2 2 6
43798 - 74 74 74 62 62 62 22 22 22 6 6 6
43799 - 0 0 0 0 0 0 0 0 0 0 0 0
43800 - 0 0 0 0 0 0 0 0 0 0 0 0
43801 - 0 0 0 0 0 0 0 0 0 0 0 0
43802 - 0 0 0 0 0 0 0 0 0 0 0 0
43803 - 0 0 0 0 0 0 0 0 0 0 0 0
43804 - 0 0 0 0 0 0 0 0 0 0 0 0
43805 - 0 0 0 0 0 0 0 0 1 0 0 1
43806 - 0 0 1 0 0 0 0 0 1 0 0 0
43807 - 0 0 0 0 0 0 0 0 0 0 0 0
43808 - 0 0 0 0 0 0 0 0 0 0 0 0
43809 - 0 0 0 0 0 0 0 0 0 0 0 0
43810 - 0 0 0 0 0 0 0 0 0 0 0 0
43811 - 0 0 0 0 0 0 0 0 0 10 10 10
43812 - 38 38 38 90 90 90 14 14 14 58 58 58
43813 -210 210 210 26 26 26 54 38 6 154 114 10
43814 -226 170 11 236 186 11 225 175 15 184 144 12
43815 -215 174 15 175 146 61 37 26 9 2 2 6
43816 - 70 70 70 246 246 246 138 138 138 2 2 6
43817 - 2 2 6 2 2 6 2 2 6 2 2 6
43818 - 70 70 70 66 66 66 26 26 26 6 6 6
43819 - 0 0 0 0 0 0 0 0 0 0 0 0
43820 - 0 0 0 0 0 0 0 0 0 0 0 0
43821 - 0 0 0 0 0 0 0 0 0 0 0 0
43822 - 0 0 0 0 0 0 0 0 0 0 0 0
43823 - 0 0 0 0 0 0 0 0 0 0 0 0
43824 - 0 0 0 0 0 0 0 0 0 0 0 0
43825 - 0 0 0 0 0 0 0 0 0 0 0 0
43826 - 0 0 0 0 0 0 0 0 0 0 0 0
43827 - 0 0 0 0 0 0 0 0 0 0 0 0
43828 - 0 0 0 0 0 0 0 0 0 0 0 0
43829 - 0 0 0 0 0 0 0 0 0 0 0 0
43830 - 0 0 0 0 0 0 0 0 0 0 0 0
43831 - 0 0 0 0 0 0 0 0 0 10 10 10
43832 - 38 38 38 86 86 86 14 14 14 10 10 10
43833 -195 195 195 188 164 115 192 133 9 225 175 15
43834 -239 182 13 234 190 10 232 195 16 232 200 30
43835 -245 207 45 241 208 19 232 195 16 184 144 12
43836 -218 194 134 211 206 186 42 42 42 2 2 6
43837 - 2 2 6 2 2 6 2 2 6 2 2 6
43838 - 50 50 50 74 74 74 30 30 30 6 6 6
43839 - 0 0 0 0 0 0 0 0 0 0 0 0
43840 - 0 0 0 0 0 0 0 0 0 0 0 0
43841 - 0 0 0 0 0 0 0 0 0 0 0 0
43842 - 0 0 0 0 0 0 0 0 0 0 0 0
43843 - 0 0 0 0 0 0 0 0 0 0 0 0
43844 - 0 0 0 0 0 0 0 0 0 0 0 0
43845 - 0 0 0 0 0 0 0 0 0 0 0 0
43846 - 0 0 0 0 0 0 0 0 0 0 0 0
43847 - 0 0 0 0 0 0 0 0 0 0 0 0
43848 - 0 0 0 0 0 0 0 0 0 0 0 0
43849 - 0 0 0 0 0 0 0 0 0 0 0 0
43850 - 0 0 0 0 0 0 0 0 0 0 0 0
43851 - 0 0 0 0 0 0 0 0 0 10 10 10
43852 - 34 34 34 86 86 86 14 14 14 2 2 6
43853 -121 87 25 192 133 9 219 162 10 239 182 13
43854 -236 186 11 232 195 16 241 208 19 244 214 54
43855 -246 218 60 246 218 38 246 215 20 241 208 19
43856 -241 208 19 226 184 13 121 87 25 2 2 6
43857 - 2 2 6 2 2 6 2 2 6 2 2 6
43858 - 50 50 50 82 82 82 34 34 34 10 10 10
43859 - 0 0 0 0 0 0 0 0 0 0 0 0
43860 - 0 0 0 0 0 0 0 0 0 0 0 0
43861 - 0 0 0 0 0 0 0 0 0 0 0 0
43862 - 0 0 0 0 0 0 0 0 0 0 0 0
43863 - 0 0 0 0 0 0 0 0 0 0 0 0
43864 - 0 0 0 0 0 0 0 0 0 0 0 0
43865 - 0 0 0 0 0 0 0 0 0 0 0 0
43866 - 0 0 0 0 0 0 0 0 0 0 0 0
43867 - 0 0 0 0 0 0 0 0 0 0 0 0
43868 - 0 0 0 0 0 0 0 0 0 0 0 0
43869 - 0 0 0 0 0 0 0 0 0 0 0 0
43870 - 0 0 0 0 0 0 0 0 0 0 0 0
43871 - 0 0 0 0 0 0 0 0 0 10 10 10
43872 - 34 34 34 82 82 82 30 30 30 61 42 6
43873 -180 123 7 206 145 10 230 174 11 239 182 13
43874 -234 190 10 238 202 15 241 208 19 246 218 74
43875 -246 218 38 246 215 20 246 215 20 246 215 20
43876 -226 184 13 215 174 15 184 144 12 6 6 6
43877 - 2 2 6 2 2 6 2 2 6 2 2 6
43878 - 26 26 26 94 94 94 42 42 42 14 14 14
43879 - 0 0 0 0 0 0 0 0 0 0 0 0
43880 - 0 0 0 0 0 0 0 0 0 0 0 0
43881 - 0 0 0 0 0 0 0 0 0 0 0 0
43882 - 0 0 0 0 0 0 0 0 0 0 0 0
43883 - 0 0 0 0 0 0 0 0 0 0 0 0
43884 - 0 0 0 0 0 0 0 0 0 0 0 0
43885 - 0 0 0 0 0 0 0 0 0 0 0 0
43886 - 0 0 0 0 0 0 0 0 0 0 0 0
43887 - 0 0 0 0 0 0 0 0 0 0 0 0
43888 - 0 0 0 0 0 0 0 0 0 0 0 0
43889 - 0 0 0 0 0 0 0 0 0 0 0 0
43890 - 0 0 0 0 0 0 0 0 0 0 0 0
43891 - 0 0 0 0 0 0 0 0 0 10 10 10
43892 - 30 30 30 78 78 78 50 50 50 104 69 6
43893 -192 133 9 216 158 10 236 178 12 236 186 11
43894 -232 195 16 241 208 19 244 214 54 245 215 43
43895 -246 215 20 246 215 20 241 208 19 198 155 10
43896 -200 144 11 216 158 10 156 118 10 2 2 6
43897 - 2 2 6 2 2 6 2 2 6 2 2 6
43898 - 6 6 6 90 90 90 54 54 54 18 18 18
43899 - 6 6 6 0 0 0 0 0 0 0 0 0
43900 - 0 0 0 0 0 0 0 0 0 0 0 0
43901 - 0 0 0 0 0 0 0 0 0 0 0 0
43902 - 0 0 0 0 0 0 0 0 0 0 0 0
43903 - 0 0 0 0 0 0 0 0 0 0 0 0
43904 - 0 0 0 0 0 0 0 0 0 0 0 0
43905 - 0 0 0 0 0 0 0 0 0 0 0 0
43906 - 0 0 0 0 0 0 0 0 0 0 0 0
43907 - 0 0 0 0 0 0 0 0 0 0 0 0
43908 - 0 0 0 0 0 0 0 0 0 0 0 0
43909 - 0 0 0 0 0 0 0 0 0 0 0 0
43910 - 0 0 0 0 0 0 0 0 0 0 0 0
43911 - 0 0 0 0 0 0 0 0 0 10 10 10
43912 - 30 30 30 78 78 78 46 46 46 22 22 22
43913 -137 92 6 210 162 10 239 182 13 238 190 10
43914 -238 202 15 241 208 19 246 215 20 246 215 20
43915 -241 208 19 203 166 17 185 133 11 210 150 10
43916 -216 158 10 210 150 10 102 78 10 2 2 6
43917 - 6 6 6 54 54 54 14 14 14 2 2 6
43918 - 2 2 6 62 62 62 74 74 74 30 30 30
43919 - 10 10 10 0 0 0 0 0 0 0 0 0
43920 - 0 0 0 0 0 0 0 0 0 0 0 0
43921 - 0 0 0 0 0 0 0 0 0 0 0 0
43922 - 0 0 0 0 0 0 0 0 0 0 0 0
43923 - 0 0 0 0 0 0 0 0 0 0 0 0
43924 - 0 0 0 0 0 0 0 0 0 0 0 0
43925 - 0 0 0 0 0 0 0 0 0 0 0 0
43926 - 0 0 0 0 0 0 0 0 0 0 0 0
43927 - 0 0 0 0 0 0 0 0 0 0 0 0
43928 - 0 0 0 0 0 0 0 0 0 0 0 0
43929 - 0 0 0 0 0 0 0 0 0 0 0 0
43930 - 0 0 0 0 0 0 0 0 0 0 0 0
43931 - 0 0 0 0 0 0 0 0 0 10 10 10
43932 - 34 34 34 78 78 78 50 50 50 6 6 6
43933 - 94 70 30 139 102 15 190 146 13 226 184 13
43934 -232 200 30 232 195 16 215 174 15 190 146 13
43935 -168 122 10 192 133 9 210 150 10 213 154 11
43936 -202 150 34 182 157 106 101 98 89 2 2 6
43937 - 2 2 6 78 78 78 116 116 116 58 58 58
43938 - 2 2 6 22 22 22 90 90 90 46 46 46
43939 - 18 18 18 6 6 6 0 0 0 0 0 0
43940 - 0 0 0 0 0 0 0 0 0 0 0 0
43941 - 0 0 0 0 0 0 0 0 0 0 0 0
43942 - 0 0 0 0 0 0 0 0 0 0 0 0
43943 - 0 0 0 0 0 0 0 0 0 0 0 0
43944 - 0 0 0 0 0 0 0 0 0 0 0 0
43945 - 0 0 0 0 0 0 0 0 0 0 0 0
43946 - 0 0 0 0 0 0 0 0 0 0 0 0
43947 - 0 0 0 0 0 0 0 0 0 0 0 0
43948 - 0 0 0 0 0 0 0 0 0 0 0 0
43949 - 0 0 0 0 0 0 0 0 0 0 0 0
43950 - 0 0 0 0 0 0 0 0 0 0 0 0
43951 - 0 0 0 0 0 0 0 0 0 10 10 10
43952 - 38 38 38 86 86 86 50 50 50 6 6 6
43953 -128 128 128 174 154 114 156 107 11 168 122 10
43954 -198 155 10 184 144 12 197 138 11 200 144 11
43955 -206 145 10 206 145 10 197 138 11 188 164 115
43956 -195 195 195 198 198 198 174 174 174 14 14 14
43957 - 2 2 6 22 22 22 116 116 116 116 116 116
43958 - 22 22 22 2 2 6 74 74 74 70 70 70
43959 - 30 30 30 10 10 10 0 0 0 0 0 0
43960 - 0 0 0 0 0 0 0 0 0 0 0 0
43961 - 0 0 0 0 0 0 0 0 0 0 0 0
43962 - 0 0 0 0 0 0 0 0 0 0 0 0
43963 - 0 0 0 0 0 0 0 0 0 0 0 0
43964 - 0 0 0 0 0 0 0 0 0 0 0 0
43965 - 0 0 0 0 0 0 0 0 0 0 0 0
43966 - 0 0 0 0 0 0 0 0 0 0 0 0
43967 - 0 0 0 0 0 0 0 0 0 0 0 0
43968 - 0 0 0 0 0 0 0 0 0 0 0 0
43969 - 0 0 0 0 0 0 0 0 0 0 0 0
43970 - 0 0 0 0 0 0 0 0 0 0 0 0
43971 - 0 0 0 0 0 0 6 6 6 18 18 18
43972 - 50 50 50 101 101 101 26 26 26 10 10 10
43973 -138 138 138 190 190 190 174 154 114 156 107 11
43974 -197 138 11 200 144 11 197 138 11 192 133 9
43975 -180 123 7 190 142 34 190 178 144 187 187 187
43976 -202 202 202 221 221 221 214 214 214 66 66 66
43977 - 2 2 6 2 2 6 50 50 50 62 62 62
43978 - 6 6 6 2 2 6 10 10 10 90 90 90
43979 - 50 50 50 18 18 18 6 6 6 0 0 0
43980 - 0 0 0 0 0 0 0 0 0 0 0 0
43981 - 0 0 0 0 0 0 0 0 0 0 0 0
43982 - 0 0 0 0 0 0 0 0 0 0 0 0
43983 - 0 0 0 0 0 0 0 0 0 0 0 0
43984 - 0 0 0 0 0 0 0 0 0 0 0 0
43985 - 0 0 0 0 0 0 0 0 0 0 0 0
43986 - 0 0 0 0 0 0 0 0 0 0 0 0
43987 - 0 0 0 0 0 0 0 0 0 0 0 0
43988 - 0 0 0 0 0 0 0 0 0 0 0 0
43989 - 0 0 0 0 0 0 0 0 0 0 0 0
43990 - 0 0 0 0 0 0 0 0 0 0 0 0
43991 - 0 0 0 0 0 0 10 10 10 34 34 34
43992 - 74 74 74 74 74 74 2 2 6 6 6 6
43993 -144 144 144 198 198 198 190 190 190 178 166 146
43994 -154 121 60 156 107 11 156 107 11 168 124 44
43995 -174 154 114 187 187 187 190 190 190 210 210 210
43996 -246 246 246 253 253 253 253 253 253 182 182 182
43997 - 6 6 6 2 2 6 2 2 6 2 2 6
43998 - 2 2 6 2 2 6 2 2 6 62 62 62
43999 - 74 74 74 34 34 34 14 14 14 0 0 0
44000 - 0 0 0 0 0 0 0 0 0 0 0 0
44001 - 0 0 0 0 0 0 0 0 0 0 0 0
44002 - 0 0 0 0 0 0 0 0 0 0 0 0
44003 - 0 0 0 0 0 0 0 0 0 0 0 0
44004 - 0 0 0 0 0 0 0 0 0 0 0 0
44005 - 0 0 0 0 0 0 0 0 0 0 0 0
44006 - 0 0 0 0 0 0 0 0 0 0 0 0
44007 - 0 0 0 0 0 0 0 0 0 0 0 0
44008 - 0 0 0 0 0 0 0 0 0 0 0 0
44009 - 0 0 0 0 0 0 0 0 0 0 0 0
44010 - 0 0 0 0 0 0 0 0 0 0 0 0
44011 - 0 0 0 10 10 10 22 22 22 54 54 54
44012 - 94 94 94 18 18 18 2 2 6 46 46 46
44013 -234 234 234 221 221 221 190 190 190 190 190 190
44014 -190 190 190 187 187 187 187 187 187 190 190 190
44015 -190 190 190 195 195 195 214 214 214 242 242 242
44016 -253 253 253 253 253 253 253 253 253 253 253 253
44017 - 82 82 82 2 2 6 2 2 6 2 2 6
44018 - 2 2 6 2 2 6 2 2 6 14 14 14
44019 - 86 86 86 54 54 54 22 22 22 6 6 6
44020 - 0 0 0 0 0 0 0 0 0 0 0 0
44021 - 0 0 0 0 0 0 0 0 0 0 0 0
44022 - 0 0 0 0 0 0 0 0 0 0 0 0
44023 - 0 0 0 0 0 0 0 0 0 0 0 0
44024 - 0 0 0 0 0 0 0 0 0 0 0 0
44025 - 0 0 0 0 0 0 0 0 0 0 0 0
44026 - 0 0 0 0 0 0 0 0 0 0 0 0
44027 - 0 0 0 0 0 0 0 0 0 0 0 0
44028 - 0 0 0 0 0 0 0 0 0 0 0 0
44029 - 0 0 0 0 0 0 0 0 0 0 0 0
44030 - 0 0 0 0 0 0 0 0 0 0 0 0
44031 - 6 6 6 18 18 18 46 46 46 90 90 90
44032 - 46 46 46 18 18 18 6 6 6 182 182 182
44033 -253 253 253 246 246 246 206 206 206 190 190 190
44034 -190 190 190 190 190 190 190 190 190 190 190 190
44035 -206 206 206 231 231 231 250 250 250 253 253 253
44036 -253 253 253 253 253 253 253 253 253 253 253 253
44037 -202 202 202 14 14 14 2 2 6 2 2 6
44038 - 2 2 6 2 2 6 2 2 6 2 2 6
44039 - 42 42 42 86 86 86 42 42 42 18 18 18
44040 - 6 6 6 0 0 0 0 0 0 0 0 0
44041 - 0 0 0 0 0 0 0 0 0 0 0 0
44042 - 0 0 0 0 0 0 0 0 0 0 0 0
44043 - 0 0 0 0 0 0 0 0 0 0 0 0
44044 - 0 0 0 0 0 0 0 0 0 0 0 0
44045 - 0 0 0 0 0 0 0 0 0 0 0 0
44046 - 0 0 0 0 0 0 0 0 0 0 0 0
44047 - 0 0 0 0 0 0 0 0 0 0 0 0
44048 - 0 0 0 0 0 0 0 0 0 0 0 0
44049 - 0 0 0 0 0 0 0 0 0 0 0 0
44050 - 0 0 0 0 0 0 0 0 0 6 6 6
44051 - 14 14 14 38 38 38 74 74 74 66 66 66
44052 - 2 2 6 6 6 6 90 90 90 250 250 250
44053 -253 253 253 253 253 253 238 238 238 198 198 198
44054 -190 190 190 190 190 190 195 195 195 221 221 221
44055 -246 246 246 253 253 253 253 253 253 253 253 253
44056 -253 253 253 253 253 253 253 253 253 253 253 253
44057 -253 253 253 82 82 82 2 2 6 2 2 6
44058 - 2 2 6 2 2 6 2 2 6 2 2 6
44059 - 2 2 6 78 78 78 70 70 70 34 34 34
44060 - 14 14 14 6 6 6 0 0 0 0 0 0
44061 - 0 0 0 0 0 0 0 0 0 0 0 0
44062 - 0 0 0 0 0 0 0 0 0 0 0 0
44063 - 0 0 0 0 0 0 0 0 0 0 0 0
44064 - 0 0 0 0 0 0 0 0 0 0 0 0
44065 - 0 0 0 0 0 0 0 0 0 0 0 0
44066 - 0 0 0 0 0 0 0 0 0 0 0 0
44067 - 0 0 0 0 0 0 0 0 0 0 0 0
44068 - 0 0 0 0 0 0 0 0 0 0 0 0
44069 - 0 0 0 0 0 0 0 0 0 0 0 0
44070 - 0 0 0 0 0 0 0 0 0 14 14 14
44071 - 34 34 34 66 66 66 78 78 78 6 6 6
44072 - 2 2 6 18 18 18 218 218 218 253 253 253
44073 -253 253 253 253 253 253 253 253 253 246 246 246
44074 -226 226 226 231 231 231 246 246 246 253 253 253
44075 -253 253 253 253 253 253 253 253 253 253 253 253
44076 -253 253 253 253 253 253 253 253 253 253 253 253
44077 -253 253 253 178 178 178 2 2 6 2 2 6
44078 - 2 2 6 2 2 6 2 2 6 2 2 6
44079 - 2 2 6 18 18 18 90 90 90 62 62 62
44080 - 30 30 30 10 10 10 0 0 0 0 0 0
44081 - 0 0 0 0 0 0 0 0 0 0 0 0
44082 - 0 0 0 0 0 0 0 0 0 0 0 0
44083 - 0 0 0 0 0 0 0 0 0 0 0 0
44084 - 0 0 0 0 0 0 0 0 0 0 0 0
44085 - 0 0 0 0 0 0 0 0 0 0 0 0
44086 - 0 0 0 0 0 0 0 0 0 0 0 0
44087 - 0 0 0 0 0 0 0 0 0 0 0 0
44088 - 0 0 0 0 0 0 0 0 0 0 0 0
44089 - 0 0 0 0 0 0 0 0 0 0 0 0
44090 - 0 0 0 0 0 0 10 10 10 26 26 26
44091 - 58 58 58 90 90 90 18 18 18 2 2 6
44092 - 2 2 6 110 110 110 253 253 253 253 253 253
44093 -253 253 253 253 253 253 253 253 253 253 253 253
44094 -250 250 250 253 253 253 253 253 253 253 253 253
44095 -253 253 253 253 253 253 253 253 253 253 253 253
44096 -253 253 253 253 253 253 253 253 253 253 253 253
44097 -253 253 253 231 231 231 18 18 18 2 2 6
44098 - 2 2 6 2 2 6 2 2 6 2 2 6
44099 - 2 2 6 2 2 6 18 18 18 94 94 94
44100 - 54 54 54 26 26 26 10 10 10 0 0 0
44101 - 0 0 0 0 0 0 0 0 0 0 0 0
44102 - 0 0 0 0 0 0 0 0 0 0 0 0
44103 - 0 0 0 0 0 0 0 0 0 0 0 0
44104 - 0 0 0 0 0 0 0 0 0 0 0 0
44105 - 0 0 0 0 0 0 0 0 0 0 0 0
44106 - 0 0 0 0 0 0 0 0 0 0 0 0
44107 - 0 0 0 0 0 0 0 0 0 0 0 0
44108 - 0 0 0 0 0 0 0 0 0 0 0 0
44109 - 0 0 0 0 0 0 0 0 0 0 0 0
44110 - 0 0 0 6 6 6 22 22 22 50 50 50
44111 - 90 90 90 26 26 26 2 2 6 2 2 6
44112 - 14 14 14 195 195 195 250 250 250 253 253 253
44113 -253 253 253 253 253 253 253 253 253 253 253 253
44114 -253 253 253 253 253 253 253 253 253 253 253 253
44115 -253 253 253 253 253 253 253 253 253 253 253 253
44116 -253 253 253 253 253 253 253 253 253 253 253 253
44117 -250 250 250 242 242 242 54 54 54 2 2 6
44118 - 2 2 6 2 2 6 2 2 6 2 2 6
44119 - 2 2 6 2 2 6 2 2 6 38 38 38
44120 - 86 86 86 50 50 50 22 22 22 6 6 6
44121 - 0 0 0 0 0 0 0 0 0 0 0 0
44122 - 0 0 0 0 0 0 0 0 0 0 0 0
44123 - 0 0 0 0 0 0 0 0 0 0 0 0
44124 - 0 0 0 0 0 0 0 0 0 0 0 0
44125 - 0 0 0 0 0 0 0 0 0 0 0 0
44126 - 0 0 0 0 0 0 0 0 0 0 0 0
44127 - 0 0 0 0 0 0 0 0 0 0 0 0
44128 - 0 0 0 0 0 0 0 0 0 0 0 0
44129 - 0 0 0 0 0 0 0 0 0 0 0 0
44130 - 6 6 6 14 14 14 38 38 38 82 82 82
44131 - 34 34 34 2 2 6 2 2 6 2 2 6
44132 - 42 42 42 195 195 195 246 246 246 253 253 253
44133 -253 253 253 253 253 253 253 253 253 250 250 250
44134 -242 242 242 242 242 242 250 250 250 253 253 253
44135 -253 253 253 253 253 253 253 253 253 253 253 253
44136 -253 253 253 250 250 250 246 246 246 238 238 238
44137 -226 226 226 231 231 231 101 101 101 6 6 6
44138 - 2 2 6 2 2 6 2 2 6 2 2 6
44139 - 2 2 6 2 2 6 2 2 6 2 2 6
44140 - 38 38 38 82 82 82 42 42 42 14 14 14
44141 - 6 6 6 0 0 0 0 0 0 0 0 0
44142 - 0 0 0 0 0 0 0 0 0 0 0 0
44143 - 0 0 0 0 0 0 0 0 0 0 0 0
44144 - 0 0 0 0 0 0 0 0 0 0 0 0
44145 - 0 0 0 0 0 0 0 0 0 0 0 0
44146 - 0 0 0 0 0 0 0 0 0 0 0 0
44147 - 0 0 0 0 0 0 0 0 0 0 0 0
44148 - 0 0 0 0 0 0 0 0 0 0 0 0
44149 - 0 0 0 0 0 0 0 0 0 0 0 0
44150 - 10 10 10 26 26 26 62 62 62 66 66 66
44151 - 2 2 6 2 2 6 2 2 6 6 6 6
44152 - 70 70 70 170 170 170 206 206 206 234 234 234
44153 -246 246 246 250 250 250 250 250 250 238 238 238
44154 -226 226 226 231 231 231 238 238 238 250 250 250
44155 -250 250 250 250 250 250 246 246 246 231 231 231
44156 -214 214 214 206 206 206 202 202 202 202 202 202
44157 -198 198 198 202 202 202 182 182 182 18 18 18
44158 - 2 2 6 2 2 6 2 2 6 2 2 6
44159 - 2 2 6 2 2 6 2 2 6 2 2 6
44160 - 2 2 6 62 62 62 66 66 66 30 30 30
44161 - 10 10 10 0 0 0 0 0 0 0 0 0
44162 - 0 0 0 0 0 0 0 0 0 0 0 0
44163 - 0 0 0 0 0 0 0 0 0 0 0 0
44164 - 0 0 0 0 0 0 0 0 0 0 0 0
44165 - 0 0 0 0 0 0 0 0 0 0 0 0
44166 - 0 0 0 0 0 0 0 0 0 0 0 0
44167 - 0 0 0 0 0 0 0 0 0 0 0 0
44168 - 0 0 0 0 0 0 0 0 0 0 0 0
44169 - 0 0 0 0 0 0 0 0 0 0 0 0
44170 - 14 14 14 42 42 42 82 82 82 18 18 18
44171 - 2 2 6 2 2 6 2 2 6 10 10 10
44172 - 94 94 94 182 182 182 218 218 218 242 242 242
44173 -250 250 250 253 253 253 253 253 253 250 250 250
44174 -234 234 234 253 253 253 253 253 253 253 253 253
44175 -253 253 253 253 253 253 253 253 253 246 246 246
44176 -238 238 238 226 226 226 210 210 210 202 202 202
44177 -195 195 195 195 195 195 210 210 210 158 158 158
44178 - 6 6 6 14 14 14 50 50 50 14 14 14
44179 - 2 2 6 2 2 6 2 2 6 2 2 6
44180 - 2 2 6 6 6 6 86 86 86 46 46 46
44181 - 18 18 18 6 6 6 0 0 0 0 0 0
44182 - 0 0 0 0 0 0 0 0 0 0 0 0
44183 - 0 0 0 0 0 0 0 0 0 0 0 0
44184 - 0 0 0 0 0 0 0 0 0 0 0 0
44185 - 0 0 0 0 0 0 0 0 0 0 0 0
44186 - 0 0 0 0 0 0 0 0 0 0 0 0
44187 - 0 0 0 0 0 0 0 0 0 0 0 0
44188 - 0 0 0 0 0 0 0 0 0 0 0 0
44189 - 0 0 0 0 0 0 0 0 0 6 6 6
44190 - 22 22 22 54 54 54 70 70 70 2 2 6
44191 - 2 2 6 10 10 10 2 2 6 22 22 22
44192 -166 166 166 231 231 231 250 250 250 253 253 253
44193 -253 253 253 253 253 253 253 253 253 250 250 250
44194 -242 242 242 253 253 253 253 253 253 253 253 253
44195 -253 253 253 253 253 253 253 253 253 253 253 253
44196 -253 253 253 253 253 253 253 253 253 246 246 246
44197 -231 231 231 206 206 206 198 198 198 226 226 226
44198 - 94 94 94 2 2 6 6 6 6 38 38 38
44199 - 30 30 30 2 2 6 2 2 6 2 2 6
44200 - 2 2 6 2 2 6 62 62 62 66 66 66
44201 - 26 26 26 10 10 10 0 0 0 0 0 0
44202 - 0 0 0 0 0 0 0 0 0 0 0 0
44203 - 0 0 0 0 0 0 0 0 0 0 0 0
44204 - 0 0 0 0 0 0 0 0 0 0 0 0
44205 - 0 0 0 0 0 0 0 0 0 0 0 0
44206 - 0 0 0 0 0 0 0 0 0 0 0 0
44207 - 0 0 0 0 0 0 0 0 0 0 0 0
44208 - 0 0 0 0 0 0 0 0 0 0 0 0
44209 - 0 0 0 0 0 0 0 0 0 10 10 10
44210 - 30 30 30 74 74 74 50 50 50 2 2 6
44211 - 26 26 26 26 26 26 2 2 6 106 106 106
44212 -238 238 238 253 253 253 253 253 253 253 253 253
44213 -253 253 253 253 253 253 253 253 253 253 253 253
44214 -253 253 253 253 253 253 253 253 253 253 253 253
44215 -253 253 253 253 253 253 253 253 253 253 253 253
44216 -253 253 253 253 253 253 253 253 253 253 253 253
44217 -253 253 253 246 246 246 218 218 218 202 202 202
44218 -210 210 210 14 14 14 2 2 6 2 2 6
44219 - 30 30 30 22 22 22 2 2 6 2 2 6
44220 - 2 2 6 2 2 6 18 18 18 86 86 86
44221 - 42 42 42 14 14 14 0 0 0 0 0 0
44222 - 0 0 0 0 0 0 0 0 0 0 0 0
44223 - 0 0 0 0 0 0 0 0 0 0 0 0
44224 - 0 0 0 0 0 0 0 0 0 0 0 0
44225 - 0 0 0 0 0 0 0 0 0 0 0 0
44226 - 0 0 0 0 0 0 0 0 0 0 0 0
44227 - 0 0 0 0 0 0 0 0 0 0 0 0
44228 - 0 0 0 0 0 0 0 0 0 0 0 0
44229 - 0 0 0 0 0 0 0 0 0 14 14 14
44230 - 42 42 42 90 90 90 22 22 22 2 2 6
44231 - 42 42 42 2 2 6 18 18 18 218 218 218
44232 -253 253 253 253 253 253 253 253 253 253 253 253
44233 -253 253 253 253 253 253 253 253 253 253 253 253
44234 -253 253 253 253 253 253 253 253 253 253 253 253
44235 -253 253 253 253 253 253 253 253 253 253 253 253
44236 -253 253 253 253 253 253 253 253 253 253 253 253
44237 -253 253 253 253 253 253 250 250 250 221 221 221
44238 -218 218 218 101 101 101 2 2 6 14 14 14
44239 - 18 18 18 38 38 38 10 10 10 2 2 6
44240 - 2 2 6 2 2 6 2 2 6 78 78 78
44241 - 58 58 58 22 22 22 6 6 6 0 0 0
44242 - 0 0 0 0 0 0 0 0 0 0 0 0
44243 - 0 0 0 0 0 0 0 0 0 0 0 0
44244 - 0 0 0 0 0 0 0 0 0 0 0 0
44245 - 0 0 0 0 0 0 0 0 0 0 0 0
44246 - 0 0 0 0 0 0 0 0 0 0 0 0
44247 - 0 0 0 0 0 0 0 0 0 0 0 0
44248 - 0 0 0 0 0 0 0 0 0 0 0 0
44249 - 0 0 0 0 0 0 6 6 6 18 18 18
44250 - 54 54 54 82 82 82 2 2 6 26 26 26
44251 - 22 22 22 2 2 6 123 123 123 253 253 253
44252 -253 253 253 253 253 253 253 253 253 253 253 253
44253 -253 253 253 253 253 253 253 253 253 253 253 253
44254 -253 253 253 253 253 253 253 253 253 253 253 253
44255 -253 253 253 253 253 253 253 253 253 253 253 253
44256 -253 253 253 253 253 253 253 253 253 253 253 253
44257 -253 253 253 253 253 253 253 253 253 250 250 250
44258 -238 238 238 198 198 198 6 6 6 38 38 38
44259 - 58 58 58 26 26 26 38 38 38 2 2 6
44260 - 2 2 6 2 2 6 2 2 6 46 46 46
44261 - 78 78 78 30 30 30 10 10 10 0 0 0
44262 - 0 0 0 0 0 0 0 0 0 0 0 0
44263 - 0 0 0 0 0 0 0 0 0 0 0 0
44264 - 0 0 0 0 0 0 0 0 0 0 0 0
44265 - 0 0 0 0 0 0 0 0 0 0 0 0
44266 - 0 0 0 0 0 0 0 0 0 0 0 0
44267 - 0 0 0 0 0 0 0 0 0 0 0 0
44268 - 0 0 0 0 0 0 0 0 0 0 0 0
44269 - 0 0 0 0 0 0 10 10 10 30 30 30
44270 - 74 74 74 58 58 58 2 2 6 42 42 42
44271 - 2 2 6 22 22 22 231 231 231 253 253 253
44272 -253 253 253 253 253 253 253 253 253 253 253 253
44273 -253 253 253 253 253 253 253 253 253 250 250 250
44274 -253 253 253 253 253 253 253 253 253 253 253 253
44275 -253 253 253 253 253 253 253 253 253 253 253 253
44276 -253 253 253 253 253 253 253 253 253 253 253 253
44277 -253 253 253 253 253 253 253 253 253 253 253 253
44278 -253 253 253 246 246 246 46 46 46 38 38 38
44279 - 42 42 42 14 14 14 38 38 38 14 14 14
44280 - 2 2 6 2 2 6 2 2 6 6 6 6
44281 - 86 86 86 46 46 46 14 14 14 0 0 0
44282 - 0 0 0 0 0 0 0 0 0 0 0 0
44283 - 0 0 0 0 0 0 0 0 0 0 0 0
44284 - 0 0 0 0 0 0 0 0 0 0 0 0
44285 - 0 0 0 0 0 0 0 0 0 0 0 0
44286 - 0 0 0 0 0 0 0 0 0 0 0 0
44287 - 0 0 0 0 0 0 0 0 0 0 0 0
44288 - 0 0 0 0 0 0 0 0 0 0 0 0
44289 - 0 0 0 6 6 6 14 14 14 42 42 42
44290 - 90 90 90 18 18 18 18 18 18 26 26 26
44291 - 2 2 6 116 116 116 253 253 253 253 253 253
44292 -253 253 253 253 253 253 253 253 253 253 253 253
44293 -253 253 253 253 253 253 250 250 250 238 238 238
44294 -253 253 253 253 253 253 253 253 253 253 253 253
44295 -253 253 253 253 253 253 253 253 253 253 253 253
44296 -253 253 253 253 253 253 253 253 253 253 253 253
44297 -253 253 253 253 253 253 253 253 253 253 253 253
44298 -253 253 253 253 253 253 94 94 94 6 6 6
44299 - 2 2 6 2 2 6 10 10 10 34 34 34
44300 - 2 2 6 2 2 6 2 2 6 2 2 6
44301 - 74 74 74 58 58 58 22 22 22 6 6 6
44302 - 0 0 0 0 0 0 0 0 0 0 0 0
44303 - 0 0 0 0 0 0 0 0 0 0 0 0
44304 - 0 0 0 0 0 0 0 0 0 0 0 0
44305 - 0 0 0 0 0 0 0 0 0 0 0 0
44306 - 0 0 0 0 0 0 0 0 0 0 0 0
44307 - 0 0 0 0 0 0 0 0 0 0 0 0
44308 - 0 0 0 0 0 0 0 0 0 0 0 0
44309 - 0 0 0 10 10 10 26 26 26 66 66 66
44310 - 82 82 82 2 2 6 38 38 38 6 6 6
44311 - 14 14 14 210 210 210 253 253 253 253 253 253
44312 -253 253 253 253 253 253 253 253 253 253 253 253
44313 -253 253 253 253 253 253 246 246 246 242 242 242
44314 -253 253 253 253 253 253 253 253 253 253 253 253
44315 -253 253 253 253 253 253 253 253 253 253 253 253
44316 -253 253 253 253 253 253 253 253 253 253 253 253
44317 -253 253 253 253 253 253 253 253 253 253 253 253
44318 -253 253 253 253 253 253 144 144 144 2 2 6
44319 - 2 2 6 2 2 6 2 2 6 46 46 46
44320 - 2 2 6 2 2 6 2 2 6 2 2 6
44321 - 42 42 42 74 74 74 30 30 30 10 10 10
44322 - 0 0 0 0 0 0 0 0 0 0 0 0
44323 - 0 0 0 0 0 0 0 0 0 0 0 0
44324 - 0 0 0 0 0 0 0 0 0 0 0 0
44325 - 0 0 0 0 0 0 0 0 0 0 0 0
44326 - 0 0 0 0 0 0 0 0 0 0 0 0
44327 - 0 0 0 0 0 0 0 0 0 0 0 0
44328 - 0 0 0 0 0 0 0 0 0 0 0 0
44329 - 6 6 6 14 14 14 42 42 42 90 90 90
44330 - 26 26 26 6 6 6 42 42 42 2 2 6
44331 - 74 74 74 250 250 250 253 253 253 253 253 253
44332 -253 253 253 253 253 253 253 253 253 253 253 253
44333 -253 253 253 253 253 253 242 242 242 242 242 242
44334 -253 253 253 253 253 253 253 253 253 253 253 253
44335 -253 253 253 253 253 253 253 253 253 253 253 253
44336 -253 253 253 253 253 253 253 253 253 253 253 253
44337 -253 253 253 253 253 253 253 253 253 253 253 253
44338 -253 253 253 253 253 253 182 182 182 2 2 6
44339 - 2 2 6 2 2 6 2 2 6 46 46 46
44340 - 2 2 6 2 2 6 2 2 6 2 2 6
44341 - 10 10 10 86 86 86 38 38 38 10 10 10
44342 - 0 0 0 0 0 0 0 0 0 0 0 0
44343 - 0 0 0 0 0 0 0 0 0 0 0 0
44344 - 0 0 0 0 0 0 0 0 0 0 0 0
44345 - 0 0 0 0 0 0 0 0 0 0 0 0
44346 - 0 0 0 0 0 0 0 0 0 0 0 0
44347 - 0 0 0 0 0 0 0 0 0 0 0 0
44348 - 0 0 0 0 0 0 0 0 0 0 0 0
44349 - 10 10 10 26 26 26 66 66 66 82 82 82
44350 - 2 2 6 22 22 22 18 18 18 2 2 6
44351 -149 149 149 253 253 253 253 253 253 253 253 253
44352 -253 253 253 253 253 253 253 253 253 253 253 253
44353 -253 253 253 253 253 253 234 234 234 242 242 242
44354 -253 253 253 253 253 253 253 253 253 253 253 253
44355 -253 253 253 253 253 253 253 253 253 253 253 253
44356 -253 253 253 253 253 253 253 253 253 253 253 253
44357 -253 253 253 253 253 253 253 253 253 253 253 253
44358 -253 253 253 253 253 253 206 206 206 2 2 6
44359 - 2 2 6 2 2 6 2 2 6 38 38 38
44360 - 2 2 6 2 2 6 2 2 6 2 2 6
44361 - 6 6 6 86 86 86 46 46 46 14 14 14
44362 - 0 0 0 0 0 0 0 0 0 0 0 0
44363 - 0 0 0 0 0 0 0 0 0 0 0 0
44364 - 0 0 0 0 0 0 0 0 0 0 0 0
44365 - 0 0 0 0 0 0 0 0 0 0 0 0
44366 - 0 0 0 0 0 0 0 0 0 0 0 0
44367 - 0 0 0 0 0 0 0 0 0 0 0 0
44368 - 0 0 0 0 0 0 0 0 0 6 6 6
44369 - 18 18 18 46 46 46 86 86 86 18 18 18
44370 - 2 2 6 34 34 34 10 10 10 6 6 6
44371 -210 210 210 253 253 253 253 253 253 253 253 253
44372 -253 253 253 253 253 253 253 253 253 253 253 253
44373 -253 253 253 253 253 253 234 234 234 242 242 242
44374 -253 253 253 253 253 253 253 253 253 253 253 253
44375 -253 253 253 253 253 253 253 253 253 253 253 253
44376 -253 253 253 253 253 253 253 253 253 253 253 253
44377 -253 253 253 253 253 253 253 253 253 253 253 253
44378 -253 253 253 253 253 253 221 221 221 6 6 6
44379 - 2 2 6 2 2 6 6 6 6 30 30 30
44380 - 2 2 6 2 2 6 2 2 6 2 2 6
44381 - 2 2 6 82 82 82 54 54 54 18 18 18
44382 - 6 6 6 0 0 0 0 0 0 0 0 0
44383 - 0 0 0 0 0 0 0 0 0 0 0 0
44384 - 0 0 0 0 0 0 0 0 0 0 0 0
44385 - 0 0 0 0 0 0 0 0 0 0 0 0
44386 - 0 0 0 0 0 0 0 0 0 0 0 0
44387 - 0 0 0 0 0 0 0 0 0 0 0 0
44388 - 0 0 0 0 0 0 0 0 0 10 10 10
44389 - 26 26 26 66 66 66 62 62 62 2 2 6
44390 - 2 2 6 38 38 38 10 10 10 26 26 26
44391 -238 238 238 253 253 253 253 253 253 253 253 253
44392 -253 253 253 253 253 253 253 253 253 253 253 253
44393 -253 253 253 253 253 253 231 231 231 238 238 238
44394 -253 253 253 253 253 253 253 253 253 253 253 253
44395 -253 253 253 253 253 253 253 253 253 253 253 253
44396 -253 253 253 253 253 253 253 253 253 253 253 253
44397 -253 253 253 253 253 253 253 253 253 253 253 253
44398 -253 253 253 253 253 253 231 231 231 6 6 6
44399 - 2 2 6 2 2 6 10 10 10 30 30 30
44400 - 2 2 6 2 2 6 2 2 6 2 2 6
44401 - 2 2 6 66 66 66 58 58 58 22 22 22
44402 - 6 6 6 0 0 0 0 0 0 0 0 0
44403 - 0 0 0 0 0 0 0 0 0 0 0 0
44404 - 0 0 0 0 0 0 0 0 0 0 0 0
44405 - 0 0 0 0 0 0 0 0 0 0 0 0
44406 - 0 0 0 0 0 0 0 0 0 0 0 0
44407 - 0 0 0 0 0 0 0 0 0 0 0 0
44408 - 0 0 0 0 0 0 0 0 0 10 10 10
44409 - 38 38 38 78 78 78 6 6 6 2 2 6
44410 - 2 2 6 46 46 46 14 14 14 42 42 42
44411 -246 246 246 253 253 253 253 253 253 253 253 253
44412 -253 253 253 253 253 253 253 253 253 253 253 253
44413 -253 253 253 253 253 253 231 231 231 242 242 242
44414 -253 253 253 253 253 253 253 253 253 253 253 253
44415 -253 253 253 253 253 253 253 253 253 253 253 253
44416 -253 253 253 253 253 253 253 253 253 253 253 253
44417 -253 253 253 253 253 253 253 253 253 253 253 253
44418 -253 253 253 253 253 253 234 234 234 10 10 10
44419 - 2 2 6 2 2 6 22 22 22 14 14 14
44420 - 2 2 6 2 2 6 2 2 6 2 2 6
44421 - 2 2 6 66 66 66 62 62 62 22 22 22
44422 - 6 6 6 0 0 0 0 0 0 0 0 0
44423 - 0 0 0 0 0 0 0 0 0 0 0 0
44424 - 0 0 0 0 0 0 0 0 0 0 0 0
44425 - 0 0 0 0 0 0 0 0 0 0 0 0
44426 - 0 0 0 0 0 0 0 0 0 0 0 0
44427 - 0 0 0 0 0 0 0 0 0 0 0 0
44428 - 0 0 0 0 0 0 6 6 6 18 18 18
44429 - 50 50 50 74 74 74 2 2 6 2 2 6
44430 - 14 14 14 70 70 70 34 34 34 62 62 62
44431 -250 250 250 253 253 253 253 253 253 253 253 253
44432 -253 253 253 253 253 253 253 253 253 253 253 253
44433 -253 253 253 253 253 253 231 231 231 246 246 246
44434 -253 253 253 253 253 253 253 253 253 253 253 253
44435 -253 253 253 253 253 253 253 253 253 253 253 253
44436 -253 253 253 253 253 253 253 253 253 253 253 253
44437 -253 253 253 253 253 253 253 253 253 253 253 253
44438 -253 253 253 253 253 253 234 234 234 14 14 14
44439 - 2 2 6 2 2 6 30 30 30 2 2 6
44440 - 2 2 6 2 2 6 2 2 6 2 2 6
44441 - 2 2 6 66 66 66 62 62 62 22 22 22
44442 - 6 6 6 0 0 0 0 0 0 0 0 0
44443 - 0 0 0 0 0 0 0 0 0 0 0 0
44444 - 0 0 0 0 0 0 0 0 0 0 0 0
44445 - 0 0 0 0 0 0 0 0 0 0 0 0
44446 - 0 0 0 0 0 0 0 0 0 0 0 0
44447 - 0 0 0 0 0 0 0 0 0 0 0 0
44448 - 0 0 0 0 0 0 6 6 6 18 18 18
44449 - 54 54 54 62 62 62 2 2 6 2 2 6
44450 - 2 2 6 30 30 30 46 46 46 70 70 70
44451 -250 250 250 253 253 253 253 253 253 253 253 253
44452 -253 253 253 253 253 253 253 253 253 253 253 253
44453 -253 253 253 253 253 253 231 231 231 246 246 246
44454 -253 253 253 253 253 253 253 253 253 253 253 253
44455 -253 253 253 253 253 253 253 253 253 253 253 253
44456 -253 253 253 253 253 253 253 253 253 253 253 253
44457 -253 253 253 253 253 253 253 253 253 253 253 253
44458 -253 253 253 253 253 253 226 226 226 10 10 10
44459 - 2 2 6 6 6 6 30 30 30 2 2 6
44460 - 2 2 6 2 2 6 2 2 6 2 2 6
44461 - 2 2 6 66 66 66 58 58 58 22 22 22
44462 - 6 6 6 0 0 0 0 0 0 0 0 0
44463 - 0 0 0 0 0 0 0 0 0 0 0 0
44464 - 0 0 0 0 0 0 0 0 0 0 0 0
44465 - 0 0 0 0 0 0 0 0 0 0 0 0
44466 - 0 0 0 0 0 0 0 0 0 0 0 0
44467 - 0 0 0 0 0 0 0 0 0 0 0 0
44468 - 0 0 0 0 0 0 6 6 6 22 22 22
44469 - 58 58 58 62 62 62 2 2 6 2 2 6
44470 - 2 2 6 2 2 6 30 30 30 78 78 78
44471 -250 250 250 253 253 253 253 253 253 253 253 253
44472 -253 253 253 253 253 253 253 253 253 253 253 253
44473 -253 253 253 253 253 253 231 231 231 246 246 246
44474 -253 253 253 253 253 253 253 253 253 253 253 253
44475 -253 253 253 253 253 253 253 253 253 253 253 253
44476 -253 253 253 253 253 253 253 253 253 253 253 253
44477 -253 253 253 253 253 253 253 253 253 253 253 253
44478 -253 253 253 253 253 253 206 206 206 2 2 6
44479 - 22 22 22 34 34 34 18 14 6 22 22 22
44480 - 26 26 26 18 18 18 6 6 6 2 2 6
44481 - 2 2 6 82 82 82 54 54 54 18 18 18
44482 - 6 6 6 0 0 0 0 0 0 0 0 0
44483 - 0 0 0 0 0 0 0 0 0 0 0 0
44484 - 0 0 0 0 0 0 0 0 0 0 0 0
44485 - 0 0 0 0 0 0 0 0 0 0 0 0
44486 - 0 0 0 0 0 0 0 0 0 0 0 0
44487 - 0 0 0 0 0 0 0 0 0 0 0 0
44488 - 0 0 0 0 0 0 6 6 6 26 26 26
44489 - 62 62 62 106 106 106 74 54 14 185 133 11
44490 -210 162 10 121 92 8 6 6 6 62 62 62
44491 -238 238 238 253 253 253 253 253 253 253 253 253
44492 -253 253 253 253 253 253 253 253 253 253 253 253
44493 -253 253 253 253 253 253 231 231 231 246 246 246
44494 -253 253 253 253 253 253 253 253 253 253 253 253
44495 -253 253 253 253 253 253 253 253 253 253 253 253
44496 -253 253 253 253 253 253 253 253 253 253 253 253
44497 -253 253 253 253 253 253 253 253 253 253 253 253
44498 -253 253 253 253 253 253 158 158 158 18 18 18
44499 - 14 14 14 2 2 6 2 2 6 2 2 6
44500 - 6 6 6 18 18 18 66 66 66 38 38 38
44501 - 6 6 6 94 94 94 50 50 50 18 18 18
44502 - 6 6 6 0 0 0 0 0 0 0 0 0
44503 - 0 0 0 0 0 0 0 0 0 0 0 0
44504 - 0 0 0 0 0 0 0 0 0 0 0 0
44505 - 0 0 0 0 0 0 0 0 0 0 0 0
44506 - 0 0 0 0 0 0 0 0 0 0 0 0
44507 - 0 0 0 0 0 0 0 0 0 6 6 6
44508 - 10 10 10 10 10 10 18 18 18 38 38 38
44509 - 78 78 78 142 134 106 216 158 10 242 186 14
44510 -246 190 14 246 190 14 156 118 10 10 10 10
44511 - 90 90 90 238 238 238 253 253 253 253 253 253
44512 -253 253 253 253 253 253 253 253 253 253 253 253
44513 -253 253 253 253 253 253 231 231 231 250 250 250
44514 -253 253 253 253 253 253 253 253 253 253 253 253
44515 -253 253 253 253 253 253 253 253 253 253 253 253
44516 -253 253 253 253 253 253 253 253 253 253 253 253
44517 -253 253 253 253 253 253 253 253 253 246 230 190
44518 -238 204 91 238 204 91 181 142 44 37 26 9
44519 - 2 2 6 2 2 6 2 2 6 2 2 6
44520 - 2 2 6 2 2 6 38 38 38 46 46 46
44521 - 26 26 26 106 106 106 54 54 54 18 18 18
44522 - 6 6 6 0 0 0 0 0 0 0 0 0
44523 - 0 0 0 0 0 0 0 0 0 0 0 0
44524 - 0 0 0 0 0 0 0 0 0 0 0 0
44525 - 0 0 0 0 0 0 0 0 0 0 0 0
44526 - 0 0 0 0 0 0 0 0 0 0 0 0
44527 - 0 0 0 6 6 6 14 14 14 22 22 22
44528 - 30 30 30 38 38 38 50 50 50 70 70 70
44529 -106 106 106 190 142 34 226 170 11 242 186 14
44530 -246 190 14 246 190 14 246 190 14 154 114 10
44531 - 6 6 6 74 74 74 226 226 226 253 253 253
44532 -253 253 253 253 253 253 253 253 253 253 253 253
44533 -253 253 253 253 253 253 231 231 231 250 250 250
44534 -253 253 253 253 253 253 253 253 253 253 253 253
44535 -253 253 253 253 253 253 253 253 253 253 253 253
44536 -253 253 253 253 253 253 253 253 253 253 253 253
44537 -253 253 253 253 253 253 253 253 253 228 184 62
44538 -241 196 14 241 208 19 232 195 16 38 30 10
44539 - 2 2 6 2 2 6 2 2 6 2 2 6
44540 - 2 2 6 6 6 6 30 30 30 26 26 26
44541 -203 166 17 154 142 90 66 66 66 26 26 26
44542 - 6 6 6 0 0 0 0 0 0 0 0 0
44543 - 0 0 0 0 0 0 0 0 0 0 0 0
44544 - 0 0 0 0 0 0 0 0 0 0 0 0
44545 - 0 0 0 0 0 0 0 0 0 0 0 0
44546 - 0 0 0 0 0 0 0 0 0 0 0 0
44547 - 6 6 6 18 18 18 38 38 38 58 58 58
44548 - 78 78 78 86 86 86 101 101 101 123 123 123
44549 -175 146 61 210 150 10 234 174 13 246 186 14
44550 -246 190 14 246 190 14 246 190 14 238 190 10
44551 -102 78 10 2 2 6 46 46 46 198 198 198
44552 -253 253 253 253 253 253 253 253 253 253 253 253
44553 -253 253 253 253 253 253 234 234 234 242 242 242
44554 -253 253 253 253 253 253 253 253 253 253 253 253
44555 -253 253 253 253 253 253 253 253 253 253 253 253
44556 -253 253 253 253 253 253 253 253 253 253 253 253
44557 -253 253 253 253 253 253 253 253 253 224 178 62
44558 -242 186 14 241 196 14 210 166 10 22 18 6
44559 - 2 2 6 2 2 6 2 2 6 2 2 6
44560 - 2 2 6 2 2 6 6 6 6 121 92 8
44561 -238 202 15 232 195 16 82 82 82 34 34 34
44562 - 10 10 10 0 0 0 0 0 0 0 0 0
44563 - 0 0 0 0 0 0 0 0 0 0 0 0
44564 - 0 0 0 0 0 0 0 0 0 0 0 0
44565 - 0 0 0 0 0 0 0 0 0 0 0 0
44566 - 0 0 0 0 0 0 0 0 0 0 0 0
44567 - 14 14 14 38 38 38 70 70 70 154 122 46
44568 -190 142 34 200 144 11 197 138 11 197 138 11
44569 -213 154 11 226 170 11 242 186 14 246 190 14
44570 -246 190 14 246 190 14 246 190 14 246 190 14
44571 -225 175 15 46 32 6 2 2 6 22 22 22
44572 -158 158 158 250 250 250 253 253 253 253 253 253
44573 -253 253 253 253 253 253 253 253 253 253 253 253
44574 -253 253 253 253 253 253 253 253 253 253 253 253
44575 -253 253 253 253 253 253 253 253 253 253 253 253
44576 -253 253 253 253 253 253 253 253 253 253 253 253
44577 -253 253 253 250 250 250 242 242 242 224 178 62
44578 -239 182 13 236 186 11 213 154 11 46 32 6
44579 - 2 2 6 2 2 6 2 2 6 2 2 6
44580 - 2 2 6 2 2 6 61 42 6 225 175 15
44581 -238 190 10 236 186 11 112 100 78 42 42 42
44582 - 14 14 14 0 0 0 0 0 0 0 0 0
44583 - 0 0 0 0 0 0 0 0 0 0 0 0
44584 - 0 0 0 0 0 0 0 0 0 0 0 0
44585 - 0 0 0 0 0 0 0 0 0 0 0 0
44586 - 0 0 0 0 0 0 0 0 0 6 6 6
44587 - 22 22 22 54 54 54 154 122 46 213 154 11
44588 -226 170 11 230 174 11 226 170 11 226 170 11
44589 -236 178 12 242 186 14 246 190 14 246 190 14
44590 -246 190 14 246 190 14 246 190 14 246 190 14
44591 -241 196 14 184 144 12 10 10 10 2 2 6
44592 - 6 6 6 116 116 116 242 242 242 253 253 253
44593 -253 253 253 253 253 253 253 253 253 253 253 253
44594 -253 253 253 253 253 253 253 253 253 253 253 253
44595 -253 253 253 253 253 253 253 253 253 253 253 253
44596 -253 253 253 253 253 253 253 253 253 253 253 253
44597 -253 253 253 231 231 231 198 198 198 214 170 54
44598 -236 178 12 236 178 12 210 150 10 137 92 6
44599 - 18 14 6 2 2 6 2 2 6 2 2 6
44600 - 6 6 6 70 47 6 200 144 11 236 178 12
44601 -239 182 13 239 182 13 124 112 88 58 58 58
44602 - 22 22 22 6 6 6 0 0 0 0 0 0
44603 - 0 0 0 0 0 0 0 0 0 0 0 0
44604 - 0 0 0 0 0 0 0 0 0 0 0 0
44605 - 0 0 0 0 0 0 0 0 0 0 0 0
44606 - 0 0 0 0 0 0 0 0 0 10 10 10
44607 - 30 30 30 70 70 70 180 133 36 226 170 11
44608 -239 182 13 242 186 14 242 186 14 246 186 14
44609 -246 190 14 246 190 14 246 190 14 246 190 14
44610 -246 190 14 246 190 14 246 190 14 246 190 14
44611 -246 190 14 232 195 16 98 70 6 2 2 6
44612 - 2 2 6 2 2 6 66 66 66 221 221 221
44613 -253 253 253 253 253 253 253 253 253 253 253 253
44614 -253 253 253 253 253 253 253 253 253 253 253 253
44615 -253 253 253 253 253 253 253 253 253 253 253 253
44616 -253 253 253 253 253 253 253 253 253 253 253 253
44617 -253 253 253 206 206 206 198 198 198 214 166 58
44618 -230 174 11 230 174 11 216 158 10 192 133 9
44619 -163 110 8 116 81 8 102 78 10 116 81 8
44620 -167 114 7 197 138 11 226 170 11 239 182 13
44621 -242 186 14 242 186 14 162 146 94 78 78 78
44622 - 34 34 34 14 14 14 6 6 6 0 0 0
44623 - 0 0 0 0 0 0 0 0 0 0 0 0
44624 - 0 0 0 0 0 0 0 0 0 0 0 0
44625 - 0 0 0 0 0 0 0 0 0 0 0 0
44626 - 0 0 0 0 0 0 0 0 0 6 6 6
44627 - 30 30 30 78 78 78 190 142 34 226 170 11
44628 -239 182 13 246 190 14 246 190 14 246 190 14
44629 -246 190 14 246 190 14 246 190 14 246 190 14
44630 -246 190 14 246 190 14 246 190 14 246 190 14
44631 -246 190 14 241 196 14 203 166 17 22 18 6
44632 - 2 2 6 2 2 6 2 2 6 38 38 38
44633 -218 218 218 253 253 253 253 253 253 253 253 253
44634 -253 253 253 253 253 253 253 253 253 253 253 253
44635 -253 253 253 253 253 253 253 253 253 253 253 253
44636 -253 253 253 253 253 253 253 253 253 253 253 253
44637 -250 250 250 206 206 206 198 198 198 202 162 69
44638 -226 170 11 236 178 12 224 166 10 210 150 10
44639 -200 144 11 197 138 11 192 133 9 197 138 11
44640 -210 150 10 226 170 11 242 186 14 246 190 14
44641 -246 190 14 246 186 14 225 175 15 124 112 88
44642 - 62 62 62 30 30 30 14 14 14 6 6 6
44643 - 0 0 0 0 0 0 0 0 0 0 0 0
44644 - 0 0 0 0 0 0 0 0 0 0 0 0
44645 - 0 0 0 0 0 0 0 0 0 0 0 0
44646 - 0 0 0 0 0 0 0 0 0 10 10 10
44647 - 30 30 30 78 78 78 174 135 50 224 166 10
44648 -239 182 13 246 190 14 246 190 14 246 190 14
44649 -246 190 14 246 190 14 246 190 14 246 190 14
44650 -246 190 14 246 190 14 246 190 14 246 190 14
44651 -246 190 14 246 190 14 241 196 14 139 102 15
44652 - 2 2 6 2 2 6 2 2 6 2 2 6
44653 - 78 78 78 250 250 250 253 253 253 253 253 253
44654 -253 253 253 253 253 253 253 253 253 253 253 253
44655 -253 253 253 253 253 253 253 253 253 253 253 253
44656 -253 253 253 253 253 253 253 253 253 253 253 253
44657 -250 250 250 214 214 214 198 198 198 190 150 46
44658 -219 162 10 236 178 12 234 174 13 224 166 10
44659 -216 158 10 213 154 11 213 154 11 216 158 10
44660 -226 170 11 239 182 13 246 190 14 246 190 14
44661 -246 190 14 246 190 14 242 186 14 206 162 42
44662 -101 101 101 58 58 58 30 30 30 14 14 14
44663 - 6 6 6 0 0 0 0 0 0 0 0 0
44664 - 0 0 0 0 0 0 0 0 0 0 0 0
44665 - 0 0 0 0 0 0 0 0 0 0 0 0
44666 - 0 0 0 0 0 0 0 0 0 10 10 10
44667 - 30 30 30 74 74 74 174 135 50 216 158 10
44668 -236 178 12 246 190 14 246 190 14 246 190 14
44669 -246 190 14 246 190 14 246 190 14 246 190 14
44670 -246 190 14 246 190 14 246 190 14 246 190 14
44671 -246 190 14 246 190 14 241 196 14 226 184 13
44672 - 61 42 6 2 2 6 2 2 6 2 2 6
44673 - 22 22 22 238 238 238 253 253 253 253 253 253
44674 -253 253 253 253 253 253 253 253 253 253 253 253
44675 -253 253 253 253 253 253 253 253 253 253 253 253
44676 -253 253 253 253 253 253 253 253 253 253 253 253
44677 -253 253 253 226 226 226 187 187 187 180 133 36
44678 -216 158 10 236 178 12 239 182 13 236 178 12
44679 -230 174 11 226 170 11 226 170 11 230 174 11
44680 -236 178 12 242 186 14 246 190 14 246 190 14
44681 -246 190 14 246 190 14 246 186 14 239 182 13
44682 -206 162 42 106 106 106 66 66 66 34 34 34
44683 - 14 14 14 6 6 6 0 0 0 0 0 0
44684 - 0 0 0 0 0 0 0 0 0 0 0 0
44685 - 0 0 0 0 0 0 0 0 0 0 0 0
44686 - 0 0 0 0 0 0 0 0 0 6 6 6
44687 - 26 26 26 70 70 70 163 133 67 213 154 11
44688 -236 178 12 246 190 14 246 190 14 246 190 14
44689 -246 190 14 246 190 14 246 190 14 246 190 14
44690 -246 190 14 246 190 14 246 190 14 246 190 14
44691 -246 190 14 246 190 14 246 190 14 241 196 14
44692 -190 146 13 18 14 6 2 2 6 2 2 6
44693 - 46 46 46 246 246 246 253 253 253 253 253 253
44694 -253 253 253 253 253 253 253 253 253 253 253 253
44695 -253 253 253 253 253 253 253 253 253 253 253 253
44696 -253 253 253 253 253 253 253 253 253 253 253 253
44697 -253 253 253 221 221 221 86 86 86 156 107 11
44698 -216 158 10 236 178 12 242 186 14 246 186 14
44699 -242 186 14 239 182 13 239 182 13 242 186 14
44700 -242 186 14 246 186 14 246 190 14 246 190 14
44701 -246 190 14 246 190 14 246 190 14 246 190 14
44702 -242 186 14 225 175 15 142 122 72 66 66 66
44703 - 30 30 30 10 10 10 0 0 0 0 0 0
44704 - 0 0 0 0 0 0 0 0 0 0 0 0
44705 - 0 0 0 0 0 0 0 0 0 0 0 0
44706 - 0 0 0 0 0 0 0 0 0 6 6 6
44707 - 26 26 26 70 70 70 163 133 67 210 150 10
44708 -236 178 12 246 190 14 246 190 14 246 190 14
44709 -246 190 14 246 190 14 246 190 14 246 190 14
44710 -246 190 14 246 190 14 246 190 14 246 190 14
44711 -246 190 14 246 190 14 246 190 14 246 190 14
44712 -232 195 16 121 92 8 34 34 34 106 106 106
44713 -221 221 221 253 253 253 253 253 253 253 253 253
44714 -253 253 253 253 253 253 253 253 253 253 253 253
44715 -253 253 253 253 253 253 253 253 253 253 253 253
44716 -253 253 253 253 253 253 253 253 253 253 253 253
44717 -242 242 242 82 82 82 18 14 6 163 110 8
44718 -216 158 10 236 178 12 242 186 14 246 190 14
44719 -246 190 14 246 190 14 246 190 14 246 190 14
44720 -246 190 14 246 190 14 246 190 14 246 190 14
44721 -246 190 14 246 190 14 246 190 14 246 190 14
44722 -246 190 14 246 190 14 242 186 14 163 133 67
44723 - 46 46 46 18 18 18 6 6 6 0 0 0
44724 - 0 0 0 0 0 0 0 0 0 0 0 0
44725 - 0 0 0 0 0 0 0 0 0 0 0 0
44726 - 0 0 0 0 0 0 0 0 0 10 10 10
44727 - 30 30 30 78 78 78 163 133 67 210 150 10
44728 -236 178 12 246 186 14 246 190 14 246 190 14
44729 -246 190 14 246 190 14 246 190 14 246 190 14
44730 -246 190 14 246 190 14 246 190 14 246 190 14
44731 -246 190 14 246 190 14 246 190 14 246 190 14
44732 -241 196 14 215 174 15 190 178 144 253 253 253
44733 -253 253 253 253 253 253 253 253 253 253 253 253
44734 -253 253 253 253 253 253 253 253 253 253 253 253
44735 -253 253 253 253 253 253 253 253 253 253 253 253
44736 -253 253 253 253 253 253 253 253 253 218 218 218
44737 - 58 58 58 2 2 6 22 18 6 167 114 7
44738 -216 158 10 236 178 12 246 186 14 246 190 14
44739 -246 190 14 246 190 14 246 190 14 246 190 14
44740 -246 190 14 246 190 14 246 190 14 246 190 14
44741 -246 190 14 246 190 14 246 190 14 246 190 14
44742 -246 190 14 246 186 14 242 186 14 190 150 46
44743 - 54 54 54 22 22 22 6 6 6 0 0 0
44744 - 0 0 0 0 0 0 0 0 0 0 0 0
44745 - 0 0 0 0 0 0 0 0 0 0 0 0
44746 - 0 0 0 0 0 0 0 0 0 14 14 14
44747 - 38 38 38 86 86 86 180 133 36 213 154 11
44748 -236 178 12 246 186 14 246 190 14 246 190 14
44749 -246 190 14 246 190 14 246 190 14 246 190 14
44750 -246 190 14 246 190 14 246 190 14 246 190 14
44751 -246 190 14 246 190 14 246 190 14 246 190 14
44752 -246 190 14 232 195 16 190 146 13 214 214 214
44753 -253 253 253 253 253 253 253 253 253 253 253 253
44754 -253 253 253 253 253 253 253 253 253 253 253 253
44755 -253 253 253 253 253 253 253 253 253 253 253 253
44756 -253 253 253 250 250 250 170 170 170 26 26 26
44757 - 2 2 6 2 2 6 37 26 9 163 110 8
44758 -219 162 10 239 182 13 246 186 14 246 190 14
44759 -246 190 14 246 190 14 246 190 14 246 190 14
44760 -246 190 14 246 190 14 246 190 14 246 190 14
44761 -246 190 14 246 190 14 246 190 14 246 190 14
44762 -246 186 14 236 178 12 224 166 10 142 122 72
44763 - 46 46 46 18 18 18 6 6 6 0 0 0
44764 - 0 0 0 0 0 0 0 0 0 0 0 0
44765 - 0 0 0 0 0 0 0 0 0 0 0 0
44766 - 0 0 0 0 0 0 6 6 6 18 18 18
44767 - 50 50 50 109 106 95 192 133 9 224 166 10
44768 -242 186 14 246 190 14 246 190 14 246 190 14
44769 -246 190 14 246 190 14 246 190 14 246 190 14
44770 -246 190 14 246 190 14 246 190 14 246 190 14
44771 -246 190 14 246 190 14 246 190 14 246 190 14
44772 -242 186 14 226 184 13 210 162 10 142 110 46
44773 -226 226 226 253 253 253 253 253 253 253 253 253
44774 -253 253 253 253 253 253 253 253 253 253 253 253
44775 -253 253 253 253 253 253 253 253 253 253 253 253
44776 -198 198 198 66 66 66 2 2 6 2 2 6
44777 - 2 2 6 2 2 6 50 34 6 156 107 11
44778 -219 162 10 239 182 13 246 186 14 246 190 14
44779 -246 190 14 246 190 14 246 190 14 246 190 14
44780 -246 190 14 246 190 14 246 190 14 246 190 14
44781 -246 190 14 246 190 14 246 190 14 242 186 14
44782 -234 174 13 213 154 11 154 122 46 66 66 66
44783 - 30 30 30 10 10 10 0 0 0 0 0 0
44784 - 0 0 0 0 0 0 0 0 0 0 0 0
44785 - 0 0 0 0 0 0 0 0 0 0 0 0
44786 - 0 0 0 0 0 0 6 6 6 22 22 22
44787 - 58 58 58 154 121 60 206 145 10 234 174 13
44788 -242 186 14 246 186 14 246 190 14 246 190 14
44789 -246 190 14 246 190 14 246 190 14 246 190 14
44790 -246 190 14 246 190 14 246 190 14 246 190 14
44791 -246 190 14 246 190 14 246 190 14 246 190 14
44792 -246 186 14 236 178 12 210 162 10 163 110 8
44793 - 61 42 6 138 138 138 218 218 218 250 250 250
44794 -253 253 253 253 253 253 253 253 253 250 250 250
44795 -242 242 242 210 210 210 144 144 144 66 66 66
44796 - 6 6 6 2 2 6 2 2 6 2 2 6
44797 - 2 2 6 2 2 6 61 42 6 163 110 8
44798 -216 158 10 236 178 12 246 190 14 246 190 14
44799 -246 190 14 246 190 14 246 190 14 246 190 14
44800 -246 190 14 246 190 14 246 190 14 246 190 14
44801 -246 190 14 239 182 13 230 174 11 216 158 10
44802 -190 142 34 124 112 88 70 70 70 38 38 38
44803 - 18 18 18 6 6 6 0 0 0 0 0 0
44804 - 0 0 0 0 0 0 0 0 0 0 0 0
44805 - 0 0 0 0 0 0 0 0 0 0 0 0
44806 - 0 0 0 0 0 0 6 6 6 22 22 22
44807 - 62 62 62 168 124 44 206 145 10 224 166 10
44808 -236 178 12 239 182 13 242 186 14 242 186 14
44809 -246 186 14 246 190 14 246 190 14 246 190 14
44810 -246 190 14 246 190 14 246 190 14 246 190 14
44811 -246 190 14 246 190 14 246 190 14 246 190 14
44812 -246 190 14 236 178 12 216 158 10 175 118 6
44813 - 80 54 7 2 2 6 6 6 6 30 30 30
44814 - 54 54 54 62 62 62 50 50 50 38 38 38
44815 - 14 14 14 2 2 6 2 2 6 2 2 6
44816 - 2 2 6 2 2 6 2 2 6 2 2 6
44817 - 2 2 6 6 6 6 80 54 7 167 114 7
44818 -213 154 11 236 178 12 246 190 14 246 190 14
44819 -246 190 14 246 190 14 246 190 14 246 190 14
44820 -246 190 14 242 186 14 239 182 13 239 182 13
44821 -230 174 11 210 150 10 174 135 50 124 112 88
44822 - 82 82 82 54 54 54 34 34 34 18 18 18
44823 - 6 6 6 0 0 0 0 0 0 0 0 0
44824 - 0 0 0 0 0 0 0 0 0 0 0 0
44825 - 0 0 0 0 0 0 0 0 0 0 0 0
44826 - 0 0 0 0 0 0 6 6 6 18 18 18
44827 - 50 50 50 158 118 36 192 133 9 200 144 11
44828 -216 158 10 219 162 10 224 166 10 226 170 11
44829 -230 174 11 236 178 12 239 182 13 239 182 13
44830 -242 186 14 246 186 14 246 190 14 246 190 14
44831 -246 190 14 246 190 14 246 190 14 246 190 14
44832 -246 186 14 230 174 11 210 150 10 163 110 8
44833 -104 69 6 10 10 10 2 2 6 2 2 6
44834 - 2 2 6 2 2 6 2 2 6 2 2 6
44835 - 2 2 6 2 2 6 2 2 6 2 2 6
44836 - 2 2 6 2 2 6 2 2 6 2 2 6
44837 - 2 2 6 6 6 6 91 60 6 167 114 7
44838 -206 145 10 230 174 11 242 186 14 246 190 14
44839 -246 190 14 246 190 14 246 186 14 242 186 14
44840 -239 182 13 230 174 11 224 166 10 213 154 11
44841 -180 133 36 124 112 88 86 86 86 58 58 58
44842 - 38 38 38 22 22 22 10 10 10 6 6 6
44843 - 0 0 0 0 0 0 0 0 0 0 0 0
44844 - 0 0 0 0 0 0 0 0 0 0 0 0
44845 - 0 0 0 0 0 0 0 0 0 0 0 0
44846 - 0 0 0 0 0 0 0 0 0 14 14 14
44847 - 34 34 34 70 70 70 138 110 50 158 118 36
44848 -167 114 7 180 123 7 192 133 9 197 138 11
44849 -200 144 11 206 145 10 213 154 11 219 162 10
44850 -224 166 10 230 174 11 239 182 13 242 186 14
44851 -246 186 14 246 186 14 246 186 14 246 186 14
44852 -239 182 13 216 158 10 185 133 11 152 99 6
44853 -104 69 6 18 14 6 2 2 6 2 2 6
44854 - 2 2 6 2 2 6 2 2 6 2 2 6
44855 - 2 2 6 2 2 6 2 2 6 2 2 6
44856 - 2 2 6 2 2 6 2 2 6 2 2 6
44857 - 2 2 6 6 6 6 80 54 7 152 99 6
44858 -192 133 9 219 162 10 236 178 12 239 182 13
44859 -246 186 14 242 186 14 239 182 13 236 178 12
44860 -224 166 10 206 145 10 192 133 9 154 121 60
44861 - 94 94 94 62 62 62 42 42 42 22 22 22
44862 - 14 14 14 6 6 6 0 0 0 0 0 0
44863 - 0 0 0 0 0 0 0 0 0 0 0 0
44864 - 0 0 0 0 0 0 0 0 0 0 0 0
44865 - 0 0 0 0 0 0 0 0 0 0 0 0
44866 - 0 0 0 0 0 0 0 0 0 6 6 6
44867 - 18 18 18 34 34 34 58 58 58 78 78 78
44868 -101 98 89 124 112 88 142 110 46 156 107 11
44869 -163 110 8 167 114 7 175 118 6 180 123 7
44870 -185 133 11 197 138 11 210 150 10 219 162 10
44871 -226 170 11 236 178 12 236 178 12 234 174 13
44872 -219 162 10 197 138 11 163 110 8 130 83 6
44873 - 91 60 6 10 10 10 2 2 6 2 2 6
44874 - 18 18 18 38 38 38 38 38 38 38 38 38
44875 - 38 38 38 38 38 38 38 38 38 38 38 38
44876 - 38 38 38 38 38 38 26 26 26 2 2 6
44877 - 2 2 6 6 6 6 70 47 6 137 92 6
44878 -175 118 6 200 144 11 219 162 10 230 174 11
44879 -234 174 13 230 174 11 219 162 10 210 150 10
44880 -192 133 9 163 110 8 124 112 88 82 82 82
44881 - 50 50 50 30 30 30 14 14 14 6 6 6
44882 - 0 0 0 0 0 0 0 0 0 0 0 0
44883 - 0 0 0 0 0 0 0 0 0 0 0 0
44884 - 0 0 0 0 0 0 0 0 0 0 0 0
44885 - 0 0 0 0 0 0 0 0 0 0 0 0
44886 - 0 0 0 0 0 0 0 0 0 0 0 0
44887 - 6 6 6 14 14 14 22 22 22 34 34 34
44888 - 42 42 42 58 58 58 74 74 74 86 86 86
44889 -101 98 89 122 102 70 130 98 46 121 87 25
44890 -137 92 6 152 99 6 163 110 8 180 123 7
44891 -185 133 11 197 138 11 206 145 10 200 144 11
44892 -180 123 7 156 107 11 130 83 6 104 69 6
44893 - 50 34 6 54 54 54 110 110 110 101 98 89
44894 - 86 86 86 82 82 82 78 78 78 78 78 78
44895 - 78 78 78 78 78 78 78 78 78 78 78 78
44896 - 78 78 78 82 82 82 86 86 86 94 94 94
44897 -106 106 106 101 101 101 86 66 34 124 80 6
44898 -156 107 11 180 123 7 192 133 9 200 144 11
44899 -206 145 10 200 144 11 192 133 9 175 118 6
44900 -139 102 15 109 106 95 70 70 70 42 42 42
44901 - 22 22 22 10 10 10 0 0 0 0 0 0
44902 - 0 0 0 0 0 0 0 0 0 0 0 0
44903 - 0 0 0 0 0 0 0 0 0 0 0 0
44904 - 0 0 0 0 0 0 0 0 0 0 0 0
44905 - 0 0 0 0 0 0 0 0 0 0 0 0
44906 - 0 0 0 0 0 0 0 0 0 0 0 0
44907 - 0 0 0 0 0 0 6 6 6 10 10 10
44908 - 14 14 14 22 22 22 30 30 30 38 38 38
44909 - 50 50 50 62 62 62 74 74 74 90 90 90
44910 -101 98 89 112 100 78 121 87 25 124 80 6
44911 -137 92 6 152 99 6 152 99 6 152 99 6
44912 -138 86 6 124 80 6 98 70 6 86 66 30
44913 -101 98 89 82 82 82 58 58 58 46 46 46
44914 - 38 38 38 34 34 34 34 34 34 34 34 34
44915 - 34 34 34 34 34 34 34 34 34 34 34 34
44916 - 34 34 34 34 34 34 38 38 38 42 42 42
44917 - 54 54 54 82 82 82 94 86 76 91 60 6
44918 -134 86 6 156 107 11 167 114 7 175 118 6
44919 -175 118 6 167 114 7 152 99 6 121 87 25
44920 -101 98 89 62 62 62 34 34 34 18 18 18
44921 - 6 6 6 0 0 0 0 0 0 0 0 0
44922 - 0 0 0 0 0 0 0 0 0 0 0 0
44923 - 0 0 0 0 0 0 0 0 0 0 0 0
44924 - 0 0 0 0 0 0 0 0 0 0 0 0
44925 - 0 0 0 0 0 0 0 0 0 0 0 0
44926 - 0 0 0 0 0 0 0 0 0 0 0 0
44927 - 0 0 0 0 0 0 0 0 0 0 0 0
44928 - 0 0 0 6 6 6 6 6 6 10 10 10
44929 - 18 18 18 22 22 22 30 30 30 42 42 42
44930 - 50 50 50 66 66 66 86 86 86 101 98 89
44931 -106 86 58 98 70 6 104 69 6 104 69 6
44932 -104 69 6 91 60 6 82 62 34 90 90 90
44933 - 62 62 62 38 38 38 22 22 22 14 14 14
44934 - 10 10 10 10 10 10 10 10 10 10 10 10
44935 - 10 10 10 10 10 10 6 6 6 10 10 10
44936 - 10 10 10 10 10 10 10 10 10 14 14 14
44937 - 22 22 22 42 42 42 70 70 70 89 81 66
44938 - 80 54 7 104 69 6 124 80 6 137 92 6
44939 -134 86 6 116 81 8 100 82 52 86 86 86
44940 - 58 58 58 30 30 30 14 14 14 6 6 6
44941 - 0 0 0 0 0 0 0 0 0 0 0 0
44942 - 0 0 0 0 0 0 0 0 0 0 0 0
44943 - 0 0 0 0 0 0 0 0 0 0 0 0
44944 - 0 0 0 0 0 0 0 0 0 0 0 0
44945 - 0 0 0 0 0 0 0 0 0 0 0 0
44946 - 0 0 0 0 0 0 0 0 0 0 0 0
44947 - 0 0 0 0 0 0 0 0 0 0 0 0
44948 - 0 0 0 0 0 0 0 0 0 0 0 0
44949 - 0 0 0 6 6 6 10 10 10 14 14 14
44950 - 18 18 18 26 26 26 38 38 38 54 54 54
44951 - 70 70 70 86 86 86 94 86 76 89 81 66
44952 - 89 81 66 86 86 86 74 74 74 50 50 50
44953 - 30 30 30 14 14 14 6 6 6 0 0 0
44954 - 0 0 0 0 0 0 0 0 0 0 0 0
44955 - 0 0 0 0 0 0 0 0 0 0 0 0
44956 - 0 0 0 0 0 0 0 0 0 0 0 0
44957 - 6 6 6 18 18 18 34 34 34 58 58 58
44958 - 82 82 82 89 81 66 89 81 66 89 81 66
44959 - 94 86 66 94 86 76 74 74 74 50 50 50
44960 - 26 26 26 14 14 14 6 6 6 0 0 0
44961 - 0 0 0 0 0 0 0 0 0 0 0 0
44962 - 0 0 0 0 0 0 0 0 0 0 0 0
44963 - 0 0 0 0 0 0 0 0 0 0 0 0
44964 - 0 0 0 0 0 0 0 0 0 0 0 0
44965 - 0 0 0 0 0 0 0 0 0 0 0 0
44966 - 0 0 0 0 0 0 0 0 0 0 0 0
44967 - 0 0 0 0 0 0 0 0 0 0 0 0
44968 - 0 0 0 0 0 0 0 0 0 0 0 0
44969 - 0 0 0 0 0 0 0 0 0 0 0 0
44970 - 6 6 6 6 6 6 14 14 14 18 18 18
44971 - 30 30 30 38 38 38 46 46 46 54 54 54
44972 - 50 50 50 42 42 42 30 30 30 18 18 18
44973 - 10 10 10 0 0 0 0 0 0 0 0 0
44974 - 0 0 0 0 0 0 0 0 0 0 0 0
44975 - 0 0 0 0 0 0 0 0 0 0 0 0
44976 - 0 0 0 0 0 0 0 0 0 0 0 0
44977 - 0 0 0 6 6 6 14 14 14 26 26 26
44978 - 38 38 38 50 50 50 58 58 58 58 58 58
44979 - 54 54 54 42 42 42 30 30 30 18 18 18
44980 - 10 10 10 0 0 0 0 0 0 0 0 0
44981 - 0 0 0 0 0 0 0 0 0 0 0 0
44982 - 0 0 0 0 0 0 0 0 0 0 0 0
44983 - 0 0 0 0 0 0 0 0 0 0 0 0
44984 - 0 0 0 0 0 0 0 0 0 0 0 0
44985 - 0 0 0 0 0 0 0 0 0 0 0 0
44986 - 0 0 0 0 0 0 0 0 0 0 0 0
44987 - 0 0 0 0 0 0 0 0 0 0 0 0
44988 - 0 0 0 0 0 0 0 0 0 0 0 0
44989 - 0 0 0 0 0 0 0 0 0 0 0 0
44990 - 0 0 0 0 0 0 0 0 0 6 6 6
44991 - 6 6 6 10 10 10 14 14 14 18 18 18
44992 - 18 18 18 14 14 14 10 10 10 6 6 6
44993 - 0 0 0 0 0 0 0 0 0 0 0 0
44994 - 0 0 0 0 0 0 0 0 0 0 0 0
44995 - 0 0 0 0 0 0 0 0 0 0 0 0
44996 - 0 0 0 0 0 0 0 0 0 0 0 0
44997 - 0 0 0 0 0 0 0 0 0 6 6 6
44998 - 14 14 14 18 18 18 22 22 22 22 22 22
44999 - 18 18 18 14 14 14 10 10 10 6 6 6
45000 - 0 0 0 0 0 0 0 0 0 0 0 0
45001 - 0 0 0 0 0 0 0 0 0 0 0 0
45002 - 0 0 0 0 0 0 0 0 0 0 0 0
45003 - 0 0 0 0 0 0 0 0 0 0 0 0
45004 - 0 0 0 0 0 0 0 0 0 0 0 0
45005 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45006 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45007 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45008 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45009 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45010 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45011 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45012 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45013 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45014 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45015 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45016 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45017 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45018 +4 4 4 4 4 4
45019 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45020 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45021 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45022 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45023 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45024 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45025 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45026 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45027 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45028 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45029 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45030 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45031 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45032 +4 4 4 4 4 4
45033 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45034 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45035 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45036 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45037 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45038 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45039 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45040 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45041 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45042 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45043 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45044 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45045 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45046 +4 4 4 4 4 4
45047 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45048 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45049 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45050 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45051 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45052 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45053 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45054 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45055 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45056 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45057 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45058 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45059 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45060 +4 4 4 4 4 4
45061 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45062 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45063 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45064 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45065 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45066 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45067 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45068 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45069 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45070 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45071 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45072 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45073 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45074 +4 4 4 4 4 4
45075 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45076 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45077 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45078 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45079 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45080 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45081 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45082 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45083 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45084 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45085 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45086 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45087 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45088 +4 4 4 4 4 4
45089 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45090 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45091 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45092 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45093 +4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
45094 +0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
45095 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45096 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45097 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45098 +4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
45099 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
45100 +4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
45101 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45102 +4 4 4 4 4 4
45103 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45104 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45105 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45106 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45107 +4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
45108 +37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
45109 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45110 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45111 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45112 +4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
45113 +2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
45114 +4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
45115 +1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45116 +4 4 4 4 4 4
45117 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45118 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45119 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45120 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45121 +2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
45122 +153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
45123 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
45124 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45125 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45126 +4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
45127 +60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
45128 +4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
45129 +2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
45130 +4 4 4 4 4 4
45131 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45132 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45133 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45134 +4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
45135 +4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
45136 +165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
45137 +1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
45138 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45139 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
45140 +3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
45141 +163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
45142 +0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
45143 +37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
45144 +4 4 4 4 4 4
45145 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45146 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45147 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45148 +4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
45149 +37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
45150 +156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
45151 +125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
45152 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45153 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
45154 +0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
45155 +174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
45156 +0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
45157 +64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
45158 +4 4 4 4 4 4
45159 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45160 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45161 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
45162 +5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
45163 +156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
45164 +156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
45165 +174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
45166 +1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
45167 +4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
45168 +13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
45169 +174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
45170 +22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
45171 +90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
45172 +4 4 4 4 4 4
45173 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45174 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45175 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
45176 +0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
45177 +174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
45178 +156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
45179 +163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
45180 +4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
45181 +5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
45182 +131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
45183 +190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
45184 +90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
45185 +31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
45186 +4 4 4 4 4 4
45187 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45188 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45189 +4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
45190 +4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
45191 +155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
45192 +167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
45193 +153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
45194 +41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
45195 +1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
45196 +177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
45197 +125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
45198 +136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
45199 +7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
45200 +4 4 4 4 4 4
45201 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45202 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45203 +4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
45204 +125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
45205 +156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
45206 +137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
45207 +156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
45208 +167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
45209 +0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
45210 +166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
45211 +6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
45212 +90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
45213 +1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
45214 +4 4 4 4 4 4
45215 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45216 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45217 +1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
45218 +167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
45219 +157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
45220 +26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
45221 +158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
45222 +165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
45223 +60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
45224 +137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
45225 +52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
45226 +13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
45227 +4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
45228 +4 4 4 4 4 4
45229 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45230 +4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
45231 +0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
45232 +158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
45233 +167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
45234 +4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
45235 +174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
45236 +155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
45237 +137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
45238 +16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
45239 +136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
45240 +2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
45241 +4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
45242 +4 4 4 4 4 4
45243 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45244 +4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
45245 +37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
45246 +157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
45247 +153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
45248 +4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
45249 +125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
45250 +156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
45251 +174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
45252 +4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
45253 +136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
45254 +1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
45255 +2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
45256 +0 0 0 4 4 4
45257 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
45258 +4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
45259 +158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
45260 +153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
45261 +37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
45262 +4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
45263 +4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
45264 +154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
45265 +174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
45266 +32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
45267 +28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
45268 +50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
45269 +0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
45270 +2 0 0 0 0 0
45271 +4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
45272 +0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
45273 +174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
45274 +165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
45275 +4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
45276 +4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
45277 +4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
45278 +174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
45279 +60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
45280 +136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
45281 +22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
45282 +136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
45283 +26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
45284 +37 38 37 0 0 0
45285 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
45286 +13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
45287 +153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
45288 +177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
45289 +4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
45290 +5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
45291 +6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
45292 +166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
45293 +4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
45294 +146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
45295 +71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
45296 +90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
45297 +125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
45298 +85 115 134 4 0 0
45299 +4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
45300 +125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
45301 +155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
45302 +125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
45303 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
45304 +0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
45305 +5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
45306 +37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
45307 +4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
45308 +90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
45309 +2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
45310 +13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
45311 +166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
45312 +60 73 81 4 0 0
45313 +4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
45314 +174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
45315 +156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
45316 +4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
45317 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
45318 +10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
45319 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
45320 +4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
45321 +80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
45322 +28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
45323 +50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
45324 +1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
45325 +167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
45326 +16 19 21 4 0 0
45327 +4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
45328 +158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
45329 +167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
45330 +4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
45331 +4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
45332 +80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
45333 +4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
45334 +3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
45335 +146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
45336 +68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
45337 +136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
45338 +24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
45339 +163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
45340 +4 0 0 4 3 3
45341 +3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
45342 +156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
45343 +155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
45344 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
45345 +2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
45346 +136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
45347 +0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
45348 +0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
45349 +136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
45350 +28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
45351 +22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
45352 +137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
45353 +60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
45354 +3 2 2 4 4 4
45355 +3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
45356 +157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
45357 +37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
45358 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
45359 +0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
45360 +101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
45361 +14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
45362 +22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
45363 +136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
45364 +17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
45365 +2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
45366 +166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
45367 +13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
45368 +4 4 4 4 4 4
45369 +1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
45370 +163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
45371 +4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
45372 +4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
45373 +40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
45374 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
45375 +101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
45376 +136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
45377 +136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
45378 +136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
45379 +3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
45380 +174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
45381 +4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
45382 +4 4 4 4 4 4
45383 +4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
45384 +155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
45385 +4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
45386 +4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
45387 +101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
45388 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
45389 +136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
45390 +136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
45391 +136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
45392 +90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
45393 +85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
45394 +167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
45395 +6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
45396 +5 5 5 5 5 5
45397 +1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
45398 +131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
45399 +6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
45400 +0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
45401 +101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
45402 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45403 +101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
45404 +136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
45405 +101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
45406 +7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
45407 +174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
45408 +24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
45409 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
45410 +5 5 5 4 4 4
45411 +4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
45412 +131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
45413 +6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
45414 +13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
45415 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
45416 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
45417 +101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
45418 +136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
45419 +136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
45420 +2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
45421 +174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
45422 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
45423 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45424 +4 4 4 4 4 4
45425 +1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
45426 +137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
45427 +4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
45428 +64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
45429 +90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
45430 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45431 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45432 +136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
45433 +101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
45434 +37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
45435 +167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
45436 +3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
45437 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45438 +4 4 4 4 4 4
45439 +4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
45440 +153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
45441 +4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
45442 +90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
45443 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
45444 +90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
45445 +101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
45446 +101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
45447 +35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
45448 +154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
45449 +60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
45450 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45451 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45452 +4 4 4 4 4 4
45453 +1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
45454 +153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
45455 +4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
45456 +64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
45457 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
45458 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45459 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45460 +136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
45461 +13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
45462 +174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
45463 +6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
45464 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45465 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45466 +4 4 4 4 4 4
45467 +4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
45468 +156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
45469 +4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
45470 +90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
45471 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
45472 +90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
45473 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
45474 +101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
45475 +2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
45476 +174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
45477 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45478 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45479 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45480 +4 4 4 4 4 4
45481 +3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
45482 +158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
45483 +4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
45484 +37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
45485 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
45486 +90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
45487 +101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
45488 +90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
45489 +5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
45490 +167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
45491 +6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
45492 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45493 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45494 +4 4 4 4 4 4
45495 +4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
45496 +163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
45497 +4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
45498 +18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
45499 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
45500 +90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
45501 +101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
45502 +13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
45503 +3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
45504 +174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
45505 +4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
45506 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45507 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45508 +4 4 4 4 4 4
45509 +1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
45510 +167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
45511 +4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
45512 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
45513 +26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
45514 +90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
45515 +101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
45516 +7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
45517 +4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
45518 +174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
45519 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45520 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45521 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45522 +4 4 4 4 4 4
45523 +4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
45524 +174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
45525 +5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
45526 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
45527 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45528 +90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
45529 +101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
45530 +2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
45531 +3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
45532 +153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
45533 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45534 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45535 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45536 +4 4 4 4 4 4
45537 +1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
45538 +174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
45539 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
45540 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
45541 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45542 +26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
45543 +35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
45544 +2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
45545 +3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
45546 +131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
45547 +4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45548 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45549 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45550 +4 4 4 4 4 4
45551 +3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
45552 +174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
45553 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
45554 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
45555 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45556 +26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
45557 +7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
45558 +4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
45559 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
45560 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45561 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45562 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45563 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45564 +4 4 4 4 4 4
45565 +1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
45566 +174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
45567 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
45568 +18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
45569 +18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
45570 +26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
45571 +28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
45572 +3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
45573 +4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45574 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45575 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45576 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45577 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45578 +4 4 4 4 4 4
45579 +4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
45580 +174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
45581 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
45582 +10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
45583 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45584 +18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
45585 +90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
45586 +3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
45587 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
45588 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45589 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45590 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45591 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45592 +4 4 4 4 4 4
45593 +1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
45594 +177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
45595 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
45596 +10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
45597 +26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
45598 +6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
45599 +10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
45600 +2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
45601 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45602 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45603 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45604 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45605 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45606 +4 4 4 4 4 4
45607 +4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
45608 +177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
45609 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
45610 +10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
45611 +26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
45612 +7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
45613 +3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
45614 +21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
45615 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
45616 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45617 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45618 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45619 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45620 +4 4 4 4 4 4
45621 +3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
45622 +190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
45623 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
45624 +10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
45625 +24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
45626 +18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
45627 +28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
45628 +26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
45629 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45630 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45631 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45632 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45633 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45634 +4 4 4 4 4 4
45635 +4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
45636 +190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
45637 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
45638 +10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
45639 +0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
45640 +26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
45641 +37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
45642 +90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
45643 +4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
45644 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45645 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45646 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45647 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45648 +4 4 4 4 4 4
45649 +4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
45650 +193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
45651 +5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
45652 +10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
45653 +1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
45654 +26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
45655 +22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
45656 +26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
45657 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45658 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45659 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45660 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45661 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45662 +4 4 4 4 4 4
45663 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
45664 +190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
45665 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
45666 +10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
45667 +2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
45668 +26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
45669 +10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
45670 +26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
45671 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
45672 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45673 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45674 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45675 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45676 +4 4 4 4 4 4
45677 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
45678 +193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
45679 +5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
45680 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
45681 +13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
45682 +10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
45683 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45684 +26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
45685 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45686 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45687 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45688 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45689 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45690 +4 4 4 4 4 4
45691 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
45692 +190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
45693 +5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
45694 +28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
45695 +10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
45696 +28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
45697 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45698 +26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
45699 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
45700 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45701 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45702 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45703 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45704 +4 4 4 4 4 4
45705 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
45706 +193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
45707 +5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
45708 +4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
45709 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
45710 +10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
45711 +18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
45712 +22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
45713 +4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
45714 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45715 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45716 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45717 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45718 +4 4 4 4 4 4
45719 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
45720 +190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
45721 +6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
45722 +1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
45723 +18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
45724 +10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
45725 +26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
45726 +1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
45727 +5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
45728 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45729 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45730 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45731 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45732 +4 4 4 4 4 4
45733 +4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
45734 +193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
45735 +2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
45736 +4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
45737 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
45738 +10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
45739 +26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
45740 +2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
45741 +3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
45742 +131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45743 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45744 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45745 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45746 +4 4 4 4 4 4
45747 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
45748 +193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
45749 +0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
45750 +4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
45751 +13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
45752 +10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
45753 +28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
45754 +4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
45755 +0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
45756 +125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45757 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45758 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45759 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45760 +4 4 4 4 4 4
45761 +4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
45762 +193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
45763 +120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
45764 +4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
45765 +4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
45766 +10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
45767 +4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
45768 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
45769 +24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
45770 +125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
45771 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45772 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45773 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45774 +4 4 4 4 4 4
45775 +4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
45776 +174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
45777 +220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
45778 +3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
45779 +4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
45780 +10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
45781 +1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
45782 +5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
45783 +137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
45784 +125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
45785 +0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45786 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45787 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45788 +4 4 4 4 4 4
45789 +5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
45790 +193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
45791 +220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
45792 +4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
45793 +4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
45794 +22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
45795 +4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45796 +1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
45797 +166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
45798 +125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
45799 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45800 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45801 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45802 +4 4 4 4 4 4
45803 +4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
45804 +220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
45805 +205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
45806 +24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
45807 +4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
45808 +4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
45809 +4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
45810 +2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
45811 +156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
45812 +137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
45813 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45814 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45815 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45816 +4 4 4 4 4 4
45817 +5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
45818 +125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
45819 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
45820 +193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
45821 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
45822 +1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
45823 +5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
45824 +60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
45825 +153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
45826 +125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
45827 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45828 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45829 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45830 +4 4 4 4 4 4
45831 +4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
45832 +6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
45833 +193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
45834 +244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
45835 +0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
45836 +4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
45837 +3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
45838 +220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
45839 +153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
45840 +13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
45841 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45842 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45843 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45844 +4 4 4 4 4 4
45845 +5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
45846 +6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
45847 +244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
45848 +220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
45849 +3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
45850 +4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
45851 +0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
45852 +177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
45853 +158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
45854 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
45855 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45856 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45857 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45858 +4 4 4 4 4 4
45859 +5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
45860 +6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
45861 +177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
45862 +220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
45863 +125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
45864 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
45865 +37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
45866 +174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
45867 +158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
45868 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45869 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45870 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45871 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45872 +4 4 4 4 4 4
45873 +4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
45874 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
45875 +26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
45876 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
45877 +244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
45878 +0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
45879 +177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
45880 +174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
45881 +60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
45882 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45883 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45884 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45885 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45886 +4 4 4 4 4 4
45887 +5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
45888 +6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
45889 +6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
45890 +220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
45891 +220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
45892 +0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
45893 +220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
45894 +174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
45895 +4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
45896 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45897 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45898 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45899 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45900 +4 4 4 4 4 4
45901 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
45902 +6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
45903 +4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
45904 +220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
45905 +205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
45906 +60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
45907 +177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
45908 +190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
45909 +4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45910 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45911 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45912 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45913 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45914 +4 4 4 4 4 4
45915 +4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
45916 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
45917 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
45918 +125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
45919 +205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
45920 +193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
45921 +190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
45922 +153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
45923 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45924 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45925 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45926 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45927 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45928 +4 4 4 4 4 4
45929 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
45930 +6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
45931 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
45932 +4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
45933 +205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
45934 +220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
45935 +174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
45936 +6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
45937 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45938 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45939 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45940 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45941 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45942 +4 4 4 4 4 4
45943 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
45944 +5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
45945 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
45946 +4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
45947 +220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
45948 +190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
45949 +193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
45950 +4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
45951 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45952 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45953 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45954 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45955 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45956 +4 4 4 4 4 4
45957 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45958 +4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
45959 +4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
45960 +6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
45961 +174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
45962 +193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
45963 +193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
45964 +6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
45965 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45966 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45967 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45968 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45969 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45970 +4 4 4 4 4 4
45971 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45972 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
45973 +5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
45974 +5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
45975 +6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
45976 +193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
45977 +60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
45978 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45979 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45980 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45981 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45982 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45983 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45984 +4 4 4 4 4 4
45985 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45986 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45987 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
45988 +5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
45989 +4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
45990 +193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
45991 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
45992 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45993 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45994 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45995 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45996 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45997 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45998 +4 4 4 4 4 4
45999 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46000 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46001 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
46002 +4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
46003 +6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
46004 +153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
46005 +6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
46006 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46007 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46008 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46009 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46010 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46011 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46012 +4 4 4 4 4 4
46013 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46014 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46015 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46016 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
46017 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
46018 +24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
46019 +6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
46020 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46021 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46022 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46023 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46024 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46025 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46026 +4 4 4 4 4 4
46027 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46028 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46029 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46030 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
46031 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
46032 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
46033 +4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
46034 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46035 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46036 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46037 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46038 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46039 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46040 +4 4 4 4 4 4
46041 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46042 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46043 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46044 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
46045 +5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
46046 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
46047 +6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
46048 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46049 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46050 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46051 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46052 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46053 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46054 +4 4 4 4 4 4
46055 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46056 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46057 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46058 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
46059 +4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
46060 +4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
46061 +6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46062 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46063 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46064 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46065 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46066 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46067 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46068 +4 4 4 4 4 4
46069 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46070 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46071 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46072 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46073 +4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
46074 +6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
46075 +4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
46076 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46077 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46078 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46079 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46080 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46081 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46082 +4 4 4 4 4 4
46083 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46084 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46085 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46086 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46087 +4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
46088 +4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
46089 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46090 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46091 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46092 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46093 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46094 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46095 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46096 +4 4 4 4 4 4
46097 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46098 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46099 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46100 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46101 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
46102 +5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
46103 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46104 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46105 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46106 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46107 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46108 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46109 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46110 +4 4 4 4 4 4
46111 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46112 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46113 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46114 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46115 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
46116 +5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
46117 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46118 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46119 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46120 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46121 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46122 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46123 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46124 +4 4 4 4 4 4
46125 diff --git a/drivers/video/nvidia/nv_backlight.c b/drivers/video/nvidia/nv_backlight.c
46126 index 443e3c8..c443d6a 100644
46127 --- a/drivers/video/nvidia/nv_backlight.c
46128 +++ b/drivers/video/nvidia/nv_backlight.c
46129 @@ -87,7 +87,7 @@ static int nvidia_bl_get_brightness(struct backlight_device *bd)
46130 return bd->props.brightness;
46131 }
46132
46133 -static struct backlight_ops nvidia_bl_ops = {
46134 +static const struct backlight_ops nvidia_bl_ops = {
46135 .get_brightness = nvidia_bl_get_brightness,
46136 .update_status = nvidia_bl_update_status,
46137 };
46138 diff --git a/drivers/video/riva/fbdev.c b/drivers/video/riva/fbdev.c
46139 index d94c57f..912984c 100644
46140 --- a/drivers/video/riva/fbdev.c
46141 +++ b/drivers/video/riva/fbdev.c
46142 @@ -331,7 +331,7 @@ static int riva_bl_get_brightness(struct backlight_device *bd)
46143 return bd->props.brightness;
46144 }
46145
46146 -static struct backlight_ops riva_bl_ops = {
46147 +static const struct backlight_ops riva_bl_ops = {
46148 .get_brightness = riva_bl_get_brightness,
46149 .update_status = riva_bl_update_status,
46150 };
46151 diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
46152 index 54fbb29..2c108fc 100644
46153 --- a/drivers/video/uvesafb.c
46154 +++ b/drivers/video/uvesafb.c
46155 @@ -18,6 +18,7 @@
46156 #include <linux/fb.h>
46157 #include <linux/io.h>
46158 #include <linux/mutex.h>
46159 +#include <linux/moduleloader.h>
46160 #include <video/edid.h>
46161 #include <video/uvesafb.h>
46162 #ifdef CONFIG_X86
46163 @@ -120,7 +121,7 @@ static int uvesafb_helper_start(void)
46164 NULL,
46165 };
46166
46167 - return call_usermodehelper(v86d_path, argv, envp, 1);
46168 + return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
46169 }
46170
46171 /*
46172 @@ -568,10 +569,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
46173 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
46174 par->pmi_setpal = par->ypan = 0;
46175 } else {
46176 +
46177 +#ifdef CONFIG_PAX_KERNEXEC
46178 +#ifdef CONFIG_MODULES
46179 + par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
46180 +#endif
46181 + if (!par->pmi_code) {
46182 + par->pmi_setpal = par->ypan = 0;
46183 + return 0;
46184 + }
46185 +#endif
46186 +
46187 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
46188 + task->t.regs.edi);
46189 +
46190 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46191 + pax_open_kernel();
46192 + memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
46193 + pax_close_kernel();
46194 +
46195 + par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
46196 + par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
46197 +#else
46198 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
46199 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
46200 +#endif
46201 +
46202 printk(KERN_INFO "uvesafb: protected mode interface info at "
46203 "%04x:%04x\n",
46204 (u16)task->t.regs.es, (u16)task->t.regs.edi);
46205 @@ -1799,6 +1822,11 @@ out:
46206 if (par->vbe_modes)
46207 kfree(par->vbe_modes);
46208
46209 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46210 + if (par->pmi_code)
46211 + module_free_exec(NULL, par->pmi_code);
46212 +#endif
46213 +
46214 framebuffer_release(info);
46215 return err;
46216 }
46217 @@ -1825,6 +1853,12 @@ static int uvesafb_remove(struct platform_device *dev)
46218 kfree(par->vbe_state_orig);
46219 if (par->vbe_state_saved)
46220 kfree(par->vbe_state_saved);
46221 +
46222 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46223 + if (par->pmi_code)
46224 + module_free_exec(NULL, par->pmi_code);
46225 +#endif
46226 +
46227 }
46228
46229 framebuffer_release(info);
46230 diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
46231 index bd37ee1..cb827e8 100644
46232 --- a/drivers/video/vesafb.c
46233 +++ b/drivers/video/vesafb.c
46234 @@ -9,6 +9,7 @@
46235 */
46236
46237 #include <linux/module.h>
46238 +#include <linux/moduleloader.h>
46239 #include <linux/kernel.h>
46240 #include <linux/errno.h>
46241 #include <linux/string.h>
46242 @@ -53,8 +54,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
46243 static int vram_total __initdata; /* Set total amount of memory */
46244 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
46245 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
46246 -static void (*pmi_start)(void) __read_mostly;
46247 -static void (*pmi_pal) (void) __read_mostly;
46248 +static void (*pmi_start)(void) __read_only;
46249 +static void (*pmi_pal) (void) __read_only;
46250 static int depth __read_mostly;
46251 static int vga_compat __read_mostly;
46252 /* --------------------------------------------------------------------- */
46253 @@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
46254 unsigned int size_vmode;
46255 unsigned int size_remap;
46256 unsigned int size_total;
46257 + void *pmi_code = NULL;
46258
46259 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
46260 return -ENODEV;
46261 @@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
46262 size_remap = size_total;
46263 vesafb_fix.smem_len = size_remap;
46264
46265 -#ifndef __i386__
46266 - screen_info.vesapm_seg = 0;
46267 -#endif
46268 -
46269 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
46270 printk(KERN_WARNING
46271 "vesafb: cannot reserve video memory at 0x%lx\n",
46272 @@ -315,9 +313,21 @@ static int __init vesafb_probe(struct platform_device *dev)
46273 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
46274 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
46275
46276 +#ifdef __i386__
46277 +
46278 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46279 + pmi_code = module_alloc_exec(screen_info.vesapm_size);
46280 + if (!pmi_code)
46281 +#elif !defined(CONFIG_PAX_KERNEXEC)
46282 + if (0)
46283 +#endif
46284 +
46285 +#endif
46286 + screen_info.vesapm_seg = 0;
46287 +
46288 if (screen_info.vesapm_seg) {
46289 - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
46290 - screen_info.vesapm_seg,screen_info.vesapm_off);
46291 + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
46292 + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
46293 }
46294
46295 if (screen_info.vesapm_seg < 0xc000)
46296 @@ -325,9 +335,25 @@ static int __init vesafb_probe(struct platform_device *dev)
46297
46298 if (ypan || pmi_setpal) {
46299 unsigned short *pmi_base;
46300 +
46301 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
46302 - pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
46303 - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
46304 +
46305 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46306 + pax_open_kernel();
46307 + memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
46308 +#else
46309 + pmi_code = pmi_base;
46310 +#endif
46311 +
46312 + pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
46313 + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
46314 +
46315 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46316 + pmi_start = ktva_ktla(pmi_start);
46317 + pmi_pal = ktva_ktla(pmi_pal);
46318 + pax_close_kernel();
46319 +#endif
46320 +
46321 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
46322 if (pmi_base[3]) {
46323 printk(KERN_INFO "vesafb: pmi: ports = ");
46324 @@ -469,6 +495,11 @@ static int __init vesafb_probe(struct platform_device *dev)
46325 info->node, info->fix.id);
46326 return 0;
46327 err:
46328 +
46329 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46330 + module_free_exec(NULL, pmi_code);
46331 +#endif
46332 +
46333 if (info->screen_base)
46334 iounmap(info->screen_base);
46335 framebuffer_release(info);
46336 diff --git a/drivers/xen/sys-hypervisor.c b/drivers/xen/sys-hypervisor.c
46337 index 88a60e0..6783cc2 100644
46338 --- a/drivers/xen/sys-hypervisor.c
46339 +++ b/drivers/xen/sys-hypervisor.c
46340 @@ -425,7 +425,7 @@ static ssize_t hyp_sysfs_store(struct kobject *kobj,
46341 return 0;
46342 }
46343
46344 -static struct sysfs_ops hyp_sysfs_ops = {
46345 +static const struct sysfs_ops hyp_sysfs_ops = {
46346 .show = hyp_sysfs_show,
46347 .store = hyp_sysfs_store,
46348 };
46349 diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
46350 index 18f74ec..3227009 100644
46351 --- a/fs/9p/vfs_inode.c
46352 +++ b/fs/9p/vfs_inode.c
46353 @@ -1079,7 +1079,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
46354 static void
46355 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
46356 {
46357 - char *s = nd_get_link(nd);
46358 + const char *s = nd_get_link(nd);
46359
46360 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
46361 IS_ERR(s) ? "<error>" : s);
46362 diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
46363 index bb4cc5b..df5eaa0 100644
46364 --- a/fs/Kconfig.binfmt
46365 +++ b/fs/Kconfig.binfmt
46366 @@ -86,7 +86,7 @@ config HAVE_AOUT
46367
46368 config BINFMT_AOUT
46369 tristate "Kernel support for a.out and ECOFF binaries"
46370 - depends on HAVE_AOUT
46371 + depends on HAVE_AOUT && BROKEN
46372 ---help---
46373 A.out (Assembler.OUTput) is a set of formats for libraries and
46374 executables used in the earliest versions of UNIX. Linux used
46375 diff --git a/fs/aio.c b/fs/aio.c
46376 index 22a19ad..d484e5b 100644
46377 --- a/fs/aio.c
46378 +++ b/fs/aio.c
46379 @@ -115,7 +115,7 @@ static int aio_setup_ring(struct kioctx *ctx)
46380 size += sizeof(struct io_event) * nr_events;
46381 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
46382
46383 - if (nr_pages < 0)
46384 + if (nr_pages <= 0)
46385 return -EINVAL;
46386
46387 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
46388 @@ -1089,6 +1089,8 @@ static int read_events(struct kioctx *ctx,
46389 struct aio_timeout to;
46390 int retry = 0;
46391
46392 + pax_track_stack();
46393 +
46394 /* needed to zero any padding within an entry (there shouldn't be
46395 * any, but C is fun!
46396 */
46397 @@ -1382,13 +1384,18 @@ static ssize_t aio_fsync(struct kiocb *iocb)
46398 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb)
46399 {
46400 ssize_t ret;
46401 + struct iovec iovstack;
46402
46403 ret = rw_copy_check_uvector(type, (struct iovec __user *)kiocb->ki_buf,
46404 kiocb->ki_nbytes, 1,
46405 - &kiocb->ki_inline_vec, &kiocb->ki_iovec);
46406 + &iovstack, &kiocb->ki_iovec);
46407 if (ret < 0)
46408 goto out;
46409
46410 + if (kiocb->ki_iovec == &iovstack) {
46411 + kiocb->ki_inline_vec = iovstack;
46412 + kiocb->ki_iovec = &kiocb->ki_inline_vec;
46413 + }
46414 kiocb->ki_nr_segs = kiocb->ki_nbytes;
46415 kiocb->ki_cur_seg = 0;
46416 /* ki_nbytes/left now reflect bytes instead of segs */
46417 diff --git a/fs/attr.c b/fs/attr.c
46418 index 96d394b..33cf5b4 100644
46419 --- a/fs/attr.c
46420 +++ b/fs/attr.c
46421 @@ -83,6 +83,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
46422 unsigned long limit;
46423
46424 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
46425 + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
46426 if (limit != RLIM_INFINITY && offset > limit)
46427 goto out_sig;
46428 if (offset > inode->i_sb->s_maxbytes)
46429 diff --git a/fs/autofs/root.c b/fs/autofs/root.c
46430 index 4a1401c..05eb5ca 100644
46431 --- a/fs/autofs/root.c
46432 +++ b/fs/autofs/root.c
46433 @@ -299,7 +299,8 @@ static int autofs_root_symlink(struct inode *dir, struct dentry *dentry, const c
46434 set_bit(n,sbi->symlink_bitmap);
46435 sl = &sbi->symlink[n];
46436 sl->len = strlen(symname);
46437 - sl->data = kmalloc(slsize = sl->len+1, GFP_KERNEL);
46438 + slsize = sl->len+1;
46439 + sl->data = kmalloc(slsize, GFP_KERNEL);
46440 if (!sl->data) {
46441 clear_bit(n,sbi->symlink_bitmap);
46442 unlock_kernel();
46443 diff --git a/fs/autofs4/symlink.c b/fs/autofs4/symlink.c
46444 index b4ea829..e63ef18 100644
46445 --- a/fs/autofs4/symlink.c
46446 +++ b/fs/autofs4/symlink.c
46447 @@ -15,7 +15,7 @@
46448 static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
46449 {
46450 struct autofs_info *ino = autofs4_dentry_ino(dentry);
46451 - nd_set_link(nd, (char *)ino->u.symlink);
46452 + nd_set_link(nd, ino->u.symlink);
46453 return NULL;
46454 }
46455
46456 diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
46457 index 2341375..df9d1c2 100644
46458 --- a/fs/autofs4/waitq.c
46459 +++ b/fs/autofs4/waitq.c
46460 @@ -60,7 +60,7 @@ static int autofs4_write(struct file *file, const void *addr, int bytes)
46461 {
46462 unsigned long sigpipe, flags;
46463 mm_segment_t fs;
46464 - const char *data = (const char *)addr;
46465 + const char __user *data = (const char __force_user *)addr;
46466 ssize_t wr = 0;
46467
46468 /** WARNING: this is not safe for writing more than PIPE_BUF bytes! **/
46469 diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
46470 index 9158c07..3f06659 100644
46471 --- a/fs/befs/linuxvfs.c
46472 +++ b/fs/befs/linuxvfs.c
46473 @@ -498,7 +498,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
46474 {
46475 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
46476 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
46477 - char *link = nd_get_link(nd);
46478 + const char *link = nd_get_link(nd);
46479 if (!IS_ERR(link))
46480 kfree(link);
46481 }
46482 diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
46483 index 0133b5a..b3baa9f 100644
46484 --- a/fs/binfmt_aout.c
46485 +++ b/fs/binfmt_aout.c
46486 @@ -16,6 +16,7 @@
46487 #include <linux/string.h>
46488 #include <linux/fs.h>
46489 #include <linux/file.h>
46490 +#include <linux/security.h>
46491 #include <linux/stat.h>
46492 #include <linux/fcntl.h>
46493 #include <linux/ptrace.h>
46494 @@ -102,6 +103,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
46495 #endif
46496 # define START_STACK(u) (u.start_stack)
46497
46498 + memset(&dump, 0, sizeof(dump));
46499 +
46500 fs = get_fs();
46501 set_fs(KERNEL_DS);
46502 has_dumped = 1;
46503 @@ -113,10 +116,12 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
46504
46505 /* If the size of the dump file exceeds the rlimit, then see what would happen
46506 if we wrote the stack, but not the data area. */
46507 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
46508 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > limit)
46509 dump.u_dsize = 0;
46510
46511 /* Make sure we have enough room to write the stack and data areas. */
46512 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
46513 if ((dump.u_ssize + 1) * PAGE_SIZE > limit)
46514 dump.u_ssize = 0;
46515
46516 @@ -146,9 +151,7 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
46517 dump_size = dump.u_ssize << PAGE_SHIFT;
46518 DUMP_WRITE(dump_start,dump_size);
46519 }
46520 -/* Finally dump the task struct. Not be used by gdb, but could be useful */
46521 - set_fs(KERNEL_DS);
46522 - DUMP_WRITE(current,sizeof(*current));
46523 +/* Finally, let's not dump the task struct. Not be used by gdb, but could be useful to an attacker */
46524 end_coredump:
46525 set_fs(fs);
46526 return has_dumped;
46527 @@ -249,6 +252,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
46528 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
46529 if (rlim >= RLIM_INFINITY)
46530 rlim = ~0;
46531 +
46532 + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
46533 if (ex.a_data + ex.a_bss > rlim)
46534 return -ENOMEM;
46535
46536 @@ -277,6 +282,27 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
46537 install_exec_creds(bprm);
46538 current->flags &= ~PF_FORKNOEXEC;
46539
46540 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
46541 + current->mm->pax_flags = 0UL;
46542 +#endif
46543 +
46544 +#ifdef CONFIG_PAX_PAGEEXEC
46545 + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
46546 + current->mm->pax_flags |= MF_PAX_PAGEEXEC;
46547 +
46548 +#ifdef CONFIG_PAX_EMUTRAMP
46549 + if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
46550 + current->mm->pax_flags |= MF_PAX_EMUTRAMP;
46551 +#endif
46552 +
46553 +#ifdef CONFIG_PAX_MPROTECT
46554 + if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
46555 + current->mm->pax_flags |= MF_PAX_MPROTECT;
46556 +#endif
46557 +
46558 + }
46559 +#endif
46560 +
46561 if (N_MAGIC(ex) == OMAGIC) {
46562 unsigned long text_addr, map_size;
46563 loff_t pos;
46564 @@ -349,7 +375,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
46565
46566 down_write(&current->mm->mmap_sem);
46567 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
46568 - PROT_READ | PROT_WRITE | PROT_EXEC,
46569 + PROT_READ | PROT_WRITE,
46570 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
46571 fd_offset + ex.a_text);
46572 up_write(&current->mm->mmap_sem);
46573 diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
46574 index 1ed37ba..f351a62 100644
46575 --- a/fs/binfmt_elf.c
46576 +++ b/fs/binfmt_elf.c
46577 @@ -31,6 +31,7 @@
46578 #include <linux/random.h>
46579 #include <linux/elf.h>
46580 #include <linux/utsname.h>
46581 +#include <linux/xattr.h>
46582 #include <asm/uaccess.h>
46583 #include <asm/param.h>
46584 #include <asm/page.h>
46585 @@ -50,6 +51,10 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
46586 #define elf_core_dump NULL
46587 #endif
46588
46589 +#ifdef CONFIG_PAX_MPROTECT
46590 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
46591 +#endif
46592 +
46593 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
46594 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
46595 #else
46596 @@ -69,6 +74,11 @@ static struct linux_binfmt elf_format = {
46597 .load_binary = load_elf_binary,
46598 .load_shlib = load_elf_library,
46599 .core_dump = elf_core_dump,
46600 +
46601 +#ifdef CONFIG_PAX_MPROTECT
46602 + .handle_mprotect= elf_handle_mprotect,
46603 +#endif
46604 +
46605 .min_coredump = ELF_EXEC_PAGESIZE,
46606 .hasvdso = 1
46607 };
46608 @@ -77,6 +87,8 @@ static struct linux_binfmt elf_format = {
46609
46610 static int set_brk(unsigned long start, unsigned long end)
46611 {
46612 + unsigned long e = end;
46613 +
46614 start = ELF_PAGEALIGN(start);
46615 end = ELF_PAGEALIGN(end);
46616 if (end > start) {
46617 @@ -87,7 +99,7 @@ static int set_brk(unsigned long start, unsigned long end)
46618 if (BAD_ADDR(addr))
46619 return addr;
46620 }
46621 - current->mm->start_brk = current->mm->brk = end;
46622 + current->mm->start_brk = current->mm->brk = e;
46623 return 0;
46624 }
46625
46626 @@ -148,12 +160,15 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
46627 elf_addr_t __user *u_rand_bytes;
46628 const char *k_platform = ELF_PLATFORM;
46629 const char *k_base_platform = ELF_BASE_PLATFORM;
46630 - unsigned char k_rand_bytes[16];
46631 + u32 k_rand_bytes[4];
46632 int items;
46633 elf_addr_t *elf_info;
46634 int ei_index = 0;
46635 const struct cred *cred = current_cred();
46636 struct vm_area_struct *vma;
46637 + unsigned long saved_auxv[AT_VECTOR_SIZE];
46638 +
46639 + pax_track_stack();
46640
46641 /*
46642 * In some cases (e.g. Hyper-Threading), we want to avoid L1
46643 @@ -195,8 +210,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
46644 * Generate 16 random bytes for userspace PRNG seeding.
46645 */
46646 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
46647 - u_rand_bytes = (elf_addr_t __user *)
46648 - STACK_ALLOC(p, sizeof(k_rand_bytes));
46649 + srandom32(k_rand_bytes[0] ^ random32());
46650 + srandom32(k_rand_bytes[1] ^ random32());
46651 + srandom32(k_rand_bytes[2] ^ random32());
46652 + srandom32(k_rand_bytes[3] ^ random32());
46653 + p = STACK_ROUND(p, sizeof(k_rand_bytes));
46654 + u_rand_bytes = (elf_addr_t __user *) p;
46655 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
46656 return -EFAULT;
46657
46658 @@ -308,9 +327,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
46659 return -EFAULT;
46660 current->mm->env_end = p;
46661
46662 + memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
46663 +
46664 /* Put the elf_info on the stack in the right place. */
46665 sp = (elf_addr_t __user *)envp + 1;
46666 - if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
46667 + if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
46668 return -EFAULT;
46669 return 0;
46670 }
46671 @@ -385,10 +406,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
46672 {
46673 struct elf_phdr *elf_phdata;
46674 struct elf_phdr *eppnt;
46675 - unsigned long load_addr = 0;
46676 + unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
46677 int load_addr_set = 0;
46678 unsigned long last_bss = 0, elf_bss = 0;
46679 - unsigned long error = ~0UL;
46680 + unsigned long error = -EINVAL;
46681 unsigned long total_size;
46682 int retval, i, size;
46683
46684 @@ -434,6 +455,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
46685 goto out_close;
46686 }
46687
46688 +#ifdef CONFIG_PAX_SEGMEXEC
46689 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
46690 + pax_task_size = SEGMEXEC_TASK_SIZE;
46691 +#endif
46692 +
46693 eppnt = elf_phdata;
46694 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
46695 if (eppnt->p_type == PT_LOAD) {
46696 @@ -477,8 +503,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
46697 k = load_addr + eppnt->p_vaddr;
46698 if (BAD_ADDR(k) ||
46699 eppnt->p_filesz > eppnt->p_memsz ||
46700 - eppnt->p_memsz > TASK_SIZE ||
46701 - TASK_SIZE - eppnt->p_memsz < k) {
46702 + eppnt->p_memsz > pax_task_size ||
46703 + pax_task_size - eppnt->p_memsz < k) {
46704 error = -ENOMEM;
46705 goto out_close;
46706 }
46707 @@ -532,6 +558,351 @@ out:
46708 return error;
46709 }
46710
46711 +static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
46712 +{
46713 + unsigned long pax_flags = 0UL;
46714 +
46715 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
46716 +
46717 +#ifdef CONFIG_PAX_PAGEEXEC
46718 + if (elf_phdata->p_flags & PF_PAGEEXEC)
46719 + pax_flags |= MF_PAX_PAGEEXEC;
46720 +#endif
46721 +
46722 +#ifdef CONFIG_PAX_SEGMEXEC
46723 + if (elf_phdata->p_flags & PF_SEGMEXEC)
46724 + pax_flags |= MF_PAX_SEGMEXEC;
46725 +#endif
46726 +
46727 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
46728 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46729 + if (nx_enabled)
46730 + pax_flags &= ~MF_PAX_SEGMEXEC;
46731 + else
46732 + pax_flags &= ~MF_PAX_PAGEEXEC;
46733 + }
46734 +#endif
46735 +
46736 +#ifdef CONFIG_PAX_EMUTRAMP
46737 + if (elf_phdata->p_flags & PF_EMUTRAMP)
46738 + pax_flags |= MF_PAX_EMUTRAMP;
46739 +#endif
46740 +
46741 +#ifdef CONFIG_PAX_MPROTECT
46742 + if (elf_phdata->p_flags & PF_MPROTECT)
46743 + pax_flags |= MF_PAX_MPROTECT;
46744 +#endif
46745 +
46746 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
46747 + if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
46748 + pax_flags |= MF_PAX_RANDMMAP;
46749 +#endif
46750 +
46751 +#endif
46752 +
46753 + return pax_flags;
46754 +}
46755 +
46756 +static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
46757 +{
46758 + unsigned long pax_flags = 0UL;
46759 +
46760 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
46761 +
46762 +#ifdef CONFIG_PAX_PAGEEXEC
46763 + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
46764 + pax_flags |= MF_PAX_PAGEEXEC;
46765 +#endif
46766 +
46767 +#ifdef CONFIG_PAX_SEGMEXEC
46768 + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
46769 + pax_flags |= MF_PAX_SEGMEXEC;
46770 +#endif
46771 +
46772 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
46773 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46774 + if (nx_enabled)
46775 + pax_flags &= ~MF_PAX_SEGMEXEC;
46776 + else
46777 + pax_flags &= ~MF_PAX_PAGEEXEC;
46778 + }
46779 +#endif
46780 +
46781 +#ifdef CONFIG_PAX_EMUTRAMP
46782 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
46783 + pax_flags |= MF_PAX_EMUTRAMP;
46784 +#endif
46785 +
46786 +#ifdef CONFIG_PAX_MPROTECT
46787 + if (!(elf_phdata->p_flags & PF_NOMPROTECT))
46788 + pax_flags |= MF_PAX_MPROTECT;
46789 +#endif
46790 +
46791 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
46792 + if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
46793 + pax_flags |= MF_PAX_RANDMMAP;
46794 +#endif
46795 +
46796 +#endif
46797 +
46798 + return pax_flags;
46799 +}
46800 +
46801 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
46802 +{
46803 + unsigned long pax_flags = 0UL;
46804 +
46805 +#ifdef CONFIG_PAX_EI_PAX
46806 +
46807 +#ifdef CONFIG_PAX_PAGEEXEC
46808 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
46809 + pax_flags |= MF_PAX_PAGEEXEC;
46810 +#endif
46811 +
46812 +#ifdef CONFIG_PAX_SEGMEXEC
46813 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
46814 + pax_flags |= MF_PAX_SEGMEXEC;
46815 +#endif
46816 +
46817 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
46818 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46819 + if (nx_enabled)
46820 + pax_flags &= ~MF_PAX_SEGMEXEC;
46821 + else
46822 + pax_flags &= ~MF_PAX_PAGEEXEC;
46823 + }
46824 +#endif
46825 +
46826 +#ifdef CONFIG_PAX_EMUTRAMP
46827 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
46828 + pax_flags |= MF_PAX_EMUTRAMP;
46829 +#endif
46830 +
46831 +#ifdef CONFIG_PAX_MPROTECT
46832 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
46833 + pax_flags |= MF_PAX_MPROTECT;
46834 +#endif
46835 +
46836 +#ifdef CONFIG_PAX_ASLR
46837 + if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
46838 + pax_flags |= MF_PAX_RANDMMAP;
46839 +#endif
46840 +
46841 +#else
46842 +
46843 +#ifdef CONFIG_PAX_PAGEEXEC
46844 + pax_flags |= MF_PAX_PAGEEXEC;
46845 +#endif
46846 +
46847 +#ifdef CONFIG_PAX_MPROTECT
46848 + pax_flags |= MF_PAX_MPROTECT;
46849 +#endif
46850 +
46851 +#ifdef CONFIG_PAX_RANDMMAP
46852 + pax_flags |= MF_PAX_RANDMMAP;
46853 +#endif
46854 +
46855 +#ifdef CONFIG_PAX_SEGMEXEC
46856 + if (!(pax_flags & MF_PAX_PAGEEXEC) || !(__supported_pte_mask & _PAGE_NX)) {
46857 + pax_flags &= ~MF_PAX_PAGEEXEC;
46858 + pax_flags |= MF_PAX_SEGMEXEC;
46859 + }
46860 +#endif
46861 +
46862 +#endif
46863 +
46864 + return pax_flags;
46865 +}
46866 +
46867 +static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
46868 +{
46869 +
46870 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
46871 + unsigned long i;
46872 +
46873 + for (i = 0UL; i < elf_ex->e_phnum; i++)
46874 + if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
46875 + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
46876 + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
46877 + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
46878 + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
46879 + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
46880 + return ~0UL;
46881 +
46882 +#ifdef CONFIG_PAX_SOFTMODE
46883 + if (pax_softmode)
46884 + return pax_parse_pt_pax_softmode(&elf_phdata[i]);
46885 + else
46886 +#endif
46887 +
46888 + return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
46889 + break;
46890 + }
46891 +#endif
46892 +
46893 + return ~0UL;
46894 +}
46895 +
46896 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
46897 +static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
46898 +{
46899 + unsigned long pax_flags = 0UL;
46900 +
46901 +#ifdef CONFIG_PAX_PAGEEXEC
46902 + if (pax_flags_softmode & MF_PAX_PAGEEXEC)
46903 + pax_flags |= MF_PAX_PAGEEXEC;
46904 +#endif
46905 +
46906 +#ifdef CONFIG_PAX_SEGMEXEC
46907 + if (pax_flags_softmode & MF_PAX_SEGMEXEC)
46908 + pax_flags |= MF_PAX_SEGMEXEC;
46909 +#endif
46910 +
46911 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
46912 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46913 + if ((__supported_pte_mask & _PAGE_NX))
46914 + pax_flags &= ~MF_PAX_SEGMEXEC;
46915 + else
46916 + pax_flags &= ~MF_PAX_PAGEEXEC;
46917 + }
46918 +#endif
46919 +
46920 +#ifdef CONFIG_PAX_EMUTRAMP
46921 + if (pax_flags_softmode & MF_PAX_EMUTRAMP)
46922 + pax_flags |= MF_PAX_EMUTRAMP;
46923 +#endif
46924 +
46925 +#ifdef CONFIG_PAX_MPROTECT
46926 + if (pax_flags_softmode & MF_PAX_MPROTECT)
46927 + pax_flags |= MF_PAX_MPROTECT;
46928 +#endif
46929 +
46930 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
46931 + if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
46932 + pax_flags |= MF_PAX_RANDMMAP;
46933 +#endif
46934 +
46935 + return pax_flags;
46936 +}
46937 +
46938 +static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
46939 +{
46940 + unsigned long pax_flags = 0UL;
46941 +
46942 +#ifdef CONFIG_PAX_PAGEEXEC
46943 + if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
46944 + pax_flags |= MF_PAX_PAGEEXEC;
46945 +#endif
46946 +
46947 +#ifdef CONFIG_PAX_SEGMEXEC
46948 + if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
46949 + pax_flags |= MF_PAX_SEGMEXEC;
46950 +#endif
46951 +
46952 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
46953 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46954 + if ((__supported_pte_mask & _PAGE_NX))
46955 + pax_flags &= ~MF_PAX_SEGMEXEC;
46956 + else
46957 + pax_flags &= ~MF_PAX_PAGEEXEC;
46958 + }
46959 +#endif
46960 +
46961 +#ifdef CONFIG_PAX_EMUTRAMP
46962 + if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
46963 + pax_flags |= MF_PAX_EMUTRAMP;
46964 +#endif
46965 +
46966 +#ifdef CONFIG_PAX_MPROTECT
46967 + if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
46968 + pax_flags |= MF_PAX_MPROTECT;
46969 +#endif
46970 +
46971 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
46972 + if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
46973 + pax_flags |= MF_PAX_RANDMMAP;
46974 +#endif
46975 +
46976 + return pax_flags;
46977 +}
46978 +#endif
46979 +
46980 +static unsigned long pax_parse_xattr_pax(struct file * const file)
46981 +{
46982 +
46983 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
46984 + ssize_t xattr_size, i;
46985 + unsigned char xattr_value[5];
46986 + unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
46987 +
46988 + xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
46989 + if (xattr_size <= 0)
46990 + return ~0UL;
46991 +
46992 + for (i = 0; i < xattr_size; i++)
46993 + switch (xattr_value[i]) {
46994 + default:
46995 + return ~0UL;
46996 +
46997 +#define parse_flag(option1, option2, flag) \
46998 + case option1: \
46999 + pax_flags_hardmode |= MF_PAX_##flag; \
47000 + break; \
47001 + case option2: \
47002 + pax_flags_softmode |= MF_PAX_##flag; \
47003 + break;
47004 +
47005 + parse_flag('p', 'P', PAGEEXEC);
47006 + parse_flag('e', 'E', EMUTRAMP);
47007 + parse_flag('m', 'M', MPROTECT);
47008 + parse_flag('r', 'R', RANDMMAP);
47009 + parse_flag('s', 'S', SEGMEXEC);
47010 +
47011 +#undef parse_flag
47012 + }
47013 +
47014 + if (pax_flags_hardmode & pax_flags_softmode)
47015 + return ~0UL;
47016 +
47017 +#ifdef CONFIG_PAX_SOFTMODE
47018 + if (pax_softmode)
47019 + return pax_parse_xattr_pax_softmode(pax_flags_softmode);
47020 + else
47021 +#endif
47022 +
47023 + return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
47024 +#else
47025 + return ~0UL;
47026 +#endif
47027 +
47028 +}
47029 +
47030 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
47031 +static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
47032 +{
47033 + unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
47034 +
47035 + pax_flags = pax_parse_ei_pax(elf_ex);
47036 + pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
47037 + xattr_pax_flags = pax_parse_xattr_pax(file);
47038 +
47039 + if (pt_pax_flags == ~0UL)
47040 + pt_pax_flags = xattr_pax_flags;
47041 + else if (xattr_pax_flags == ~0UL)
47042 + xattr_pax_flags = pt_pax_flags;
47043 + if (pt_pax_flags != xattr_pax_flags)
47044 + return -EINVAL;
47045 + if (pt_pax_flags != ~0UL)
47046 + pax_flags = pt_pax_flags;
47047 +
47048 + if (0 > pax_check_flags(&pax_flags))
47049 + return -EINVAL;
47050 +
47051 + current->mm->pax_flags = pax_flags;
47052 + return 0;
47053 +}
47054 +#endif
47055 +
47056 /*
47057 * These are the functions used to load ELF style executables and shared
47058 * libraries. There is no binary dependent code anywhere else.
47059 @@ -548,6 +919,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
47060 {
47061 unsigned int random_variable = 0;
47062
47063 +#ifdef CONFIG_PAX_RANDUSTACK
47064 + if (randomize_va_space)
47065 + return stack_top - current->mm->delta_stack;
47066 +#endif
47067 +
47068 if ((current->flags & PF_RANDOMIZE) &&
47069 !(current->personality & ADDR_NO_RANDOMIZE)) {
47070 random_variable = get_random_int() & STACK_RND_MASK;
47071 @@ -566,7 +942,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
47072 unsigned long load_addr = 0, load_bias = 0;
47073 int load_addr_set = 0;
47074 char * elf_interpreter = NULL;
47075 - unsigned long error;
47076 + unsigned long error = 0;
47077 struct elf_phdr *elf_ppnt, *elf_phdata;
47078 unsigned long elf_bss, elf_brk;
47079 int retval, i;
47080 @@ -576,11 +952,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
47081 unsigned long start_code, end_code, start_data, end_data;
47082 unsigned long reloc_func_desc = 0;
47083 int executable_stack = EXSTACK_DEFAULT;
47084 - unsigned long def_flags = 0;
47085 struct {
47086 struct elfhdr elf_ex;
47087 struct elfhdr interp_elf_ex;
47088 } *loc;
47089 + unsigned long pax_task_size = TASK_SIZE;
47090
47091 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
47092 if (!loc) {
47093 @@ -718,11 +1094,80 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
47094
47095 /* OK, This is the point of no return */
47096 current->flags &= ~PF_FORKNOEXEC;
47097 - current->mm->def_flags = def_flags;
47098 +
47099 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
47100 + current->mm->pax_flags = 0UL;
47101 +#endif
47102 +
47103 +#ifdef CONFIG_PAX_DLRESOLVE
47104 + current->mm->call_dl_resolve = 0UL;
47105 +#endif
47106 +
47107 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
47108 + current->mm->call_syscall = 0UL;
47109 +#endif
47110 +
47111 +#ifdef CONFIG_PAX_ASLR
47112 + current->mm->delta_mmap = 0UL;
47113 + current->mm->delta_stack = 0UL;
47114 +#endif
47115 +
47116 + current->mm->def_flags = 0;
47117 +
47118 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
47119 + if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
47120 + send_sig(SIGKILL, current, 0);
47121 + goto out_free_dentry;
47122 + }
47123 +#endif
47124 +
47125 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
47126 + pax_set_initial_flags(bprm);
47127 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
47128 + if (pax_set_initial_flags_func)
47129 + (pax_set_initial_flags_func)(bprm);
47130 +#endif
47131 +
47132 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
47133 + if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !nx_enabled) {
47134 + current->mm->context.user_cs_limit = PAGE_SIZE;
47135 + current->mm->def_flags |= VM_PAGEEXEC;
47136 + }
47137 +#endif
47138 +
47139 +#ifdef CONFIG_PAX_SEGMEXEC
47140 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
47141 + current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
47142 + current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
47143 + pax_task_size = SEGMEXEC_TASK_SIZE;
47144 + }
47145 +#endif
47146 +
47147 +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
47148 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
47149 + set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
47150 + put_cpu();
47151 + }
47152 +#endif
47153
47154 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
47155 may depend on the personality. */
47156 SET_PERSONALITY(loc->elf_ex);
47157 +
47158 +#ifdef CONFIG_PAX_ASLR
47159 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
47160 + current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
47161 + current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
47162 + }
47163 +#endif
47164 +
47165 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
47166 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
47167 + executable_stack = EXSTACK_DISABLE_X;
47168 + current->personality &= ~READ_IMPLIES_EXEC;
47169 + } else
47170 +#endif
47171 +
47172 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
47173 current->personality |= READ_IMPLIES_EXEC;
47174
47175 @@ -800,10 +1245,27 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
47176 * might try to exec. This is because the brk will
47177 * follow the loader, and is not movable. */
47178 #ifdef CONFIG_X86
47179 - load_bias = 0;
47180 + if (current->flags & PF_RANDOMIZE)
47181 + load_bias = 0;
47182 + else
47183 + load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
47184 #else
47185 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
47186 #endif
47187 +
47188 +#ifdef CONFIG_PAX_RANDMMAP
47189 + /* PaX: randomize base address at the default exe base if requested */
47190 + if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
47191 +#ifdef CONFIG_SPARC64
47192 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
47193 +#else
47194 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
47195 +#endif
47196 + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
47197 + elf_flags |= MAP_FIXED;
47198 + }
47199 +#endif
47200 +
47201 }
47202
47203 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
47204 @@ -836,9 +1298,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
47205 * allowed task size. Note that p_filesz must always be
47206 * <= p_memsz so it is only necessary to check p_memsz.
47207 */
47208 - if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
47209 - elf_ppnt->p_memsz > TASK_SIZE ||
47210 - TASK_SIZE - elf_ppnt->p_memsz < k) {
47211 + if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
47212 + elf_ppnt->p_memsz > pax_task_size ||
47213 + pax_task_size - elf_ppnt->p_memsz < k) {
47214 /* set_brk can never work. Avoid overflows. */
47215 send_sig(SIGKILL, current, 0);
47216 retval = -EINVAL;
47217 @@ -866,6 +1328,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
47218 start_data += load_bias;
47219 end_data += load_bias;
47220
47221 +#ifdef CONFIG_PAX_RANDMMAP
47222 + if (current->mm->pax_flags & MF_PAX_RANDMMAP)
47223 + elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
47224 +#endif
47225 +
47226 /* Calling set_brk effectively mmaps the pages that we need
47227 * for the bss and break sections. We must do this before
47228 * mapping in the interpreter, to make sure it doesn't wind
47229 @@ -877,9 +1344,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
47230 goto out_free_dentry;
47231 }
47232 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
47233 - send_sig(SIGSEGV, current, 0);
47234 - retval = -EFAULT; /* Nobody gets to see this, but.. */
47235 - goto out_free_dentry;
47236 + /*
47237 + * This bss-zeroing can fail if the ELF
47238 + * file specifies odd protections. So
47239 + * we don't check the return value
47240 + */
47241 }
47242
47243 if (elf_interpreter) {
47244 @@ -1112,8 +1581,10 @@ static int dump_seek(struct file *file, loff_t off)
47245 unsigned long n = off;
47246 if (n > PAGE_SIZE)
47247 n = PAGE_SIZE;
47248 - if (!dump_write(file, buf, n))
47249 + if (!dump_write(file, buf, n)) {
47250 + free_page((unsigned long)buf);
47251 return 0;
47252 + }
47253 off -= n;
47254 }
47255 free_page((unsigned long)buf);
47256 @@ -1125,7 +1596,7 @@ static int dump_seek(struct file *file, loff_t off)
47257 * Decide what to dump of a segment, part, all or none.
47258 */
47259 static unsigned long vma_dump_size(struct vm_area_struct *vma,
47260 - unsigned long mm_flags)
47261 + unsigned long mm_flags, long signr)
47262 {
47263 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
47264
47265 @@ -1159,7 +1630,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
47266 if (vma->vm_file == NULL)
47267 return 0;
47268
47269 - if (FILTER(MAPPED_PRIVATE))
47270 + if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
47271 goto whole;
47272
47273 /*
47274 @@ -1255,8 +1726,11 @@ static int writenote(struct memelfnote *men, struct file *file,
47275 #undef DUMP_WRITE
47276
47277 #define DUMP_WRITE(addr, nr) \
47278 + do { \
47279 + gr_learn_resource(current, RLIMIT_CORE, size + (nr), 1); \
47280 if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
47281 - goto end_coredump;
47282 + goto end_coredump; \
47283 + } while (0);
47284
47285 static void fill_elf_header(struct elfhdr *elf, int segs,
47286 u16 machine, u32 flags, u8 osabi)
47287 @@ -1385,9 +1859,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
47288 {
47289 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
47290 int i = 0;
47291 - do
47292 + do {
47293 i += 2;
47294 - while (auxv[i - 2] != AT_NULL);
47295 + } while (auxv[i - 2] != AT_NULL);
47296 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
47297 }
47298
47299 @@ -1973,7 +2447,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
47300 phdr.p_offset = offset;
47301 phdr.p_vaddr = vma->vm_start;
47302 phdr.p_paddr = 0;
47303 - phdr.p_filesz = vma_dump_size(vma, mm_flags);
47304 + phdr.p_filesz = vma_dump_size(vma, mm_flags, signr);
47305 phdr.p_memsz = vma->vm_end - vma->vm_start;
47306 offset += phdr.p_filesz;
47307 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
47308 @@ -2006,7 +2480,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
47309 unsigned long addr;
47310 unsigned long end;
47311
47312 - end = vma->vm_start + vma_dump_size(vma, mm_flags);
47313 + end = vma->vm_start + vma_dump_size(vma, mm_flags, signr);
47314
47315 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
47316 struct page *page;
47317 @@ -2015,6 +2489,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
47318 page = get_dump_page(addr);
47319 if (page) {
47320 void *kaddr = kmap(page);
47321 + gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
47322 stop = ((size += PAGE_SIZE) > limit) ||
47323 !dump_write(file, kaddr, PAGE_SIZE);
47324 kunmap(page);
47325 @@ -2042,6 +2517,97 @@ out:
47326
47327 #endif /* USE_ELF_CORE_DUMP */
47328
47329 +#ifdef CONFIG_PAX_MPROTECT
47330 +/* PaX: non-PIC ELF libraries need relocations on their executable segments
47331 + * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
47332 + * we'll remove VM_MAYWRITE for good on RELRO segments.
47333 + *
47334 + * The checks favour ld-linux.so behaviour which operates on a per ELF segment
47335 + * basis because we want to allow the common case and not the special ones.
47336 + */
47337 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
47338 +{
47339 + struct elfhdr elf_h;
47340 + struct elf_phdr elf_p;
47341 + unsigned long i;
47342 + unsigned long oldflags;
47343 + bool is_textrel_rw, is_textrel_rx, is_relro;
47344 +
47345 + if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
47346 + return;
47347 +
47348 + oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
47349 + newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
47350 +
47351 +#ifdef CONFIG_PAX_ELFRELOCS
47352 + /* possible TEXTREL */
47353 + is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
47354 + is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
47355 +#else
47356 + is_textrel_rw = false;
47357 + is_textrel_rx = false;
47358 +#endif
47359 +
47360 + /* possible RELRO */
47361 + is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
47362 +
47363 + if (!is_textrel_rw && !is_textrel_rx && !is_relro)
47364 + return;
47365 +
47366 + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
47367 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
47368 +
47369 +#ifdef CONFIG_PAX_ETEXECRELOCS
47370 + ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
47371 +#else
47372 + ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
47373 +#endif
47374 +
47375 + (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
47376 + !elf_check_arch(&elf_h) ||
47377 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
47378 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
47379 + return;
47380 +
47381 + for (i = 0UL; i < elf_h.e_phnum; i++) {
47382 + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
47383 + return;
47384 + switch (elf_p.p_type) {
47385 + case PT_DYNAMIC:
47386 + if (!is_textrel_rw && !is_textrel_rx)
47387 + continue;
47388 + i = 0UL;
47389 + while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
47390 + elf_dyn dyn;
47391 +
47392 + if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
47393 + return;
47394 + if (dyn.d_tag == DT_NULL)
47395 + return;
47396 + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
47397 + gr_log_textrel(vma);
47398 + if (is_textrel_rw)
47399 + vma->vm_flags |= VM_MAYWRITE;
47400 + else
47401 + /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
47402 + vma->vm_flags &= ~VM_MAYWRITE;
47403 + return;
47404 + }
47405 + i++;
47406 + }
47407 + return;
47408 +
47409 + case PT_GNU_RELRO:
47410 + if (!is_relro)
47411 + continue;
47412 + if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
47413 + vma->vm_flags &= ~VM_MAYWRITE;
47414 + return;
47415 + }
47416 + }
47417 +}
47418 +#endif
47419 +
47420 static int __init init_elf_binfmt(void)
47421 {
47422 return register_binfmt(&elf_format);
47423 diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
47424 index ca88c46..f155a60 100644
47425 --- a/fs/binfmt_flat.c
47426 +++ b/fs/binfmt_flat.c
47427 @@ -564,7 +564,9 @@ static int load_flat_file(struct linux_binprm * bprm,
47428 realdatastart = (unsigned long) -ENOMEM;
47429 printk("Unable to allocate RAM for process data, errno %d\n",
47430 (int)-realdatastart);
47431 + down_write(&current->mm->mmap_sem);
47432 do_munmap(current->mm, textpos, text_len);
47433 + up_write(&current->mm->mmap_sem);
47434 ret = realdatastart;
47435 goto err;
47436 }
47437 @@ -588,8 +590,10 @@ static int load_flat_file(struct linux_binprm * bprm,
47438 }
47439 if (IS_ERR_VALUE(result)) {
47440 printk("Unable to read data+bss, errno %d\n", (int)-result);
47441 + down_write(&current->mm->mmap_sem);
47442 do_munmap(current->mm, textpos, text_len);
47443 do_munmap(current->mm, realdatastart, data_len + extra);
47444 + up_write(&current->mm->mmap_sem);
47445 ret = result;
47446 goto err;
47447 }
47448 @@ -658,8 +662,10 @@ static int load_flat_file(struct linux_binprm * bprm,
47449 }
47450 if (IS_ERR_VALUE(result)) {
47451 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
47452 + down_write(&current->mm->mmap_sem);
47453 do_munmap(current->mm, textpos, text_len + data_len + extra +
47454 MAX_SHARED_LIBS * sizeof(unsigned long));
47455 + up_write(&current->mm->mmap_sem);
47456 ret = result;
47457 goto err;
47458 }
47459 diff --git a/fs/bio.c b/fs/bio.c
47460 index e696713..83de133 100644
47461 --- a/fs/bio.c
47462 +++ b/fs/bio.c
47463 @@ -78,7 +78,7 @@ static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
47464
47465 i = 0;
47466 while (i < bio_slab_nr) {
47467 - struct bio_slab *bslab = &bio_slabs[i];
47468 + bslab = &bio_slabs[i];
47469
47470 if (!bslab->slab && entry == -1)
47471 entry = i;
47472 @@ -1236,7 +1236,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
47473 const int read = bio_data_dir(bio) == READ;
47474 struct bio_map_data *bmd = bio->bi_private;
47475 int i;
47476 - char *p = bmd->sgvecs[0].iov_base;
47477 + char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
47478
47479 __bio_for_each_segment(bvec, bio, i, 0) {
47480 char *addr = page_address(bvec->bv_page);
47481 diff --git a/fs/block_dev.c b/fs/block_dev.c
47482 index e65efa2..04fae57 100644
47483 --- a/fs/block_dev.c
47484 +++ b/fs/block_dev.c
47485 @@ -664,7 +664,7 @@ int bd_claim(struct block_device *bdev, void *holder)
47486 else if (bdev->bd_contains == bdev)
47487 res = 0; /* is a whole device which isn't held */
47488
47489 - else if (bdev->bd_contains->bd_holder == bd_claim)
47490 + else if (bdev->bd_contains->bd_holder == (void *)bd_claim)
47491 res = 0; /* is a partition of a device that is being partitioned */
47492 else if (bdev->bd_contains->bd_holder != NULL)
47493 res = -EBUSY; /* is a partition of a held device */
47494 diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
47495 index c4bc570..42acd8d 100644
47496 --- a/fs/btrfs/ctree.c
47497 +++ b/fs/btrfs/ctree.c
47498 @@ -461,9 +461,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
47499 free_extent_buffer(buf);
47500 add_root_to_dirty_list(root);
47501 } else {
47502 - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
47503 - parent_start = parent->start;
47504 - else
47505 + if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
47506 + if (parent)
47507 + parent_start = parent->start;
47508 + else
47509 + parent_start = 0;
47510 + } else
47511 parent_start = 0;
47512
47513 WARN_ON(trans->transid != btrfs_header_generation(parent));
47514 @@ -3645,7 +3648,6 @@ setup_items_for_insert(struct btrfs_trans_handle *trans,
47515
47516 ret = 0;
47517 if (slot == 0) {
47518 - struct btrfs_disk_key disk_key;
47519 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
47520 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
47521 }
47522 diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
47523 index f447188..59c17c5 100644
47524 --- a/fs/btrfs/disk-io.c
47525 +++ b/fs/btrfs/disk-io.c
47526 @@ -39,7 +39,7 @@
47527 #include "tree-log.h"
47528 #include "free-space-cache.h"
47529
47530 -static struct extent_io_ops btree_extent_io_ops;
47531 +static const struct extent_io_ops btree_extent_io_ops;
47532 static void end_workqueue_fn(struct btrfs_work *work);
47533 static void free_fs_root(struct btrfs_root *root);
47534
47535 @@ -2607,7 +2607,7 @@ out:
47536 return 0;
47537 }
47538
47539 -static struct extent_io_ops btree_extent_io_ops = {
47540 +static const struct extent_io_ops btree_extent_io_ops = {
47541 .write_cache_pages_lock_hook = btree_lock_page_hook,
47542 .readpage_end_io_hook = btree_readpage_end_io_hook,
47543 .submit_bio_hook = btree_submit_bio_hook,
47544 diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
47545 index 559f724..a026171 100644
47546 --- a/fs/btrfs/extent-tree.c
47547 +++ b/fs/btrfs/extent-tree.c
47548 @@ -7141,6 +7141,10 @@ static noinline int relocate_one_extent(struct btrfs_root *extent_root,
47549 u64 group_start = group->key.objectid;
47550 new_extents = kmalloc(sizeof(*new_extents),
47551 GFP_NOFS);
47552 + if (!new_extents) {
47553 + ret = -ENOMEM;
47554 + goto out;
47555 + }
47556 nr_extents = 1;
47557 ret = get_new_locations(reloc_inode,
47558 extent_key,
47559 diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
47560 index 36de250..7ec75c7 100644
47561 --- a/fs/btrfs/extent_io.h
47562 +++ b/fs/btrfs/extent_io.h
47563 @@ -49,36 +49,36 @@ typedef int (extent_submit_bio_hook_t)(struct inode *inode, int rw,
47564 struct bio *bio, int mirror_num,
47565 unsigned long bio_flags);
47566 struct extent_io_ops {
47567 - int (*fill_delalloc)(struct inode *inode, struct page *locked_page,
47568 + int (* const fill_delalloc)(struct inode *inode, struct page *locked_page,
47569 u64 start, u64 end, int *page_started,
47570 unsigned long *nr_written);
47571 - int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
47572 - int (*writepage_io_hook)(struct page *page, u64 start, u64 end);
47573 + int (* const writepage_start_hook)(struct page *page, u64 start, u64 end);
47574 + int (* const writepage_io_hook)(struct page *page, u64 start, u64 end);
47575 extent_submit_bio_hook_t *submit_bio_hook;
47576 - int (*merge_bio_hook)(struct page *page, unsigned long offset,
47577 + int (* const merge_bio_hook)(struct page *page, unsigned long offset,
47578 size_t size, struct bio *bio,
47579 unsigned long bio_flags);
47580 - int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
47581 - int (*readpage_io_failed_hook)(struct bio *bio, struct page *page,
47582 + int (* const readpage_io_hook)(struct page *page, u64 start, u64 end);
47583 + int (* const readpage_io_failed_hook)(struct bio *bio, struct page *page,
47584 u64 start, u64 end,
47585 struct extent_state *state);
47586 - int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
47587 + int (* const writepage_io_failed_hook)(struct bio *bio, struct page *page,
47588 u64 start, u64 end,
47589 struct extent_state *state);
47590 - int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end,
47591 + int (* const readpage_end_io_hook)(struct page *page, u64 start, u64 end,
47592 struct extent_state *state);
47593 - int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
47594 + int (* const writepage_end_io_hook)(struct page *page, u64 start, u64 end,
47595 struct extent_state *state, int uptodate);
47596 - int (*set_bit_hook)(struct inode *inode, u64 start, u64 end,
47597 + int (* const set_bit_hook)(struct inode *inode, u64 start, u64 end,
47598 unsigned long old, unsigned long bits);
47599 - int (*clear_bit_hook)(struct inode *inode, struct extent_state *state,
47600 + int (* const clear_bit_hook)(struct inode *inode, struct extent_state *state,
47601 unsigned long bits);
47602 - int (*merge_extent_hook)(struct inode *inode,
47603 + int (* const merge_extent_hook)(struct inode *inode,
47604 struct extent_state *new,
47605 struct extent_state *other);
47606 - int (*split_extent_hook)(struct inode *inode,
47607 + int (* const split_extent_hook)(struct inode *inode,
47608 struct extent_state *orig, u64 split);
47609 - int (*write_cache_pages_lock_hook)(struct page *page);
47610 + int (* const write_cache_pages_lock_hook)(struct page *page);
47611 };
47612
47613 struct extent_io_tree {
47614 @@ -88,7 +88,7 @@ struct extent_io_tree {
47615 u64 dirty_bytes;
47616 spinlock_t lock;
47617 spinlock_t buffer_lock;
47618 - struct extent_io_ops *ops;
47619 + const struct extent_io_ops *ops;
47620 };
47621
47622 struct extent_state {
47623 diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
47624 index cb2849f..3718fb4 100644
47625 --- a/fs/btrfs/free-space-cache.c
47626 +++ b/fs/btrfs/free-space-cache.c
47627 @@ -1074,8 +1074,6 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
47628
47629 while(1) {
47630 if (entry->bytes < bytes || entry->offset < min_start) {
47631 - struct rb_node *node;
47632 -
47633 node = rb_next(&entry->offset_index);
47634 if (!node)
47635 break;
47636 @@ -1226,7 +1224,7 @@ again:
47637 */
47638 while (entry->bitmap || found_bitmap ||
47639 (!entry->bitmap && entry->bytes < min_bytes)) {
47640 - struct rb_node *node = rb_next(&entry->offset_index);
47641 + node = rb_next(&entry->offset_index);
47642
47643 if (entry->bitmap && entry->bytes > bytes + empty_size) {
47644 ret = btrfs_bitmap_cluster(block_group, entry, cluster,
47645 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
47646 index e03a836..323837e 100644
47647 --- a/fs/btrfs/inode.c
47648 +++ b/fs/btrfs/inode.c
47649 @@ -63,7 +63,7 @@ static const struct inode_operations btrfs_file_inode_operations;
47650 static const struct address_space_operations btrfs_aops;
47651 static const struct address_space_operations btrfs_symlink_aops;
47652 static const struct file_operations btrfs_dir_file_operations;
47653 -static struct extent_io_ops btrfs_extent_io_ops;
47654 +static const struct extent_io_ops btrfs_extent_io_ops;
47655
47656 static struct kmem_cache *btrfs_inode_cachep;
47657 struct kmem_cache *btrfs_trans_handle_cachep;
47658 @@ -925,6 +925,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
47659 1, 0, NULL, GFP_NOFS);
47660 while (start < end) {
47661 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
47662 + BUG_ON(!async_cow);
47663 async_cow->inode = inode;
47664 async_cow->root = root;
47665 async_cow->locked_page = locked_page;
47666 @@ -4591,6 +4592,8 @@ static noinline int uncompress_inline(struct btrfs_path *path,
47667 inline_size = btrfs_file_extent_inline_item_len(leaf,
47668 btrfs_item_nr(leaf, path->slots[0]));
47669 tmp = kmalloc(inline_size, GFP_NOFS);
47670 + if (!tmp)
47671 + return -ENOMEM;
47672 ptr = btrfs_file_extent_inline_start(item);
47673
47674 read_extent_buffer(leaf, tmp, ptr, inline_size);
47675 @@ -5410,7 +5413,7 @@ fail:
47676 return -ENOMEM;
47677 }
47678
47679 -static int btrfs_getattr(struct vfsmount *mnt,
47680 +int btrfs_getattr(struct vfsmount *mnt,
47681 struct dentry *dentry, struct kstat *stat)
47682 {
47683 struct inode *inode = dentry->d_inode;
47684 @@ -5422,6 +5425,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
47685 return 0;
47686 }
47687
47688 +EXPORT_SYMBOL(btrfs_getattr);
47689 +
47690 +dev_t get_btrfs_dev_from_inode(struct inode *inode)
47691 +{
47692 + return BTRFS_I(inode)->root->anon_super.s_dev;
47693 +}
47694 +EXPORT_SYMBOL(get_btrfs_dev_from_inode);
47695 +
47696 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
47697 struct inode *new_dir, struct dentry *new_dentry)
47698 {
47699 @@ -5972,7 +5983,7 @@ static const struct file_operations btrfs_dir_file_operations = {
47700 .fsync = btrfs_sync_file,
47701 };
47702
47703 -static struct extent_io_ops btrfs_extent_io_ops = {
47704 +static const struct extent_io_ops btrfs_extent_io_ops = {
47705 .fill_delalloc = run_delalloc_range,
47706 .submit_bio_hook = btrfs_submit_bio_hook,
47707 .merge_bio_hook = btrfs_merge_bio_hook,
47708 diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
47709 index ab7ab53..94e0781 100644
47710 --- a/fs/btrfs/relocation.c
47711 +++ b/fs/btrfs/relocation.c
47712 @@ -884,7 +884,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
47713 }
47714 spin_unlock(&rc->reloc_root_tree.lock);
47715
47716 - BUG_ON((struct btrfs_root *)node->data != root);
47717 + BUG_ON(!node || (struct btrfs_root *)node->data != root);
47718
47719 if (!del) {
47720 spin_lock(&rc->reloc_root_tree.lock);
47721 diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
47722 index a240b6f..4ce16ef 100644
47723 --- a/fs/btrfs/sysfs.c
47724 +++ b/fs/btrfs/sysfs.c
47725 @@ -164,12 +164,12 @@ static void btrfs_root_release(struct kobject *kobj)
47726 complete(&root->kobj_unregister);
47727 }
47728
47729 -static struct sysfs_ops btrfs_super_attr_ops = {
47730 +static const struct sysfs_ops btrfs_super_attr_ops = {
47731 .show = btrfs_super_attr_show,
47732 .store = btrfs_super_attr_store,
47733 };
47734
47735 -static struct sysfs_ops btrfs_root_attr_ops = {
47736 +static const struct sysfs_ops btrfs_root_attr_ops = {
47737 .show = btrfs_root_attr_show,
47738 .store = btrfs_root_attr_store,
47739 };
47740 diff --git a/fs/buffer.c b/fs/buffer.c
47741 index 6fa5302..395d9f6 100644
47742 --- a/fs/buffer.c
47743 +++ b/fs/buffer.c
47744 @@ -25,6 +25,7 @@
47745 #include <linux/percpu.h>
47746 #include <linux/slab.h>
47747 #include <linux/capability.h>
47748 +#include <linux/security.h>
47749 #include <linux/blkdev.h>
47750 #include <linux/file.h>
47751 #include <linux/quotaops.h>
47752 diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
47753 index 3797e00..ce776f6 100644
47754 --- a/fs/cachefiles/bind.c
47755 +++ b/fs/cachefiles/bind.c
47756 @@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
47757 args);
47758
47759 /* start by checking things over */
47760 - ASSERT(cache->fstop_percent >= 0 &&
47761 - cache->fstop_percent < cache->fcull_percent &&
47762 + ASSERT(cache->fstop_percent < cache->fcull_percent &&
47763 cache->fcull_percent < cache->frun_percent &&
47764 cache->frun_percent < 100);
47765
47766 - ASSERT(cache->bstop_percent >= 0 &&
47767 - cache->bstop_percent < cache->bcull_percent &&
47768 + ASSERT(cache->bstop_percent < cache->bcull_percent &&
47769 cache->bcull_percent < cache->brun_percent &&
47770 cache->brun_percent < 100);
47771
47772 diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
47773 index 4618516..bb30d01 100644
47774 --- a/fs/cachefiles/daemon.c
47775 +++ b/fs/cachefiles/daemon.c
47776 @@ -220,7 +220,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
47777 if (test_bit(CACHEFILES_DEAD, &cache->flags))
47778 return -EIO;
47779
47780 - if (datalen < 0 || datalen > PAGE_SIZE - 1)
47781 + if (datalen > PAGE_SIZE - 1)
47782 return -EOPNOTSUPP;
47783
47784 /* drag the command string into the kernel so we can parse it */
47785 @@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
47786 if (args[0] != '%' || args[1] != '\0')
47787 return -EINVAL;
47788
47789 - if (fstop < 0 || fstop >= cache->fcull_percent)
47790 + if (fstop >= cache->fcull_percent)
47791 return cachefiles_daemon_range_error(cache, args);
47792
47793 cache->fstop_percent = fstop;
47794 @@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
47795 if (args[0] != '%' || args[1] != '\0')
47796 return -EINVAL;
47797
47798 - if (bstop < 0 || bstop >= cache->bcull_percent)
47799 + if (bstop >= cache->bcull_percent)
47800 return cachefiles_daemon_range_error(cache, args);
47801
47802 cache->bstop_percent = bstop;
47803 diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
47804 index f7c255f..fcd61de 100644
47805 --- a/fs/cachefiles/internal.h
47806 +++ b/fs/cachefiles/internal.h
47807 @@ -56,7 +56,7 @@ struct cachefiles_cache {
47808 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
47809 struct rb_root active_nodes; /* active nodes (can't be culled) */
47810 rwlock_t active_lock; /* lock for active_nodes */
47811 - atomic_t gravecounter; /* graveyard uniquifier */
47812 + atomic_unchecked_t gravecounter; /* graveyard uniquifier */
47813 unsigned frun_percent; /* when to stop culling (% files) */
47814 unsigned fcull_percent; /* when to start culling (% files) */
47815 unsigned fstop_percent; /* when to stop allocating (% files) */
47816 @@ -168,19 +168,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
47817 * proc.c
47818 */
47819 #ifdef CONFIG_CACHEFILES_HISTOGRAM
47820 -extern atomic_t cachefiles_lookup_histogram[HZ];
47821 -extern atomic_t cachefiles_mkdir_histogram[HZ];
47822 -extern atomic_t cachefiles_create_histogram[HZ];
47823 +extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
47824 +extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
47825 +extern atomic_unchecked_t cachefiles_create_histogram[HZ];
47826
47827 extern int __init cachefiles_proc_init(void);
47828 extern void cachefiles_proc_cleanup(void);
47829 static inline
47830 -void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
47831 +void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
47832 {
47833 unsigned long jif = jiffies - start_jif;
47834 if (jif >= HZ)
47835 jif = HZ - 1;
47836 - atomic_inc(&histogram[jif]);
47837 + atomic_inc_unchecked(&histogram[jif]);
47838 }
47839
47840 #else
47841 diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
47842 index 14ac480..a62766c 100644
47843 --- a/fs/cachefiles/namei.c
47844 +++ b/fs/cachefiles/namei.c
47845 @@ -250,7 +250,7 @@ try_again:
47846 /* first step is to make up a grave dentry in the graveyard */
47847 sprintf(nbuffer, "%08x%08x",
47848 (uint32_t) get_seconds(),
47849 - (uint32_t) atomic_inc_return(&cache->gravecounter));
47850 + (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
47851
47852 /* do the multiway lock magic */
47853 trap = lock_rename(cache->graveyard, dir);
47854 diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
47855 index eccd339..4c1d995 100644
47856 --- a/fs/cachefiles/proc.c
47857 +++ b/fs/cachefiles/proc.c
47858 @@ -14,9 +14,9 @@
47859 #include <linux/seq_file.h>
47860 #include "internal.h"
47861
47862 -atomic_t cachefiles_lookup_histogram[HZ];
47863 -atomic_t cachefiles_mkdir_histogram[HZ];
47864 -atomic_t cachefiles_create_histogram[HZ];
47865 +atomic_unchecked_t cachefiles_lookup_histogram[HZ];
47866 +atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
47867 +atomic_unchecked_t cachefiles_create_histogram[HZ];
47868
47869 /*
47870 * display the latency histogram
47871 @@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
47872 return 0;
47873 default:
47874 index = (unsigned long) v - 3;
47875 - x = atomic_read(&cachefiles_lookup_histogram[index]);
47876 - y = atomic_read(&cachefiles_mkdir_histogram[index]);
47877 - z = atomic_read(&cachefiles_create_histogram[index]);
47878 + x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
47879 + y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
47880 + z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
47881 if (x == 0 && y == 0 && z == 0)
47882 return 0;
47883
47884 diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
47885 index a6c8c6f..5cf8517 100644
47886 --- a/fs/cachefiles/rdwr.c
47887 +++ b/fs/cachefiles/rdwr.c
47888 @@ -946,7 +946,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
47889 old_fs = get_fs();
47890 set_fs(KERNEL_DS);
47891 ret = file->f_op->write(
47892 - file, (const void __user *) data, len, &pos);
47893 + file, (const void __force_user *) data, len, &pos);
47894 set_fs(old_fs);
47895 kunmap(page);
47896 if (ret != len)
47897 diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
47898 index 42cec2a..2aba466 100644
47899 --- a/fs/cifs/cifs_debug.c
47900 +++ b/fs/cifs/cifs_debug.c
47901 @@ -256,25 +256,25 @@ static ssize_t cifs_stats_proc_write(struct file *file,
47902 tcon = list_entry(tmp3,
47903 struct cifsTconInfo,
47904 tcon_list);
47905 - atomic_set(&tcon->num_smbs_sent, 0);
47906 - atomic_set(&tcon->num_writes, 0);
47907 - atomic_set(&tcon->num_reads, 0);
47908 - atomic_set(&tcon->num_oplock_brks, 0);
47909 - atomic_set(&tcon->num_opens, 0);
47910 - atomic_set(&tcon->num_posixopens, 0);
47911 - atomic_set(&tcon->num_posixmkdirs, 0);
47912 - atomic_set(&tcon->num_closes, 0);
47913 - atomic_set(&tcon->num_deletes, 0);
47914 - atomic_set(&tcon->num_mkdirs, 0);
47915 - atomic_set(&tcon->num_rmdirs, 0);
47916 - atomic_set(&tcon->num_renames, 0);
47917 - atomic_set(&tcon->num_t2renames, 0);
47918 - atomic_set(&tcon->num_ffirst, 0);
47919 - atomic_set(&tcon->num_fnext, 0);
47920 - atomic_set(&tcon->num_fclose, 0);
47921 - atomic_set(&tcon->num_hardlinks, 0);
47922 - atomic_set(&tcon->num_symlinks, 0);
47923 - atomic_set(&tcon->num_locks, 0);
47924 + atomic_set_unchecked(&tcon->num_smbs_sent, 0);
47925 + atomic_set_unchecked(&tcon->num_writes, 0);
47926 + atomic_set_unchecked(&tcon->num_reads, 0);
47927 + atomic_set_unchecked(&tcon->num_oplock_brks, 0);
47928 + atomic_set_unchecked(&tcon->num_opens, 0);
47929 + atomic_set_unchecked(&tcon->num_posixopens, 0);
47930 + atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
47931 + atomic_set_unchecked(&tcon->num_closes, 0);
47932 + atomic_set_unchecked(&tcon->num_deletes, 0);
47933 + atomic_set_unchecked(&tcon->num_mkdirs, 0);
47934 + atomic_set_unchecked(&tcon->num_rmdirs, 0);
47935 + atomic_set_unchecked(&tcon->num_renames, 0);
47936 + atomic_set_unchecked(&tcon->num_t2renames, 0);
47937 + atomic_set_unchecked(&tcon->num_ffirst, 0);
47938 + atomic_set_unchecked(&tcon->num_fnext, 0);
47939 + atomic_set_unchecked(&tcon->num_fclose, 0);
47940 + atomic_set_unchecked(&tcon->num_hardlinks, 0);
47941 + atomic_set_unchecked(&tcon->num_symlinks, 0);
47942 + atomic_set_unchecked(&tcon->num_locks, 0);
47943 }
47944 }
47945 }
47946 @@ -334,41 +334,41 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
47947 if (tcon->need_reconnect)
47948 seq_puts(m, "\tDISCONNECTED ");
47949 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
47950 - atomic_read(&tcon->num_smbs_sent),
47951 - atomic_read(&tcon->num_oplock_brks));
47952 + atomic_read_unchecked(&tcon->num_smbs_sent),
47953 + atomic_read_unchecked(&tcon->num_oplock_brks));
47954 seq_printf(m, "\nReads: %d Bytes: %lld",
47955 - atomic_read(&tcon->num_reads),
47956 + atomic_read_unchecked(&tcon->num_reads),
47957 (long long)(tcon->bytes_read));
47958 seq_printf(m, "\nWrites: %d Bytes: %lld",
47959 - atomic_read(&tcon->num_writes),
47960 + atomic_read_unchecked(&tcon->num_writes),
47961 (long long)(tcon->bytes_written));
47962 seq_printf(m, "\nFlushes: %d",
47963 - atomic_read(&tcon->num_flushes));
47964 + atomic_read_unchecked(&tcon->num_flushes));
47965 seq_printf(m, "\nLocks: %d HardLinks: %d "
47966 "Symlinks: %d",
47967 - atomic_read(&tcon->num_locks),
47968 - atomic_read(&tcon->num_hardlinks),
47969 - atomic_read(&tcon->num_symlinks));
47970 + atomic_read_unchecked(&tcon->num_locks),
47971 + atomic_read_unchecked(&tcon->num_hardlinks),
47972 + atomic_read_unchecked(&tcon->num_symlinks));
47973 seq_printf(m, "\nOpens: %d Closes: %d "
47974 "Deletes: %d",
47975 - atomic_read(&tcon->num_opens),
47976 - atomic_read(&tcon->num_closes),
47977 - atomic_read(&tcon->num_deletes));
47978 + atomic_read_unchecked(&tcon->num_opens),
47979 + atomic_read_unchecked(&tcon->num_closes),
47980 + atomic_read_unchecked(&tcon->num_deletes));
47981 seq_printf(m, "\nPosix Opens: %d "
47982 "Posix Mkdirs: %d",
47983 - atomic_read(&tcon->num_posixopens),
47984 - atomic_read(&tcon->num_posixmkdirs));
47985 + atomic_read_unchecked(&tcon->num_posixopens),
47986 + atomic_read_unchecked(&tcon->num_posixmkdirs));
47987 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
47988 - atomic_read(&tcon->num_mkdirs),
47989 - atomic_read(&tcon->num_rmdirs));
47990 + atomic_read_unchecked(&tcon->num_mkdirs),
47991 + atomic_read_unchecked(&tcon->num_rmdirs));
47992 seq_printf(m, "\nRenames: %d T2 Renames %d",
47993 - atomic_read(&tcon->num_renames),
47994 - atomic_read(&tcon->num_t2renames));
47995 + atomic_read_unchecked(&tcon->num_renames),
47996 + atomic_read_unchecked(&tcon->num_t2renames));
47997 seq_printf(m, "\nFindFirst: %d FNext %d "
47998 "FClose %d",
47999 - atomic_read(&tcon->num_ffirst),
48000 - atomic_read(&tcon->num_fnext),
48001 - atomic_read(&tcon->num_fclose));
48002 + atomic_read_unchecked(&tcon->num_ffirst),
48003 + atomic_read_unchecked(&tcon->num_fnext),
48004 + atomic_read_unchecked(&tcon->num_fclose));
48005 }
48006 }
48007 }
48008 diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
48009 index 1445407..68cb0dc 100644
48010 --- a/fs/cifs/cifsfs.c
48011 +++ b/fs/cifs/cifsfs.c
48012 @@ -869,7 +869,7 @@ cifs_init_request_bufs(void)
48013 cifs_req_cachep = kmem_cache_create("cifs_request",
48014 CIFSMaxBufSize +
48015 MAX_CIFS_HDR_SIZE, 0,
48016 - SLAB_HWCACHE_ALIGN, NULL);
48017 + SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
48018 if (cifs_req_cachep == NULL)
48019 return -ENOMEM;
48020
48021 @@ -896,7 +896,7 @@ cifs_init_request_bufs(void)
48022 efficient to alloc 1 per page off the slab compared to 17K (5page)
48023 alloc of large cifs buffers even when page debugging is on */
48024 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
48025 - MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
48026 + MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
48027 NULL);
48028 if (cifs_sm_req_cachep == NULL) {
48029 mempool_destroy(cifs_req_poolp);
48030 @@ -991,8 +991,8 @@ init_cifs(void)
48031 atomic_set(&bufAllocCount, 0);
48032 atomic_set(&smBufAllocCount, 0);
48033 #ifdef CONFIG_CIFS_STATS2
48034 - atomic_set(&totBufAllocCount, 0);
48035 - atomic_set(&totSmBufAllocCount, 0);
48036 + atomic_set_unchecked(&totBufAllocCount, 0);
48037 + atomic_set_unchecked(&totSmBufAllocCount, 0);
48038 #endif /* CONFIG_CIFS_STATS2 */
48039
48040 atomic_set(&midCount, 0);
48041 diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
48042 index e29581e..1c22bab 100644
48043 --- a/fs/cifs/cifsglob.h
48044 +++ b/fs/cifs/cifsglob.h
48045 @@ -252,28 +252,28 @@ struct cifsTconInfo {
48046 __u16 Flags; /* optional support bits */
48047 enum statusEnum tidStatus;
48048 #ifdef CONFIG_CIFS_STATS
48049 - atomic_t num_smbs_sent;
48050 - atomic_t num_writes;
48051 - atomic_t num_reads;
48052 - atomic_t num_flushes;
48053 - atomic_t num_oplock_brks;
48054 - atomic_t num_opens;
48055 - atomic_t num_closes;
48056 - atomic_t num_deletes;
48057 - atomic_t num_mkdirs;
48058 - atomic_t num_posixopens;
48059 - atomic_t num_posixmkdirs;
48060 - atomic_t num_rmdirs;
48061 - atomic_t num_renames;
48062 - atomic_t num_t2renames;
48063 - atomic_t num_ffirst;
48064 - atomic_t num_fnext;
48065 - atomic_t num_fclose;
48066 - atomic_t num_hardlinks;
48067 - atomic_t num_symlinks;
48068 - atomic_t num_locks;
48069 - atomic_t num_acl_get;
48070 - atomic_t num_acl_set;
48071 + atomic_unchecked_t num_smbs_sent;
48072 + atomic_unchecked_t num_writes;
48073 + atomic_unchecked_t num_reads;
48074 + atomic_unchecked_t num_flushes;
48075 + atomic_unchecked_t num_oplock_brks;
48076 + atomic_unchecked_t num_opens;
48077 + atomic_unchecked_t num_closes;
48078 + atomic_unchecked_t num_deletes;
48079 + atomic_unchecked_t num_mkdirs;
48080 + atomic_unchecked_t num_posixopens;
48081 + atomic_unchecked_t num_posixmkdirs;
48082 + atomic_unchecked_t num_rmdirs;
48083 + atomic_unchecked_t num_renames;
48084 + atomic_unchecked_t num_t2renames;
48085 + atomic_unchecked_t num_ffirst;
48086 + atomic_unchecked_t num_fnext;
48087 + atomic_unchecked_t num_fclose;
48088 + atomic_unchecked_t num_hardlinks;
48089 + atomic_unchecked_t num_symlinks;
48090 + atomic_unchecked_t num_locks;
48091 + atomic_unchecked_t num_acl_get;
48092 + atomic_unchecked_t num_acl_set;
48093 #ifdef CONFIG_CIFS_STATS2
48094 unsigned long long time_writes;
48095 unsigned long long time_reads;
48096 @@ -414,7 +414,7 @@ static inline char CIFS_DIR_SEP(const struct cifs_sb_info *cifs_sb)
48097 }
48098
48099 #ifdef CONFIG_CIFS_STATS
48100 -#define cifs_stats_inc atomic_inc
48101 +#define cifs_stats_inc atomic_inc_unchecked
48102
48103 static inline void cifs_stats_bytes_written(struct cifsTconInfo *tcon,
48104 unsigned int bytes)
48105 @@ -701,8 +701,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
48106 /* Various Debug counters */
48107 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
48108 #ifdef CONFIG_CIFS_STATS2
48109 -GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
48110 -GLOBAL_EXTERN atomic_t totSmBufAllocCount;
48111 +GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
48112 +GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
48113 #endif
48114 GLOBAL_EXTERN atomic_t smBufAllocCount;
48115 GLOBAL_EXTERN atomic_t midCount;
48116 diff --git a/fs/cifs/link.c b/fs/cifs/link.c
48117 index fc1e048..28b3441 100644
48118 --- a/fs/cifs/link.c
48119 +++ b/fs/cifs/link.c
48120 @@ -215,7 +215,7 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname)
48121
48122 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
48123 {
48124 - char *p = nd_get_link(nd);
48125 + const char *p = nd_get_link(nd);
48126 if (!IS_ERR(p))
48127 kfree(p);
48128 }
48129 diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
48130 index 95b82e8..12a538d 100644
48131 --- a/fs/cifs/misc.c
48132 +++ b/fs/cifs/misc.c
48133 @@ -155,7 +155,7 @@ cifs_buf_get(void)
48134 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
48135 atomic_inc(&bufAllocCount);
48136 #ifdef CONFIG_CIFS_STATS2
48137 - atomic_inc(&totBufAllocCount);
48138 + atomic_inc_unchecked(&totBufAllocCount);
48139 #endif /* CONFIG_CIFS_STATS2 */
48140 }
48141
48142 @@ -190,7 +190,7 @@ cifs_small_buf_get(void)
48143 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
48144 atomic_inc(&smBufAllocCount);
48145 #ifdef CONFIG_CIFS_STATS2
48146 - atomic_inc(&totSmBufAllocCount);
48147 + atomic_inc_unchecked(&totSmBufAllocCount);
48148 #endif /* CONFIG_CIFS_STATS2 */
48149
48150 }
48151 diff --git a/fs/coda/cache.c b/fs/coda/cache.c
48152 index a5bf577..6d19845 100644
48153 --- a/fs/coda/cache.c
48154 +++ b/fs/coda/cache.c
48155 @@ -24,14 +24,14 @@
48156 #include <linux/coda_fs_i.h>
48157 #include <linux/coda_cache.h>
48158
48159 -static atomic_t permission_epoch = ATOMIC_INIT(0);
48160 +static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
48161
48162 /* replace or extend an acl cache hit */
48163 void coda_cache_enter(struct inode *inode, int mask)
48164 {
48165 struct coda_inode_info *cii = ITOC(inode);
48166
48167 - cii->c_cached_epoch = atomic_read(&permission_epoch);
48168 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
48169 if (cii->c_uid != current_fsuid()) {
48170 cii->c_uid = current_fsuid();
48171 cii->c_cached_perm = mask;
48172 @@ -43,13 +43,13 @@ void coda_cache_enter(struct inode *inode, int mask)
48173 void coda_cache_clear_inode(struct inode *inode)
48174 {
48175 struct coda_inode_info *cii = ITOC(inode);
48176 - cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
48177 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
48178 }
48179
48180 /* remove all acl caches */
48181 void coda_cache_clear_all(struct super_block *sb)
48182 {
48183 - atomic_inc(&permission_epoch);
48184 + atomic_inc_unchecked(&permission_epoch);
48185 }
48186
48187
48188 @@ -61,7 +61,7 @@ int coda_cache_check(struct inode *inode, int mask)
48189
48190 hit = (mask & cii->c_cached_perm) == mask &&
48191 cii->c_uid == current_fsuid() &&
48192 - cii->c_cached_epoch == atomic_read(&permission_epoch);
48193 + cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
48194
48195 return hit;
48196 }
48197 diff --git a/fs/compat.c b/fs/compat.c
48198 index d1e2411..c2ef8ed 100644
48199 --- a/fs/compat.c
48200 +++ b/fs/compat.c
48201 @@ -133,8 +133,8 @@ asmlinkage long compat_sys_utimes(char __user *filename, struct compat_timeval _
48202 static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
48203 {
48204 compat_ino_t ino = stat->ino;
48205 - typeof(ubuf->st_uid) uid = 0;
48206 - typeof(ubuf->st_gid) gid = 0;
48207 + typeof(((struct compat_stat *)0)->st_uid) uid = 0;
48208 + typeof(((struct compat_stat *)0)->st_gid) gid = 0;
48209 int err;
48210
48211 SET_UID(uid, stat->uid);
48212 @@ -533,7 +533,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
48213
48214 set_fs(KERNEL_DS);
48215 /* The __user pointer cast is valid because of the set_fs() */
48216 - ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
48217 + ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
48218 set_fs(oldfs);
48219 /* truncating is ok because it's a user address */
48220 if (!ret)
48221 @@ -830,6 +830,7 @@ struct compat_old_linux_dirent {
48222
48223 struct compat_readdir_callback {
48224 struct compat_old_linux_dirent __user *dirent;
48225 + struct file * file;
48226 int result;
48227 };
48228
48229 @@ -847,6 +848,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
48230 buf->result = -EOVERFLOW;
48231 return -EOVERFLOW;
48232 }
48233 +
48234 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48235 + return 0;
48236 +
48237 buf->result++;
48238 dirent = buf->dirent;
48239 if (!access_ok(VERIFY_WRITE, dirent,
48240 @@ -879,6 +884,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
48241
48242 buf.result = 0;
48243 buf.dirent = dirent;
48244 + buf.file = file;
48245
48246 error = vfs_readdir(file, compat_fillonedir, &buf);
48247 if (buf.result)
48248 @@ -899,6 +905,7 @@ struct compat_linux_dirent {
48249 struct compat_getdents_callback {
48250 struct compat_linux_dirent __user *current_dir;
48251 struct compat_linux_dirent __user *previous;
48252 + struct file * file;
48253 int count;
48254 int error;
48255 };
48256 @@ -919,6 +926,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
48257 buf->error = -EOVERFLOW;
48258 return -EOVERFLOW;
48259 }
48260 +
48261 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48262 + return 0;
48263 +
48264 dirent = buf->previous;
48265 if (dirent) {
48266 if (__put_user(offset, &dirent->d_off))
48267 @@ -966,6 +977,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
48268 buf.previous = NULL;
48269 buf.count = count;
48270 buf.error = 0;
48271 + buf.file = file;
48272
48273 error = vfs_readdir(file, compat_filldir, &buf);
48274 if (error >= 0)
48275 @@ -987,6 +999,7 @@ out:
48276 struct compat_getdents_callback64 {
48277 struct linux_dirent64 __user *current_dir;
48278 struct linux_dirent64 __user *previous;
48279 + struct file * file;
48280 int count;
48281 int error;
48282 };
48283 @@ -1003,6 +1016,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
48284 buf->error = -EINVAL; /* only used if we fail.. */
48285 if (reclen > buf->count)
48286 return -EINVAL;
48287 +
48288 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48289 + return 0;
48290 +
48291 dirent = buf->previous;
48292
48293 if (dirent) {
48294 @@ -1054,13 +1071,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
48295 buf.previous = NULL;
48296 buf.count = count;
48297 buf.error = 0;
48298 + buf.file = file;
48299
48300 error = vfs_readdir(file, compat_filldir64, &buf);
48301 if (error >= 0)
48302 error = buf.error;
48303 lastdirent = buf.previous;
48304 if (lastdirent) {
48305 - typeof(lastdirent->d_off) d_off = file->f_pos;
48306 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
48307 if (__put_user_unaligned(d_off, &lastdirent->d_off))
48308 error = -EFAULT;
48309 else
48310 @@ -1098,7 +1116,7 @@ static ssize_t compat_do_readv_writev(int type, struct file *file,
48311 * verify all the pointers
48312 */
48313 ret = -EINVAL;
48314 - if ((nr_segs > UIO_MAXIOV) || (nr_segs <= 0))
48315 + if (nr_segs > UIO_MAXIOV)
48316 goto out;
48317 if (!file->f_op)
48318 goto out;
48319 @@ -1454,6 +1472,10 @@ out:
48320 return ret;
48321 }
48322
48323 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48324 +extern atomic64_unchecked_t global_exec_counter;
48325 +#endif
48326 +
48327 /*
48328 * compat_do_execve() is mostly a copy of do_execve(), with the exception
48329 * that it processes 32 bit argv and envp pointers.
48330 @@ -1463,11 +1485,35 @@ int compat_do_execve(char * filename,
48331 compat_uptr_t __user *envp,
48332 struct pt_regs * regs)
48333 {
48334 +#ifdef CONFIG_GRKERNSEC
48335 + struct file *old_exec_file;
48336 + struct acl_subject_label *old_acl;
48337 + struct rlimit old_rlim[RLIM_NLIMITS];
48338 +#endif
48339 struct linux_binprm *bprm;
48340 struct file *file;
48341 struct files_struct *displaced;
48342 bool clear_in_exec;
48343 int retval;
48344 + const struct cred *cred = current_cred();
48345 +
48346 + /*
48347 + * We move the actual failure in case of RLIMIT_NPROC excess from
48348 + * set*uid() to execve() because too many poorly written programs
48349 + * don't check setuid() return code. Here we additionally recheck
48350 + * whether NPROC limit is still exceeded.
48351 + */
48352 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
48353 +
48354 + if ((current->flags & PF_NPROC_EXCEEDED) &&
48355 + atomic_read(&cred->user->processes) > current->signal->rlim[RLIMIT_NPROC].rlim_cur) {
48356 + retval = -EAGAIN;
48357 + goto out_ret;
48358 + }
48359 +
48360 + /* We're below the limit (still or again), so we don't want to make
48361 + * further execve() calls fail. */
48362 + current->flags &= ~PF_NPROC_EXCEEDED;
48363
48364 retval = unshare_files(&displaced);
48365 if (retval)
48366 @@ -1493,12 +1539,26 @@ int compat_do_execve(char * filename,
48367 if (IS_ERR(file))
48368 goto out_unmark;
48369
48370 + if (gr_ptrace_readexec(file, bprm->unsafe)) {
48371 + retval = -EPERM;
48372 + goto out_file;
48373 + }
48374 +
48375 sched_exec();
48376
48377 bprm->file = file;
48378 bprm->filename = filename;
48379 bprm->interp = filename;
48380
48381 + if (gr_process_user_ban()) {
48382 + retval = -EPERM;
48383 + goto out_file;
48384 + }
48385 +
48386 + retval = -EACCES;
48387 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt))
48388 + goto out_file;
48389 +
48390 retval = bprm_mm_init(bprm);
48391 if (retval)
48392 goto out_file;
48393 @@ -1528,11 +1588,45 @@ int compat_do_execve(char * filename,
48394 if (retval < 0)
48395 goto out;
48396
48397 + if (!gr_tpe_allow(file)) {
48398 + retval = -EACCES;
48399 + goto out;
48400 + }
48401 +
48402 + if (gr_check_crash_exec(file)) {
48403 + retval = -EACCES;
48404 + goto out;
48405 + }
48406 +
48407 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
48408 +
48409 + gr_handle_exec_args_compat(bprm, argv);
48410 +
48411 +#ifdef CONFIG_GRKERNSEC
48412 + old_acl = current->acl;
48413 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
48414 + old_exec_file = current->exec_file;
48415 + get_file(file);
48416 + current->exec_file = file;
48417 +#endif
48418 +
48419 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
48420 + bprm->unsafe);
48421 + if (retval < 0)
48422 + goto out_fail;
48423 +
48424 retval = search_binary_handler(bprm, regs);
48425 if (retval < 0)
48426 - goto out;
48427 + goto out_fail;
48428 +#ifdef CONFIG_GRKERNSEC
48429 + if (old_exec_file)
48430 + fput(old_exec_file);
48431 +#endif
48432
48433 /* execve succeeded */
48434 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48435 + current->exec_id = atomic64_inc_return_unchecked(&global_exec_counter);
48436 +#endif
48437 current->fs->in_exec = 0;
48438 current->in_execve = 0;
48439 acct_update_integrals(current);
48440 @@ -1541,6 +1635,14 @@ int compat_do_execve(char * filename,
48441 put_files_struct(displaced);
48442 return retval;
48443
48444 +out_fail:
48445 +#ifdef CONFIG_GRKERNSEC
48446 + current->acl = old_acl;
48447 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
48448 + fput(current->exec_file);
48449 + current->exec_file = old_exec_file;
48450 +#endif
48451 +
48452 out:
48453 if (bprm->mm) {
48454 acct_arg_size(bprm, 0);
48455 @@ -1711,6 +1813,8 @@ int compat_core_sys_select(int n, compat_ulong_t __user *inp,
48456 struct fdtable *fdt;
48457 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
48458
48459 + pax_track_stack();
48460 +
48461 if (n < 0)
48462 goto out_nofds;
48463
48464 @@ -2151,7 +2255,7 @@ asmlinkage long compat_sys_nfsservctl(int cmd,
48465 oldfs = get_fs();
48466 set_fs(KERNEL_DS);
48467 /* The __user pointer casts are valid because of the set_fs() */
48468 - err = sys_nfsservctl(cmd, (void __user *) karg, (void __user *) kres);
48469 + err = sys_nfsservctl(cmd, (void __force_user *) karg, (void __force_user *) kres);
48470 set_fs(oldfs);
48471
48472 if (err)
48473 diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
48474 index 0adced2..bbb1b0d 100644
48475 --- a/fs/compat_binfmt_elf.c
48476 +++ b/fs/compat_binfmt_elf.c
48477 @@ -29,10 +29,12 @@
48478 #undef elfhdr
48479 #undef elf_phdr
48480 #undef elf_note
48481 +#undef elf_dyn
48482 #undef elf_addr_t
48483 #define elfhdr elf32_hdr
48484 #define elf_phdr elf32_phdr
48485 #define elf_note elf32_note
48486 +#define elf_dyn Elf32_Dyn
48487 #define elf_addr_t Elf32_Addr
48488
48489 /*
48490 diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
48491 index d84e705..d8c364c 100644
48492 --- a/fs/compat_ioctl.c
48493 +++ b/fs/compat_ioctl.c
48494 @@ -234,6 +234,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd, unsigned
48495 up = (struct compat_video_spu_palette __user *) arg;
48496 err = get_user(palp, &up->palette);
48497 err |= get_user(length, &up->length);
48498 + if (err)
48499 + return -EFAULT;
48500
48501 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
48502 err = put_user(compat_ptr(palp), &up_native->palette);
48503 @@ -1513,7 +1515,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd, unsigned long arg)
48504 return -EFAULT;
48505 if (__get_user(udata, &ss32->iomem_base))
48506 return -EFAULT;
48507 - ss.iomem_base = compat_ptr(udata);
48508 + ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
48509 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
48510 __get_user(ss.port_high, &ss32->port_high))
48511 return -EFAULT;
48512 @@ -1809,7 +1811,7 @@ static int compat_ioctl_preallocate(struct file *file, unsigned long arg)
48513 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
48514 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
48515 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
48516 - copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
48517 + copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
48518 return -EFAULT;
48519
48520 return ioctl_preallocate(file, p);
48521 diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
48522 index 8e48b52..f01ed91 100644
48523 --- a/fs/configfs/dir.c
48524 +++ b/fs/configfs/dir.c
48525 @@ -1572,7 +1572,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
48526 }
48527 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
48528 struct configfs_dirent *next;
48529 - const char * name;
48530 + const unsigned char * name;
48531 + char d_name[sizeof(next->s_dentry->d_iname)];
48532 int len;
48533
48534 next = list_entry(p, struct configfs_dirent,
48535 @@ -1581,7 +1582,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
48536 continue;
48537
48538 name = configfs_get_name(next);
48539 - len = strlen(name);
48540 + if (next->s_dentry && name == next->s_dentry->d_iname) {
48541 + len = next->s_dentry->d_name.len;
48542 + memcpy(d_name, name, len);
48543 + name = d_name;
48544 + } else
48545 + len = strlen(name);
48546 if (next->s_dentry)
48547 ino = next->s_dentry->d_inode->i_ino;
48548 else
48549 diff --git a/fs/dcache.c b/fs/dcache.c
48550 index 44c0aea..2529092 100644
48551 --- a/fs/dcache.c
48552 +++ b/fs/dcache.c
48553 @@ -45,8 +45,6 @@ EXPORT_SYMBOL(dcache_lock);
48554
48555 static struct kmem_cache *dentry_cache __read_mostly;
48556
48557 -#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
48558 -
48559 /*
48560 * This is the single most critical data structure when it comes
48561 * to the dcache: the hashtable for lookups. Somebody should try
48562 @@ -2319,7 +2317,7 @@ void __init vfs_caches_init(unsigned long mempages)
48563 mempages -= reserve;
48564
48565 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
48566 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
48567 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
48568
48569 dcache_init();
48570 inode_init();
48571 diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
48572 index 39c6ee8..dcee0f1 100644
48573 --- a/fs/debugfs/inode.c
48574 +++ b/fs/debugfs/inode.c
48575 @@ -269,7 +269,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
48576 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
48577 {
48578 return debugfs_create_file(name,
48579 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
48580 + S_IFDIR | S_IRWXU,
48581 +#else
48582 S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
48583 +#endif
48584 parent, NULL, NULL);
48585 }
48586 EXPORT_SYMBOL_GPL(debugfs_create_dir);
48587 diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
48588 index c010ecf..a8d8c59 100644
48589 --- a/fs/dlm/lockspace.c
48590 +++ b/fs/dlm/lockspace.c
48591 @@ -148,7 +148,7 @@ static void lockspace_kobj_release(struct kobject *k)
48592 kfree(ls);
48593 }
48594
48595 -static struct sysfs_ops dlm_attr_ops = {
48596 +static const struct sysfs_ops dlm_attr_ops = {
48597 .show = dlm_attr_show,
48598 .store = dlm_attr_store,
48599 };
48600 diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
48601 index 7a5f1ac..205b034 100644
48602 --- a/fs/ecryptfs/crypto.c
48603 +++ b/fs/ecryptfs/crypto.c
48604 @@ -418,17 +418,6 @@ static int ecryptfs_encrypt_extent(struct page *enc_extent_page,
48605 rc);
48606 goto out;
48607 }
48608 - if (unlikely(ecryptfs_verbosity > 0)) {
48609 - ecryptfs_printk(KERN_DEBUG, "Encrypting extent "
48610 - "with iv:\n");
48611 - ecryptfs_dump_hex(extent_iv, crypt_stat->iv_bytes);
48612 - ecryptfs_printk(KERN_DEBUG, "First 8 bytes before "
48613 - "encryption:\n");
48614 - ecryptfs_dump_hex((char *)
48615 - (page_address(page)
48616 - + (extent_offset * crypt_stat->extent_size)),
48617 - 8);
48618 - }
48619 rc = ecryptfs_encrypt_page_offset(crypt_stat, enc_extent_page, 0,
48620 page, (extent_offset
48621 * crypt_stat->extent_size),
48622 @@ -441,14 +430,6 @@ static int ecryptfs_encrypt_extent(struct page *enc_extent_page,
48623 goto out;
48624 }
48625 rc = 0;
48626 - if (unlikely(ecryptfs_verbosity > 0)) {
48627 - ecryptfs_printk(KERN_DEBUG, "Encrypt extent [0x%.16x]; "
48628 - "rc = [%d]\n", (extent_base + extent_offset),
48629 - rc);
48630 - ecryptfs_printk(KERN_DEBUG, "First 8 bytes after "
48631 - "encryption:\n");
48632 - ecryptfs_dump_hex((char *)(page_address(enc_extent_page)), 8);
48633 - }
48634 out:
48635 return rc;
48636 }
48637 @@ -545,17 +526,6 @@ static int ecryptfs_decrypt_extent(struct page *page,
48638 rc);
48639 goto out;
48640 }
48641 - if (unlikely(ecryptfs_verbosity > 0)) {
48642 - ecryptfs_printk(KERN_DEBUG, "Decrypting extent "
48643 - "with iv:\n");
48644 - ecryptfs_dump_hex(extent_iv, crypt_stat->iv_bytes);
48645 - ecryptfs_printk(KERN_DEBUG, "First 8 bytes before "
48646 - "decryption:\n");
48647 - ecryptfs_dump_hex((char *)
48648 - (page_address(enc_extent_page)
48649 - + (extent_offset * crypt_stat->extent_size)),
48650 - 8);
48651 - }
48652 rc = ecryptfs_decrypt_page_offset(crypt_stat, page,
48653 (extent_offset
48654 * crypt_stat->extent_size),
48655 @@ -569,16 +539,6 @@ static int ecryptfs_decrypt_extent(struct page *page,
48656 goto out;
48657 }
48658 rc = 0;
48659 - if (unlikely(ecryptfs_verbosity > 0)) {
48660 - ecryptfs_printk(KERN_DEBUG, "Decrypt extent [0x%.16x]; "
48661 - "rc = [%d]\n", (extent_base + extent_offset),
48662 - rc);
48663 - ecryptfs_printk(KERN_DEBUG, "First 8 bytes after "
48664 - "decryption:\n");
48665 - ecryptfs_dump_hex((char *)(page_address(page)
48666 - + (extent_offset
48667 - * crypt_stat->extent_size)), 8);
48668 - }
48669 out:
48670 return rc;
48671 }
48672 diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
48673 index 88ba4d4..073f003 100644
48674 --- a/fs/ecryptfs/inode.c
48675 +++ b/fs/ecryptfs/inode.c
48676 @@ -660,7 +660,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
48677 old_fs = get_fs();
48678 set_fs(get_ds());
48679 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
48680 - (char __user *)lower_buf,
48681 + (char __force_user *)lower_buf,
48682 lower_bufsiz);
48683 set_fs(old_fs);
48684 if (rc < 0)
48685 @@ -706,7 +706,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
48686 }
48687 old_fs = get_fs();
48688 set_fs(get_ds());
48689 - rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
48690 + rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len);
48691 set_fs(old_fs);
48692 if (rc < 0)
48693 goto out_free;
48694 diff --git a/fs/exec.c b/fs/exec.c
48695 index 86fafc6..6272c0e 100644
48696 --- a/fs/exec.c
48697 +++ b/fs/exec.c
48698 @@ -56,12 +56,28 @@
48699 #include <linux/fsnotify.h>
48700 #include <linux/fs_struct.h>
48701 #include <linux/pipe_fs_i.h>
48702 +#include <linux/random.h>
48703 +#include <linux/seq_file.h>
48704 +
48705 +#ifdef CONFIG_PAX_REFCOUNT
48706 +#include <linux/kallsyms.h>
48707 +#include <linux/kdebug.h>
48708 +#endif
48709
48710 #include <asm/uaccess.h>
48711 #include <asm/mmu_context.h>
48712 #include <asm/tlb.h>
48713 #include "internal.h"
48714
48715 +#ifndef CONFIG_PAX_HAVE_ACL_FLAGS
48716 +void __weak pax_set_initial_flags(struct linux_binprm *bprm) {}
48717 +#endif
48718 +
48719 +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
48720 +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
48721 +EXPORT_SYMBOL(pax_set_initial_flags_func);
48722 +#endif
48723 +
48724 int core_uses_pid;
48725 char core_pattern[CORENAME_MAX_SIZE] = "core";
48726 unsigned int core_pipe_limit;
48727 @@ -178,18 +194,10 @@ struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
48728 int write)
48729 {
48730 struct page *page;
48731 - int ret;
48732
48733 -#ifdef CONFIG_STACK_GROWSUP
48734 - if (write) {
48735 - ret = expand_stack_downwards(bprm->vma, pos);
48736 - if (ret < 0)
48737 - return NULL;
48738 - }
48739 -#endif
48740 - ret = get_user_pages(current, bprm->mm, pos,
48741 - 1, write, 1, &page, NULL);
48742 - if (ret <= 0)
48743 + if (0 > expand_stack_downwards(bprm->vma, pos))
48744 + return NULL;
48745 + if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
48746 return NULL;
48747
48748 if (write) {
48749 @@ -205,6 +213,17 @@ struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
48750 if (size <= ARG_MAX)
48751 return page;
48752
48753 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48754 + // only allow 1MB for argv+env on suid/sgid binaries
48755 + // to prevent easy ASLR exhaustion
48756 + if (((bprm->cred->euid != current_euid()) ||
48757 + (bprm->cred->egid != current_egid())) &&
48758 + (size > (1024 * 1024))) {
48759 + put_page(page);
48760 + return NULL;
48761 + }
48762 +#endif
48763 +
48764 /*
48765 * Limit to 1/4-th the stack size for the argv+env strings.
48766 * This ensures that:
48767 @@ -263,6 +282,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
48768 vma->vm_end = STACK_TOP_MAX;
48769 vma->vm_start = vma->vm_end - PAGE_SIZE;
48770 vma->vm_flags = VM_STACK_FLAGS;
48771 +
48772 +#ifdef CONFIG_PAX_SEGMEXEC
48773 + vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
48774 +#endif
48775 +
48776 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
48777
48778 err = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
48779 @@ -276,6 +300,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
48780 mm->stack_vm = mm->total_vm = 1;
48781 up_write(&mm->mmap_sem);
48782 bprm->p = vma->vm_end - sizeof(void *);
48783 +
48784 +#ifdef CONFIG_PAX_RANDUSTACK
48785 + if (randomize_va_space)
48786 + bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
48787 +#endif
48788 +
48789 return 0;
48790 err:
48791 up_write(&mm->mmap_sem);
48792 @@ -510,7 +540,7 @@ int copy_strings_kernel(int argc,char ** argv, struct linux_binprm *bprm)
48793 int r;
48794 mm_segment_t oldfs = get_fs();
48795 set_fs(KERNEL_DS);
48796 - r = copy_strings(argc, (char __user * __user *)argv, bprm);
48797 + r = copy_strings(argc, (__force char __user * __user *)argv, bprm);
48798 set_fs(oldfs);
48799 return r;
48800 }
48801 @@ -540,7 +570,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
48802 unsigned long new_end = old_end - shift;
48803 struct mmu_gather *tlb;
48804
48805 - BUG_ON(new_start > new_end);
48806 + if (new_start >= new_end || new_start < mmap_min_addr)
48807 + return -ENOMEM;
48808
48809 /*
48810 * ensure there are no vmas between where we want to go
48811 @@ -549,6 +580,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
48812 if (vma != find_vma(mm, new_start))
48813 return -EFAULT;
48814
48815 +#ifdef CONFIG_PAX_SEGMEXEC
48816 + BUG_ON(pax_find_mirror_vma(vma));
48817 +#endif
48818 +
48819 /*
48820 * cover the whole range: [new_start, old_end)
48821 */
48822 @@ -630,10 +665,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
48823 stack_top = arch_align_stack(stack_top);
48824 stack_top = PAGE_ALIGN(stack_top);
48825
48826 - if (unlikely(stack_top < mmap_min_addr) ||
48827 - unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
48828 - return -ENOMEM;
48829 -
48830 stack_shift = vma->vm_end - stack_top;
48831
48832 bprm->p -= stack_shift;
48833 @@ -645,6 +676,14 @@ int setup_arg_pages(struct linux_binprm *bprm,
48834 bprm->exec -= stack_shift;
48835
48836 down_write(&mm->mmap_sem);
48837 +
48838 + /* Move stack pages down in memory. */
48839 + if (stack_shift) {
48840 + ret = shift_arg_pages(vma, stack_shift);
48841 + if (ret)
48842 + goto out_unlock;
48843 + }
48844 +
48845 vm_flags = VM_STACK_FLAGS;
48846
48847 /*
48848 @@ -658,19 +697,24 @@ int setup_arg_pages(struct linux_binprm *bprm,
48849 vm_flags &= ~VM_EXEC;
48850 vm_flags |= mm->def_flags;
48851
48852 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
48853 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
48854 + vm_flags &= ~VM_EXEC;
48855 +
48856 +#ifdef CONFIG_PAX_MPROTECT
48857 + if (mm->pax_flags & MF_PAX_MPROTECT)
48858 + vm_flags &= ~VM_MAYEXEC;
48859 +#endif
48860 +
48861 + }
48862 +#endif
48863 +
48864 ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
48865 vm_flags);
48866 if (ret)
48867 goto out_unlock;
48868 BUG_ON(prev != vma);
48869
48870 - /* Move stack pages down in memory. */
48871 - if (stack_shift) {
48872 - ret = shift_arg_pages(vma, stack_shift);
48873 - if (ret)
48874 - goto out_unlock;
48875 - }
48876 -
48877 stack_expand = EXTRA_STACK_VM_PAGES * PAGE_SIZE;
48878 stack_size = vma->vm_end - vma->vm_start;
48879 /*
48880 @@ -744,7 +788,7 @@ int kernel_read(struct file *file, loff_t offset,
48881 old_fs = get_fs();
48882 set_fs(get_ds());
48883 /* The cast to a user pointer is valid due to the set_fs() */
48884 - result = vfs_read(file, (void __user *)addr, count, &pos);
48885 + result = vfs_read(file, (void __force_user *)addr, count, &pos);
48886 set_fs(old_fs);
48887 return result;
48888 }
48889 @@ -985,6 +1029,21 @@ void set_task_comm(struct task_struct *tsk, char *buf)
48890 perf_event_comm(tsk);
48891 }
48892
48893 +static void filename_to_taskname(char *tcomm, const char *fn, unsigned int len)
48894 +{
48895 + int i, ch;
48896 +
48897 + /* Copies the binary name from after last slash */
48898 + for (i = 0; (ch = *(fn++)) != '\0';) {
48899 + if (ch == '/')
48900 + i = 0; /* overwrite what we wrote */
48901 + else
48902 + if (i < len - 1)
48903 + tcomm[i++] = ch;
48904 + }
48905 + tcomm[i] = '\0';
48906 +}
48907 +
48908 int flush_old_exec(struct linux_binprm * bprm)
48909 {
48910 int retval;
48911 @@ -999,6 +1058,7 @@ int flush_old_exec(struct linux_binprm * bprm)
48912
48913 set_mm_exe_file(bprm->mm, bprm->file);
48914
48915 + filename_to_taskname(bprm->tcomm, bprm->filename, sizeof(bprm->tcomm));
48916 /*
48917 * Release all of the old mmap stuff
48918 */
48919 @@ -1023,10 +1083,6 @@ EXPORT_SYMBOL(flush_old_exec);
48920
48921 void setup_new_exec(struct linux_binprm * bprm)
48922 {
48923 - int i, ch;
48924 - char * name;
48925 - char tcomm[sizeof(current->comm)];
48926 -
48927 arch_pick_mmap_layout(current->mm);
48928
48929 /* This is the point of no return */
48930 @@ -1037,18 +1093,7 @@ void setup_new_exec(struct linux_binprm * bprm)
48931 else
48932 set_dumpable(current->mm, suid_dumpable);
48933
48934 - name = bprm->filename;
48935 -
48936 - /* Copies the binary name from after last slash */
48937 - for (i=0; (ch = *(name++)) != '\0';) {
48938 - if (ch == '/')
48939 - i = 0; /* overwrite what we wrote */
48940 - else
48941 - if (i < (sizeof(tcomm) - 1))
48942 - tcomm[i++] = ch;
48943 - }
48944 - tcomm[i] = '\0';
48945 - set_task_comm(current, tcomm);
48946 + set_task_comm(current, bprm->tcomm);
48947
48948 /* Set the new mm task size. We have to do that late because it may
48949 * depend on TIF_32BIT which is only updated in flush_thread() on
48950 @@ -1152,7 +1197,7 @@ int check_unsafe_exec(struct linux_binprm *bprm)
48951 }
48952 rcu_read_unlock();
48953
48954 - if (p->fs->users > n_fs) {
48955 + if (atomic_read(&p->fs->users) > n_fs) {
48956 bprm->unsafe |= LSM_UNSAFE_SHARE;
48957 } else {
48958 res = -EAGAIN;
48959 @@ -1339,6 +1384,10 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
48960
48961 EXPORT_SYMBOL(search_binary_handler);
48962
48963 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48964 +atomic64_unchecked_t global_exec_counter = ATOMIC64_INIT(0);
48965 +#endif
48966 +
48967 /*
48968 * sys_execve() executes a new program.
48969 */
48970 @@ -1347,11 +1396,35 @@ int do_execve(char * filename,
48971 char __user *__user *envp,
48972 struct pt_regs * regs)
48973 {
48974 +#ifdef CONFIG_GRKERNSEC
48975 + struct file *old_exec_file;
48976 + struct acl_subject_label *old_acl;
48977 + struct rlimit old_rlim[RLIM_NLIMITS];
48978 +#endif
48979 struct linux_binprm *bprm;
48980 struct file *file;
48981 struct files_struct *displaced;
48982 bool clear_in_exec;
48983 int retval;
48984 + const struct cred *cred = current_cred();
48985 +
48986 + /*
48987 + * We move the actual failure in case of RLIMIT_NPROC excess from
48988 + * set*uid() to execve() because too many poorly written programs
48989 + * don't check setuid() return code. Here we additionally recheck
48990 + * whether NPROC limit is still exceeded.
48991 + */
48992 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
48993 +
48994 + if ((current->flags & PF_NPROC_EXCEEDED) &&
48995 + atomic_read(&cred->user->processes) > current->signal->rlim[RLIMIT_NPROC].rlim_cur) {
48996 + retval = -EAGAIN;
48997 + goto out_ret;
48998 + }
48999 +
49000 + /* We're below the limit (still or again), so we don't want to make
49001 + * further execve() calls fail. */
49002 + current->flags &= ~PF_NPROC_EXCEEDED;
49003
49004 retval = unshare_files(&displaced);
49005 if (retval)
49006 @@ -1377,12 +1450,27 @@ int do_execve(char * filename,
49007 if (IS_ERR(file))
49008 goto out_unmark;
49009
49010 + if (gr_ptrace_readexec(file, bprm->unsafe)) {
49011 + retval = -EPERM;
49012 + goto out_file;
49013 + }
49014 +
49015 sched_exec();
49016
49017 bprm->file = file;
49018 bprm->filename = filename;
49019 bprm->interp = filename;
49020
49021 + if (gr_process_user_ban()) {
49022 + retval = -EPERM;
49023 + goto out_file;
49024 + }
49025 +
49026 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
49027 + retval = -EACCES;
49028 + goto out_file;
49029 + }
49030 +
49031 retval = bprm_mm_init(bprm);
49032 if (retval)
49033 goto out_file;
49034 @@ -1412,12 +1500,47 @@ int do_execve(char * filename,
49035 if (retval < 0)
49036 goto out;
49037
49038 + if (!gr_tpe_allow(file)) {
49039 + retval = -EACCES;
49040 + goto out;
49041 + }
49042 +
49043 + if (gr_check_crash_exec(file)) {
49044 + retval = -EACCES;
49045 + goto out;
49046 + }
49047 +
49048 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
49049 +
49050 + gr_handle_exec_args(bprm, (const char __user *const __user *)argv);
49051 +
49052 +#ifdef CONFIG_GRKERNSEC
49053 + old_acl = current->acl;
49054 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
49055 + old_exec_file = current->exec_file;
49056 + get_file(file);
49057 + current->exec_file = file;
49058 +#endif
49059 +
49060 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
49061 + bprm->unsafe);
49062 + if (retval < 0)
49063 + goto out_fail;
49064 +
49065 current->flags &= ~PF_KTHREAD;
49066 retval = search_binary_handler(bprm,regs);
49067 if (retval < 0)
49068 - goto out;
49069 + goto out_fail;
49070 +#ifdef CONFIG_GRKERNSEC
49071 + if (old_exec_file)
49072 + fput(old_exec_file);
49073 +#endif
49074
49075 /* execve succeeded */
49076 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49077 + current->exec_id = atomic64_inc_return_unchecked(&global_exec_counter);
49078 +#endif
49079 +
49080 current->fs->in_exec = 0;
49081 current->in_execve = 0;
49082 acct_update_integrals(current);
49083 @@ -1426,6 +1549,14 @@ int do_execve(char * filename,
49084 put_files_struct(displaced);
49085 return retval;
49086
49087 +out_fail:
49088 +#ifdef CONFIG_GRKERNSEC
49089 + current->acl = old_acl;
49090 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
49091 + fput(current->exec_file);
49092 + current->exec_file = old_exec_file;
49093 +#endif
49094 +
49095 out:
49096 if (bprm->mm) {
49097 acct_arg_size(bprm, 0);
49098 @@ -1591,6 +1722,220 @@ out:
49099 return ispipe;
49100 }
49101
49102 +int pax_check_flags(unsigned long *flags)
49103 +{
49104 + int retval = 0;
49105 +
49106 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
49107 + if (*flags & MF_PAX_SEGMEXEC)
49108 + {
49109 + *flags &= ~MF_PAX_SEGMEXEC;
49110 + retval = -EINVAL;
49111 + }
49112 +#endif
49113 +
49114 + if ((*flags & MF_PAX_PAGEEXEC)
49115 +
49116 +#ifdef CONFIG_PAX_PAGEEXEC
49117 + && (*flags & MF_PAX_SEGMEXEC)
49118 +#endif
49119 +
49120 + )
49121 + {
49122 + *flags &= ~MF_PAX_PAGEEXEC;
49123 + retval = -EINVAL;
49124 + }
49125 +
49126 + if ((*flags & MF_PAX_MPROTECT)
49127 +
49128 +#ifdef CONFIG_PAX_MPROTECT
49129 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
49130 +#endif
49131 +
49132 + )
49133 + {
49134 + *flags &= ~MF_PAX_MPROTECT;
49135 + retval = -EINVAL;
49136 + }
49137 +
49138 + if ((*flags & MF_PAX_EMUTRAMP)
49139 +
49140 +#ifdef CONFIG_PAX_EMUTRAMP
49141 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
49142 +#endif
49143 +
49144 + )
49145 + {
49146 + *flags &= ~MF_PAX_EMUTRAMP;
49147 + retval = -EINVAL;
49148 + }
49149 +
49150 + return retval;
49151 +}
49152 +
49153 +EXPORT_SYMBOL(pax_check_flags);
49154 +
49155 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
49156 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
49157 +{
49158 + struct task_struct *tsk = current;
49159 + struct mm_struct *mm = current->mm;
49160 + char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
49161 + char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
49162 + char *path_exec = NULL;
49163 + char *path_fault = NULL;
49164 + unsigned long start = 0UL, end = 0UL, offset = 0UL;
49165 +
49166 + if (buffer_exec && buffer_fault) {
49167 + struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
49168 +
49169 + down_read(&mm->mmap_sem);
49170 + vma = mm->mmap;
49171 + while (vma && (!vma_exec || !vma_fault)) {
49172 + if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
49173 + vma_exec = vma;
49174 + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
49175 + vma_fault = vma;
49176 + vma = vma->vm_next;
49177 + }
49178 + if (vma_exec) {
49179 + path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
49180 + if (IS_ERR(path_exec))
49181 + path_exec = "<path too long>";
49182 + else {
49183 + path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
49184 + if (path_exec) {
49185 + *path_exec = 0;
49186 + path_exec = buffer_exec;
49187 + } else
49188 + path_exec = "<path too long>";
49189 + }
49190 + }
49191 + if (vma_fault) {
49192 + start = vma_fault->vm_start;
49193 + end = vma_fault->vm_end;
49194 + offset = vma_fault->vm_pgoff << PAGE_SHIFT;
49195 + if (vma_fault->vm_file) {
49196 + path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
49197 + if (IS_ERR(path_fault))
49198 + path_fault = "<path too long>";
49199 + else {
49200 + path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
49201 + if (path_fault) {
49202 + *path_fault = 0;
49203 + path_fault = buffer_fault;
49204 + } else
49205 + path_fault = "<path too long>";
49206 + }
49207 + } else
49208 + path_fault = "<anonymous mapping>";
49209 + }
49210 + up_read(&mm->mmap_sem);
49211 + }
49212 + if (tsk->signal->curr_ip)
49213 + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
49214 + else
49215 + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
49216 + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
49217 + "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
49218 + task_uid(tsk), task_euid(tsk), pc, sp);
49219 + free_page((unsigned long)buffer_exec);
49220 + free_page((unsigned long)buffer_fault);
49221 + pax_report_insns(regs, pc, sp);
49222 + do_coredump(SIGKILL, SIGKILL, regs);
49223 +}
49224 +#endif
49225 +
49226 +#ifdef CONFIG_PAX_REFCOUNT
49227 +void pax_report_refcount_overflow(struct pt_regs *regs)
49228 +{
49229 + if (current->signal->curr_ip)
49230 + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
49231 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
49232 + else
49233 + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
49234 + current->comm, task_pid_nr(current), current_uid(), current_euid());
49235 + print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
49236 + show_regs(regs);
49237 + force_sig_specific(SIGKILL, current);
49238 +}
49239 +#endif
49240 +
49241 +#ifdef CONFIG_PAX_USERCOPY
49242 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
49243 +int object_is_on_stack(const void *obj, unsigned long len)
49244 +{
49245 + const void * const stack = task_stack_page(current);
49246 + const void * const stackend = stack + THREAD_SIZE;
49247 +
49248 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
49249 + const void *frame = NULL;
49250 + const void *oldframe;
49251 +#endif
49252 +
49253 + if (obj + len < obj)
49254 + return -1;
49255 +
49256 + if (obj + len <= stack || stackend <= obj)
49257 + return 0;
49258 +
49259 + if (obj < stack || stackend < obj + len)
49260 + return -1;
49261 +
49262 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
49263 + oldframe = __builtin_frame_address(1);
49264 + if (oldframe)
49265 + frame = __builtin_frame_address(2);
49266 + /*
49267 + low ----------------------------------------------> high
49268 + [saved bp][saved ip][args][local vars][saved bp][saved ip]
49269 + ^----------------^
49270 + allow copies only within here
49271 + */
49272 + while (stack <= frame && frame < stackend) {
49273 + /* if obj + len extends past the last frame, this
49274 + check won't pass and the next frame will be 0,
49275 + causing us to bail out and correctly report
49276 + the copy as invalid
49277 + */
49278 + if (obj + len <= frame)
49279 + return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
49280 + oldframe = frame;
49281 + frame = *(const void * const *)frame;
49282 + }
49283 + return -1;
49284 +#else
49285 + return 1;
49286 +#endif
49287 +}
49288 +
49289 +
49290 +NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
49291 +{
49292 + if (current->signal->curr_ip)
49293 + printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
49294 + &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
49295 + else
49296 + printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
49297 + to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
49298 +
49299 + dump_stack();
49300 + gr_handle_kernel_exploit();
49301 + do_group_exit(SIGKILL);
49302 +}
49303 +#endif
49304 +
49305 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
49306 +void pax_track_stack(void)
49307 +{
49308 + unsigned long sp = (unsigned long)&sp;
49309 + if (sp < current_thread_info()->lowest_stack &&
49310 + sp > (unsigned long)task_stack_page(current))
49311 + current_thread_info()->lowest_stack = sp;
49312 +}
49313 +EXPORT_SYMBOL(pax_track_stack);
49314 +#endif
49315 +
49316 static int zap_process(struct task_struct *start)
49317 {
49318 struct task_struct *t;
49319 @@ -1793,17 +2138,17 @@ static void wait_for_dump_helpers(struct file *file)
49320 pipe = file->f_path.dentry->d_inode->i_pipe;
49321
49322 pipe_lock(pipe);
49323 - pipe->readers++;
49324 - pipe->writers--;
49325 + atomic_inc(&pipe->readers);
49326 + atomic_dec(&pipe->writers);
49327
49328 - while ((pipe->readers > 1) && (!signal_pending(current))) {
49329 + while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
49330 wake_up_interruptible_sync(&pipe->wait);
49331 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
49332 pipe_wait(pipe);
49333 }
49334
49335 - pipe->readers--;
49336 - pipe->writers++;
49337 + atomic_dec(&pipe->readers);
49338 + atomic_inc(&pipe->writers);
49339 pipe_unlock(pipe);
49340
49341 }
49342 @@ -1826,10 +2171,13 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
49343 char **helper_argv = NULL;
49344 int helper_argc = 0;
49345 int dump_count = 0;
49346 - static atomic_t core_dump_count = ATOMIC_INIT(0);
49347 + static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
49348
49349 audit_core_dumps(signr);
49350
49351 + if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
49352 + gr_handle_brute_attach(current, mm->flags);
49353 +
49354 binfmt = mm->binfmt;
49355 if (!binfmt || !binfmt->core_dump)
49356 goto fail;
49357 @@ -1874,6 +2222,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
49358 */
49359 clear_thread_flag(TIF_SIGPENDING);
49360
49361 + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
49362 +
49363 /*
49364 * lock_kernel() because format_corename() is controlled by sysctl, which
49365 * uses lock_kernel()
49366 @@ -1908,7 +2258,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
49367 goto fail_unlock;
49368 }
49369
49370 - dump_count = atomic_inc_return(&core_dump_count);
49371 + dump_count = atomic_inc_return_unchecked(&core_dump_count);
49372 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
49373 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
49374 task_tgid_vnr(current), current->comm);
49375 @@ -1972,7 +2322,7 @@ close_fail:
49376 filp_close(file, NULL);
49377 fail_dropcount:
49378 if (dump_count)
49379 - atomic_dec(&core_dump_count);
49380 + atomic_dec_unchecked(&core_dump_count);
49381 fail_unlock:
49382 if (helper_argv)
49383 argv_free(helper_argv);
49384 diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
49385 index 7f8d2e5..a1abdbb 100644
49386 --- a/fs/ext2/balloc.c
49387 +++ b/fs/ext2/balloc.c
49388 @@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
49389
49390 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
49391 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
49392 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
49393 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
49394 sbi->s_resuid != current_fsuid() &&
49395 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
49396 return 0;
49397 diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
49398 index 27967f9..9f2a5fb 100644
49399 --- a/fs/ext3/balloc.c
49400 +++ b/fs/ext3/balloc.c
49401 @@ -1421,7 +1421,7 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi)
49402
49403 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
49404 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
49405 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
49406 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
49407 sbi->s_resuid != current_fsuid() &&
49408 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
49409 return 0;
49410 diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
49411 index e85b63c..80398e6 100644
49412 --- a/fs/ext4/balloc.c
49413 +++ b/fs/ext4/balloc.c
49414 @@ -570,7 +570,7 @@ int ext4_has_free_blocks(struct ext4_sb_info *sbi, s64 nblocks)
49415 /* Hm, nope. Are (enough) root reserved blocks available? */
49416 if (sbi->s_resuid == current_fsuid() ||
49417 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
49418 - capable(CAP_SYS_RESOURCE)) {
49419 + capable_nolog(CAP_SYS_RESOURCE)) {
49420 if (free_blocks >= (nblocks + dirty_blocks))
49421 return 1;
49422 }
49423 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
49424 index 67c46ed..1f237e5 100644
49425 --- a/fs/ext4/ext4.h
49426 +++ b/fs/ext4/ext4.h
49427 @@ -1077,19 +1077,19 @@ struct ext4_sb_info {
49428
49429 /* stats for buddy allocator */
49430 spinlock_t s_mb_pa_lock;
49431 - atomic_t s_bal_reqs; /* number of reqs with len > 1 */
49432 - atomic_t s_bal_success; /* we found long enough chunks */
49433 - atomic_t s_bal_allocated; /* in blocks */
49434 - atomic_t s_bal_ex_scanned; /* total extents scanned */
49435 - atomic_t s_bal_goals; /* goal hits */
49436 - atomic_t s_bal_breaks; /* too long searches */
49437 - atomic_t s_bal_2orders; /* 2^order hits */
49438 + atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
49439 + atomic_unchecked_t s_bal_success; /* we found long enough chunks */
49440 + atomic_unchecked_t s_bal_allocated; /* in blocks */
49441 + atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
49442 + atomic_unchecked_t s_bal_goals; /* goal hits */
49443 + atomic_unchecked_t s_bal_breaks; /* too long searches */
49444 + atomic_unchecked_t s_bal_2orders; /* 2^order hits */
49445 spinlock_t s_bal_lock;
49446 unsigned long s_mb_buddies_generated;
49447 unsigned long long s_mb_generation_time;
49448 - atomic_t s_mb_lost_chunks;
49449 - atomic_t s_mb_preallocated;
49450 - atomic_t s_mb_discarded;
49451 + atomic_unchecked_t s_mb_lost_chunks;
49452 + atomic_unchecked_t s_mb_preallocated;
49453 + atomic_unchecked_t s_mb_discarded;
49454 atomic_t s_lock_busy;
49455
49456 /* locality groups */
49457 diff --git a/fs/ext4/file.c b/fs/ext4/file.c
49458 index 2a60541..7439d61 100644
49459 --- a/fs/ext4/file.c
49460 +++ b/fs/ext4/file.c
49461 @@ -122,8 +122,8 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
49462 cp = d_path(&path, buf, sizeof(buf));
49463 path_put(&path);
49464 if (!IS_ERR(cp)) {
49465 - memcpy(sbi->s_es->s_last_mounted, cp,
49466 - sizeof(sbi->s_es->s_last_mounted));
49467 + strlcpy(sbi->s_es->s_last_mounted, cp,
49468 + sizeof(sbi->s_es->s_last_mounted));
49469 sb->s_dirt = 1;
49470 }
49471 }
49472 diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
49473 index 42bac1b..0aab9d8 100644
49474 --- a/fs/ext4/mballoc.c
49475 +++ b/fs/ext4/mballoc.c
49476 @@ -1755,7 +1755,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
49477 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
49478
49479 if (EXT4_SB(sb)->s_mb_stats)
49480 - atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
49481 + atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
49482
49483 break;
49484 }
49485 @@ -2131,7 +2131,7 @@ repeat:
49486 ac->ac_status = AC_STATUS_CONTINUE;
49487 ac->ac_flags |= EXT4_MB_HINT_FIRST;
49488 cr = 3;
49489 - atomic_inc(&sbi->s_mb_lost_chunks);
49490 + atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
49491 goto repeat;
49492 }
49493 }
49494 @@ -2174,6 +2174,8 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
49495 ext4_grpblk_t counters[16];
49496 } sg;
49497
49498 + pax_track_stack();
49499 +
49500 group--;
49501 if (group == 0)
49502 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
49503 @@ -2534,25 +2536,25 @@ int ext4_mb_release(struct super_block *sb)
49504 if (sbi->s_mb_stats) {
49505 printk(KERN_INFO
49506 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
49507 - atomic_read(&sbi->s_bal_allocated),
49508 - atomic_read(&sbi->s_bal_reqs),
49509 - atomic_read(&sbi->s_bal_success));
49510 + atomic_read_unchecked(&sbi->s_bal_allocated),
49511 + atomic_read_unchecked(&sbi->s_bal_reqs),
49512 + atomic_read_unchecked(&sbi->s_bal_success));
49513 printk(KERN_INFO
49514 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
49515 "%u 2^N hits, %u breaks, %u lost\n",
49516 - atomic_read(&sbi->s_bal_ex_scanned),
49517 - atomic_read(&sbi->s_bal_goals),
49518 - atomic_read(&sbi->s_bal_2orders),
49519 - atomic_read(&sbi->s_bal_breaks),
49520 - atomic_read(&sbi->s_mb_lost_chunks));
49521 + atomic_read_unchecked(&sbi->s_bal_ex_scanned),
49522 + atomic_read_unchecked(&sbi->s_bal_goals),
49523 + atomic_read_unchecked(&sbi->s_bal_2orders),
49524 + atomic_read_unchecked(&sbi->s_bal_breaks),
49525 + atomic_read_unchecked(&sbi->s_mb_lost_chunks));
49526 printk(KERN_INFO
49527 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
49528 sbi->s_mb_buddies_generated++,
49529 sbi->s_mb_generation_time);
49530 printk(KERN_INFO
49531 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
49532 - atomic_read(&sbi->s_mb_preallocated),
49533 - atomic_read(&sbi->s_mb_discarded));
49534 + atomic_read_unchecked(&sbi->s_mb_preallocated),
49535 + atomic_read_unchecked(&sbi->s_mb_discarded));
49536 }
49537
49538 free_percpu(sbi->s_locality_groups);
49539 @@ -3034,16 +3036,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
49540 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
49541
49542 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
49543 - atomic_inc(&sbi->s_bal_reqs);
49544 - atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
49545 + atomic_inc_unchecked(&sbi->s_bal_reqs);
49546 + atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
49547 if (ac->ac_o_ex.fe_len >= ac->ac_g_ex.fe_len)
49548 - atomic_inc(&sbi->s_bal_success);
49549 - atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
49550 + atomic_inc_unchecked(&sbi->s_bal_success);
49551 + atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
49552 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
49553 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
49554 - atomic_inc(&sbi->s_bal_goals);
49555 + atomic_inc_unchecked(&sbi->s_bal_goals);
49556 if (ac->ac_found > sbi->s_mb_max_to_scan)
49557 - atomic_inc(&sbi->s_bal_breaks);
49558 + atomic_inc_unchecked(&sbi->s_bal_breaks);
49559 }
49560
49561 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
49562 @@ -3443,7 +3445,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
49563 trace_ext4_mb_new_inode_pa(ac, pa);
49564
49565 ext4_mb_use_inode_pa(ac, pa);
49566 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
49567 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
49568
49569 ei = EXT4_I(ac->ac_inode);
49570 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
49571 @@ -3503,7 +3505,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
49572 trace_ext4_mb_new_group_pa(ac, pa);
49573
49574 ext4_mb_use_group_pa(ac, pa);
49575 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
49576 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
49577
49578 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
49579 lg = ac->ac_lg;
49580 @@ -3607,7 +3609,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
49581 * from the bitmap and continue.
49582 */
49583 }
49584 - atomic_add(free, &sbi->s_mb_discarded);
49585 + atomic_add_unchecked(free, &sbi->s_mb_discarded);
49586
49587 return err;
49588 }
49589 @@ -3626,7 +3628,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
49590 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
49591 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
49592 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
49593 - atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
49594 + atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
49595
49596 if (ac) {
49597 ac->ac_sb = sb;
49598 diff --git a/fs/ext4/super.c b/fs/ext4/super.c
49599 index f1e7077..edd86b2 100644
49600 --- a/fs/ext4/super.c
49601 +++ b/fs/ext4/super.c
49602 @@ -2286,7 +2286,7 @@ static void ext4_sb_release(struct kobject *kobj)
49603 }
49604
49605
49606 -static struct sysfs_ops ext4_attr_ops = {
49607 +static const struct sysfs_ops ext4_attr_ops = {
49608 .show = ext4_attr_show,
49609 .store = ext4_attr_store,
49610 };
49611 diff --git a/fs/fcntl.c b/fs/fcntl.c
49612 index 97e01dc..e9aab2d 100644
49613 --- a/fs/fcntl.c
49614 +++ b/fs/fcntl.c
49615 @@ -223,6 +223,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
49616 if (err)
49617 return err;
49618
49619 + if (gr_handle_chroot_fowner(pid, type))
49620 + return -ENOENT;
49621 + if (gr_check_protected_task_fowner(pid, type))
49622 + return -EACCES;
49623 +
49624 f_modown(filp, pid, type, force);
49625 return 0;
49626 }
49627 @@ -265,7 +270,7 @@ pid_t f_getown(struct file *filp)
49628
49629 static int f_setown_ex(struct file *filp, unsigned long arg)
49630 {
49631 - struct f_owner_ex * __user owner_p = (void * __user)arg;
49632 + struct f_owner_ex __user *owner_p = (void __user *)arg;
49633 struct f_owner_ex owner;
49634 struct pid *pid;
49635 int type;
49636 @@ -305,7 +310,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
49637
49638 static int f_getown_ex(struct file *filp, unsigned long arg)
49639 {
49640 - struct f_owner_ex * __user owner_p = (void * __user)arg;
49641 + struct f_owner_ex __user *owner_p = (void __user *)arg;
49642 struct f_owner_ex owner;
49643 int ret = 0;
49644
49645 @@ -344,6 +349,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
49646 switch (cmd) {
49647 case F_DUPFD:
49648 case F_DUPFD_CLOEXEC:
49649 + gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
49650 if (arg >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
49651 break;
49652 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
49653 diff --git a/fs/fifo.c b/fs/fifo.c
49654 index f8f97b8..b1f2259 100644
49655 --- a/fs/fifo.c
49656 +++ b/fs/fifo.c
49657 @@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
49658 */
49659 filp->f_op = &read_pipefifo_fops;
49660 pipe->r_counter++;
49661 - if (pipe->readers++ == 0)
49662 + if (atomic_inc_return(&pipe->readers) == 1)
49663 wake_up_partner(inode);
49664
49665 - if (!pipe->writers) {
49666 + if (!atomic_read(&pipe->writers)) {
49667 if ((filp->f_flags & O_NONBLOCK)) {
49668 /* suppress POLLHUP until we have
49669 * seen a writer */
49670 @@ -83,15 +83,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
49671 * errno=ENXIO when there is no process reading the FIFO.
49672 */
49673 ret = -ENXIO;
49674 - if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
49675 + if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
49676 goto err;
49677
49678 filp->f_op = &write_pipefifo_fops;
49679 pipe->w_counter++;
49680 - if (!pipe->writers++)
49681 + if (atomic_inc_return(&pipe->writers) == 1)
49682 wake_up_partner(inode);
49683
49684 - if (!pipe->readers) {
49685 + if (!atomic_read(&pipe->readers)) {
49686 wait_for_partner(inode, &pipe->r_counter);
49687 if (signal_pending(current))
49688 goto err_wr;
49689 @@ -107,11 +107,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
49690 */
49691 filp->f_op = &rdwr_pipefifo_fops;
49692
49693 - pipe->readers++;
49694 - pipe->writers++;
49695 + atomic_inc(&pipe->readers);
49696 + atomic_inc(&pipe->writers);
49697 pipe->r_counter++;
49698 pipe->w_counter++;
49699 - if (pipe->readers == 1 || pipe->writers == 1)
49700 + if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
49701 wake_up_partner(inode);
49702 break;
49703
49704 @@ -125,19 +125,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
49705 return 0;
49706
49707 err_rd:
49708 - if (!--pipe->readers)
49709 + if (atomic_dec_and_test(&pipe->readers))
49710 wake_up_interruptible(&pipe->wait);
49711 ret = -ERESTARTSYS;
49712 goto err;
49713
49714 err_wr:
49715 - if (!--pipe->writers)
49716 + if (atomic_dec_and_test(&pipe->writers))
49717 wake_up_interruptible(&pipe->wait);
49718 ret = -ERESTARTSYS;
49719 goto err;
49720
49721 err:
49722 - if (!pipe->readers && !pipe->writers)
49723 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
49724 free_pipe_info(inode);
49725
49726 err_nocleanup:
49727 diff --git a/fs/file.c b/fs/file.c
49728 index 87e1290..a930cc4 100644
49729 --- a/fs/file.c
49730 +++ b/fs/file.c
49731 @@ -14,6 +14,7 @@
49732 #include <linux/slab.h>
49733 #include <linux/vmalloc.h>
49734 #include <linux/file.h>
49735 +#include <linux/security.h>
49736 #include <linux/fdtable.h>
49737 #include <linux/bitops.h>
49738 #include <linux/interrupt.h>
49739 @@ -257,6 +258,8 @@ int expand_files(struct files_struct *files, int nr)
49740 * N.B. For clone tasks sharing a files structure, this test
49741 * will limit the total number of files that can be opened.
49742 */
49743 +
49744 + gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
49745 if (nr >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
49746 return -EMFILE;
49747
49748 diff --git a/fs/filesystems.c b/fs/filesystems.c
49749 index a24c58e..53f91ee 100644
49750 --- a/fs/filesystems.c
49751 +++ b/fs/filesystems.c
49752 @@ -272,7 +272,12 @@ struct file_system_type *get_fs_type(const char *name)
49753 int len = dot ? dot - name : strlen(name);
49754
49755 fs = __get_fs_type(name, len);
49756 +
49757 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
49758 + if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
49759 +#else
49760 if (!fs && (request_module("%.*s", len, name) == 0))
49761 +#endif
49762 fs = __get_fs_type(name, len);
49763
49764 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
49765 diff --git a/fs/fs_struct.c b/fs/fs_struct.c
49766 index eee0590..1181166 100644
49767 --- a/fs/fs_struct.c
49768 +++ b/fs/fs_struct.c
49769 @@ -4,6 +4,7 @@
49770 #include <linux/path.h>
49771 #include <linux/slab.h>
49772 #include <linux/fs_struct.h>
49773 +#include <linux/grsecurity.h>
49774
49775 /*
49776 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
49777 @@ -17,6 +18,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
49778 old_root = fs->root;
49779 fs->root = *path;
49780 path_get(path);
49781 + gr_set_chroot_entries(current, path);
49782 write_unlock(&fs->lock);
49783 if (old_root.dentry)
49784 path_put(&old_root);
49785 @@ -56,6 +58,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
49786 && fs->root.mnt == old_root->mnt) {
49787 path_get(new_root);
49788 fs->root = *new_root;
49789 + gr_set_chroot_entries(p, new_root);
49790 count++;
49791 }
49792 if (fs->pwd.dentry == old_root->dentry
49793 @@ -89,7 +92,8 @@ void exit_fs(struct task_struct *tsk)
49794 task_lock(tsk);
49795 write_lock(&fs->lock);
49796 tsk->fs = NULL;
49797 - kill = !--fs->users;
49798 + gr_clear_chroot_entries(tsk);
49799 + kill = !atomic_dec_return(&fs->users);
49800 write_unlock(&fs->lock);
49801 task_unlock(tsk);
49802 if (kill)
49803 @@ -102,7 +106,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
49804 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
49805 /* We don't need to lock fs - think why ;-) */
49806 if (fs) {
49807 - fs->users = 1;
49808 + atomic_set(&fs->users, 1);
49809 fs->in_exec = 0;
49810 rwlock_init(&fs->lock);
49811 fs->umask = old->umask;
49812 @@ -127,8 +131,9 @@ int unshare_fs_struct(void)
49813
49814 task_lock(current);
49815 write_lock(&fs->lock);
49816 - kill = !--fs->users;
49817 + kill = !atomic_dec_return(&fs->users);
49818 current->fs = new_fs;
49819 + gr_set_chroot_entries(current, &new_fs->root);
49820 write_unlock(&fs->lock);
49821 task_unlock(current);
49822
49823 @@ -141,13 +146,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
49824
49825 int current_umask(void)
49826 {
49827 - return current->fs->umask;
49828 + return current->fs->umask | gr_acl_umask();
49829 }
49830 EXPORT_SYMBOL(current_umask);
49831
49832 /* to be mentioned only in INIT_TASK */
49833 struct fs_struct init_fs = {
49834 - .users = 1,
49835 + .users = ATOMIC_INIT(1),
49836 .lock = __RW_LOCK_UNLOCKED(init_fs.lock),
49837 .umask = 0022,
49838 };
49839 @@ -162,12 +167,13 @@ void daemonize_fs_struct(void)
49840 task_lock(current);
49841
49842 write_lock(&init_fs.lock);
49843 - init_fs.users++;
49844 + atomic_inc(&init_fs.users);
49845 write_unlock(&init_fs.lock);
49846
49847 write_lock(&fs->lock);
49848 current->fs = &init_fs;
49849 - kill = !--fs->users;
49850 + gr_set_chroot_entries(current, &current->fs->root);
49851 + kill = !atomic_dec_return(&fs->users);
49852 write_unlock(&fs->lock);
49853
49854 task_unlock(current);
49855 diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
49856 index 9905350..02eaec4 100644
49857 --- a/fs/fscache/cookie.c
49858 +++ b/fs/fscache/cookie.c
49859 @@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
49860 parent ? (char *) parent->def->name : "<no-parent>",
49861 def->name, netfs_data);
49862
49863 - fscache_stat(&fscache_n_acquires);
49864 + fscache_stat_unchecked(&fscache_n_acquires);
49865
49866 /* if there's no parent cookie, then we don't create one here either */
49867 if (!parent) {
49868 - fscache_stat(&fscache_n_acquires_null);
49869 + fscache_stat_unchecked(&fscache_n_acquires_null);
49870 _leave(" [no parent]");
49871 return NULL;
49872 }
49873 @@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
49874 /* allocate and initialise a cookie */
49875 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
49876 if (!cookie) {
49877 - fscache_stat(&fscache_n_acquires_oom);
49878 + fscache_stat_unchecked(&fscache_n_acquires_oom);
49879 _leave(" [ENOMEM]");
49880 return NULL;
49881 }
49882 @@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
49883
49884 switch (cookie->def->type) {
49885 case FSCACHE_COOKIE_TYPE_INDEX:
49886 - fscache_stat(&fscache_n_cookie_index);
49887 + fscache_stat_unchecked(&fscache_n_cookie_index);
49888 break;
49889 case FSCACHE_COOKIE_TYPE_DATAFILE:
49890 - fscache_stat(&fscache_n_cookie_data);
49891 + fscache_stat_unchecked(&fscache_n_cookie_data);
49892 break;
49893 default:
49894 - fscache_stat(&fscache_n_cookie_special);
49895 + fscache_stat_unchecked(&fscache_n_cookie_special);
49896 break;
49897 }
49898
49899 @@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
49900 if (fscache_acquire_non_index_cookie(cookie) < 0) {
49901 atomic_dec(&parent->n_children);
49902 __fscache_cookie_put(cookie);
49903 - fscache_stat(&fscache_n_acquires_nobufs);
49904 + fscache_stat_unchecked(&fscache_n_acquires_nobufs);
49905 _leave(" = NULL");
49906 return NULL;
49907 }
49908 }
49909
49910 - fscache_stat(&fscache_n_acquires_ok);
49911 + fscache_stat_unchecked(&fscache_n_acquires_ok);
49912 _leave(" = %p", cookie);
49913 return cookie;
49914 }
49915 @@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
49916 cache = fscache_select_cache_for_object(cookie->parent);
49917 if (!cache) {
49918 up_read(&fscache_addremove_sem);
49919 - fscache_stat(&fscache_n_acquires_no_cache);
49920 + fscache_stat_unchecked(&fscache_n_acquires_no_cache);
49921 _leave(" = -ENOMEDIUM [no cache]");
49922 return -ENOMEDIUM;
49923 }
49924 @@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
49925 object = cache->ops->alloc_object(cache, cookie);
49926 fscache_stat_d(&fscache_n_cop_alloc_object);
49927 if (IS_ERR(object)) {
49928 - fscache_stat(&fscache_n_object_no_alloc);
49929 + fscache_stat_unchecked(&fscache_n_object_no_alloc);
49930 ret = PTR_ERR(object);
49931 goto error;
49932 }
49933
49934 - fscache_stat(&fscache_n_object_alloc);
49935 + fscache_stat_unchecked(&fscache_n_object_alloc);
49936
49937 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
49938
49939 @@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
49940 struct fscache_object *object;
49941 struct hlist_node *_p;
49942
49943 - fscache_stat(&fscache_n_updates);
49944 + fscache_stat_unchecked(&fscache_n_updates);
49945
49946 if (!cookie) {
49947 - fscache_stat(&fscache_n_updates_null);
49948 + fscache_stat_unchecked(&fscache_n_updates_null);
49949 _leave(" [no cookie]");
49950 return;
49951 }
49952 @@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
49953 struct fscache_object *object;
49954 unsigned long event;
49955
49956 - fscache_stat(&fscache_n_relinquishes);
49957 + fscache_stat_unchecked(&fscache_n_relinquishes);
49958 if (retire)
49959 - fscache_stat(&fscache_n_relinquishes_retire);
49960 + fscache_stat_unchecked(&fscache_n_relinquishes_retire);
49961
49962 if (!cookie) {
49963 - fscache_stat(&fscache_n_relinquishes_null);
49964 + fscache_stat_unchecked(&fscache_n_relinquishes_null);
49965 _leave(" [no cookie]");
49966 return;
49967 }
49968 @@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
49969
49970 /* wait for the cookie to finish being instantiated (or to fail) */
49971 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
49972 - fscache_stat(&fscache_n_relinquishes_waitcrt);
49973 + fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
49974 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
49975 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
49976 }
49977 diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
49978 index edd7434..0725e66 100644
49979 --- a/fs/fscache/internal.h
49980 +++ b/fs/fscache/internal.h
49981 @@ -136,94 +136,94 @@ extern void fscache_proc_cleanup(void);
49982 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
49983 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
49984
49985 -extern atomic_t fscache_n_op_pend;
49986 -extern atomic_t fscache_n_op_run;
49987 -extern atomic_t fscache_n_op_enqueue;
49988 -extern atomic_t fscache_n_op_deferred_release;
49989 -extern atomic_t fscache_n_op_release;
49990 -extern atomic_t fscache_n_op_gc;
49991 -extern atomic_t fscache_n_op_cancelled;
49992 -extern atomic_t fscache_n_op_rejected;
49993 +extern atomic_unchecked_t fscache_n_op_pend;
49994 +extern atomic_unchecked_t fscache_n_op_run;
49995 +extern atomic_unchecked_t fscache_n_op_enqueue;
49996 +extern atomic_unchecked_t fscache_n_op_deferred_release;
49997 +extern atomic_unchecked_t fscache_n_op_release;
49998 +extern atomic_unchecked_t fscache_n_op_gc;
49999 +extern atomic_unchecked_t fscache_n_op_cancelled;
50000 +extern atomic_unchecked_t fscache_n_op_rejected;
50001
50002 -extern atomic_t fscache_n_attr_changed;
50003 -extern atomic_t fscache_n_attr_changed_ok;
50004 -extern atomic_t fscache_n_attr_changed_nobufs;
50005 -extern atomic_t fscache_n_attr_changed_nomem;
50006 -extern atomic_t fscache_n_attr_changed_calls;
50007 +extern atomic_unchecked_t fscache_n_attr_changed;
50008 +extern atomic_unchecked_t fscache_n_attr_changed_ok;
50009 +extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
50010 +extern atomic_unchecked_t fscache_n_attr_changed_nomem;
50011 +extern atomic_unchecked_t fscache_n_attr_changed_calls;
50012
50013 -extern atomic_t fscache_n_allocs;
50014 -extern atomic_t fscache_n_allocs_ok;
50015 -extern atomic_t fscache_n_allocs_wait;
50016 -extern atomic_t fscache_n_allocs_nobufs;
50017 -extern atomic_t fscache_n_allocs_intr;
50018 -extern atomic_t fscache_n_allocs_object_dead;
50019 -extern atomic_t fscache_n_alloc_ops;
50020 -extern atomic_t fscache_n_alloc_op_waits;
50021 +extern atomic_unchecked_t fscache_n_allocs;
50022 +extern atomic_unchecked_t fscache_n_allocs_ok;
50023 +extern atomic_unchecked_t fscache_n_allocs_wait;
50024 +extern atomic_unchecked_t fscache_n_allocs_nobufs;
50025 +extern atomic_unchecked_t fscache_n_allocs_intr;
50026 +extern atomic_unchecked_t fscache_n_allocs_object_dead;
50027 +extern atomic_unchecked_t fscache_n_alloc_ops;
50028 +extern atomic_unchecked_t fscache_n_alloc_op_waits;
50029
50030 -extern atomic_t fscache_n_retrievals;
50031 -extern atomic_t fscache_n_retrievals_ok;
50032 -extern atomic_t fscache_n_retrievals_wait;
50033 -extern atomic_t fscache_n_retrievals_nodata;
50034 -extern atomic_t fscache_n_retrievals_nobufs;
50035 -extern atomic_t fscache_n_retrievals_intr;
50036 -extern atomic_t fscache_n_retrievals_nomem;
50037 -extern atomic_t fscache_n_retrievals_object_dead;
50038 -extern atomic_t fscache_n_retrieval_ops;
50039 -extern atomic_t fscache_n_retrieval_op_waits;
50040 +extern atomic_unchecked_t fscache_n_retrievals;
50041 +extern atomic_unchecked_t fscache_n_retrievals_ok;
50042 +extern atomic_unchecked_t fscache_n_retrievals_wait;
50043 +extern atomic_unchecked_t fscache_n_retrievals_nodata;
50044 +extern atomic_unchecked_t fscache_n_retrievals_nobufs;
50045 +extern atomic_unchecked_t fscache_n_retrievals_intr;
50046 +extern atomic_unchecked_t fscache_n_retrievals_nomem;
50047 +extern atomic_unchecked_t fscache_n_retrievals_object_dead;
50048 +extern atomic_unchecked_t fscache_n_retrieval_ops;
50049 +extern atomic_unchecked_t fscache_n_retrieval_op_waits;
50050
50051 -extern atomic_t fscache_n_stores;
50052 -extern atomic_t fscache_n_stores_ok;
50053 -extern atomic_t fscache_n_stores_again;
50054 -extern atomic_t fscache_n_stores_nobufs;
50055 -extern atomic_t fscache_n_stores_oom;
50056 -extern atomic_t fscache_n_store_ops;
50057 -extern atomic_t fscache_n_store_calls;
50058 -extern atomic_t fscache_n_store_pages;
50059 -extern atomic_t fscache_n_store_radix_deletes;
50060 -extern atomic_t fscache_n_store_pages_over_limit;
50061 +extern atomic_unchecked_t fscache_n_stores;
50062 +extern atomic_unchecked_t fscache_n_stores_ok;
50063 +extern atomic_unchecked_t fscache_n_stores_again;
50064 +extern atomic_unchecked_t fscache_n_stores_nobufs;
50065 +extern atomic_unchecked_t fscache_n_stores_oom;
50066 +extern atomic_unchecked_t fscache_n_store_ops;
50067 +extern atomic_unchecked_t fscache_n_store_calls;
50068 +extern atomic_unchecked_t fscache_n_store_pages;
50069 +extern atomic_unchecked_t fscache_n_store_radix_deletes;
50070 +extern atomic_unchecked_t fscache_n_store_pages_over_limit;
50071
50072 -extern atomic_t fscache_n_store_vmscan_not_storing;
50073 -extern atomic_t fscache_n_store_vmscan_gone;
50074 -extern atomic_t fscache_n_store_vmscan_busy;
50075 -extern atomic_t fscache_n_store_vmscan_cancelled;
50076 +extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
50077 +extern atomic_unchecked_t fscache_n_store_vmscan_gone;
50078 +extern atomic_unchecked_t fscache_n_store_vmscan_busy;
50079 +extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
50080
50081 -extern atomic_t fscache_n_marks;
50082 -extern atomic_t fscache_n_uncaches;
50083 +extern atomic_unchecked_t fscache_n_marks;
50084 +extern atomic_unchecked_t fscache_n_uncaches;
50085
50086 -extern atomic_t fscache_n_acquires;
50087 -extern atomic_t fscache_n_acquires_null;
50088 -extern atomic_t fscache_n_acquires_no_cache;
50089 -extern atomic_t fscache_n_acquires_ok;
50090 -extern atomic_t fscache_n_acquires_nobufs;
50091 -extern atomic_t fscache_n_acquires_oom;
50092 +extern atomic_unchecked_t fscache_n_acquires;
50093 +extern atomic_unchecked_t fscache_n_acquires_null;
50094 +extern atomic_unchecked_t fscache_n_acquires_no_cache;
50095 +extern atomic_unchecked_t fscache_n_acquires_ok;
50096 +extern atomic_unchecked_t fscache_n_acquires_nobufs;
50097 +extern atomic_unchecked_t fscache_n_acquires_oom;
50098
50099 -extern atomic_t fscache_n_updates;
50100 -extern atomic_t fscache_n_updates_null;
50101 -extern atomic_t fscache_n_updates_run;
50102 +extern atomic_unchecked_t fscache_n_updates;
50103 +extern atomic_unchecked_t fscache_n_updates_null;
50104 +extern atomic_unchecked_t fscache_n_updates_run;
50105
50106 -extern atomic_t fscache_n_relinquishes;
50107 -extern atomic_t fscache_n_relinquishes_null;
50108 -extern atomic_t fscache_n_relinquishes_waitcrt;
50109 -extern atomic_t fscache_n_relinquishes_retire;
50110 +extern atomic_unchecked_t fscache_n_relinquishes;
50111 +extern atomic_unchecked_t fscache_n_relinquishes_null;
50112 +extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
50113 +extern atomic_unchecked_t fscache_n_relinquishes_retire;
50114
50115 -extern atomic_t fscache_n_cookie_index;
50116 -extern atomic_t fscache_n_cookie_data;
50117 -extern atomic_t fscache_n_cookie_special;
50118 +extern atomic_unchecked_t fscache_n_cookie_index;
50119 +extern atomic_unchecked_t fscache_n_cookie_data;
50120 +extern atomic_unchecked_t fscache_n_cookie_special;
50121
50122 -extern atomic_t fscache_n_object_alloc;
50123 -extern atomic_t fscache_n_object_no_alloc;
50124 -extern atomic_t fscache_n_object_lookups;
50125 -extern atomic_t fscache_n_object_lookups_negative;
50126 -extern atomic_t fscache_n_object_lookups_positive;
50127 -extern atomic_t fscache_n_object_lookups_timed_out;
50128 -extern atomic_t fscache_n_object_created;
50129 -extern atomic_t fscache_n_object_avail;
50130 -extern atomic_t fscache_n_object_dead;
50131 +extern atomic_unchecked_t fscache_n_object_alloc;
50132 +extern atomic_unchecked_t fscache_n_object_no_alloc;
50133 +extern atomic_unchecked_t fscache_n_object_lookups;
50134 +extern atomic_unchecked_t fscache_n_object_lookups_negative;
50135 +extern atomic_unchecked_t fscache_n_object_lookups_positive;
50136 +extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
50137 +extern atomic_unchecked_t fscache_n_object_created;
50138 +extern atomic_unchecked_t fscache_n_object_avail;
50139 +extern atomic_unchecked_t fscache_n_object_dead;
50140
50141 -extern atomic_t fscache_n_checkaux_none;
50142 -extern atomic_t fscache_n_checkaux_okay;
50143 -extern atomic_t fscache_n_checkaux_update;
50144 -extern atomic_t fscache_n_checkaux_obsolete;
50145 +extern atomic_unchecked_t fscache_n_checkaux_none;
50146 +extern atomic_unchecked_t fscache_n_checkaux_okay;
50147 +extern atomic_unchecked_t fscache_n_checkaux_update;
50148 +extern atomic_unchecked_t fscache_n_checkaux_obsolete;
50149
50150 extern atomic_t fscache_n_cop_alloc_object;
50151 extern atomic_t fscache_n_cop_lookup_object;
50152 @@ -247,6 +247,11 @@ static inline void fscache_stat(atomic_t *stat)
50153 atomic_inc(stat);
50154 }
50155
50156 +static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
50157 +{
50158 + atomic_inc_unchecked(stat);
50159 +}
50160 +
50161 static inline void fscache_stat_d(atomic_t *stat)
50162 {
50163 atomic_dec(stat);
50164 @@ -259,6 +264,7 @@ extern const struct file_operations fscache_stats_fops;
50165
50166 #define __fscache_stat(stat) (NULL)
50167 #define fscache_stat(stat) do {} while (0)
50168 +#define fscache_stat_unchecked(stat) do {} while (0)
50169 #define fscache_stat_d(stat) do {} while (0)
50170 #endif
50171
50172 diff --git a/fs/fscache/object.c b/fs/fscache/object.c
50173 index e513ac5..e888d34 100644
50174 --- a/fs/fscache/object.c
50175 +++ b/fs/fscache/object.c
50176 @@ -144,7 +144,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
50177 /* update the object metadata on disk */
50178 case FSCACHE_OBJECT_UPDATING:
50179 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
50180 - fscache_stat(&fscache_n_updates_run);
50181 + fscache_stat_unchecked(&fscache_n_updates_run);
50182 fscache_stat(&fscache_n_cop_update_object);
50183 object->cache->ops->update_object(object);
50184 fscache_stat_d(&fscache_n_cop_update_object);
50185 @@ -233,7 +233,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
50186 spin_lock(&object->lock);
50187 object->state = FSCACHE_OBJECT_DEAD;
50188 spin_unlock(&object->lock);
50189 - fscache_stat(&fscache_n_object_dead);
50190 + fscache_stat_unchecked(&fscache_n_object_dead);
50191 goto terminal_transit;
50192
50193 /* handle the parent cache of this object being withdrawn from
50194 @@ -248,7 +248,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
50195 spin_lock(&object->lock);
50196 object->state = FSCACHE_OBJECT_DEAD;
50197 spin_unlock(&object->lock);
50198 - fscache_stat(&fscache_n_object_dead);
50199 + fscache_stat_unchecked(&fscache_n_object_dead);
50200 goto terminal_transit;
50201
50202 /* complain about the object being woken up once it is
50203 @@ -492,7 +492,7 @@ static void fscache_lookup_object(struct fscache_object *object)
50204 parent->cookie->def->name, cookie->def->name,
50205 object->cache->tag->name);
50206
50207 - fscache_stat(&fscache_n_object_lookups);
50208 + fscache_stat_unchecked(&fscache_n_object_lookups);
50209 fscache_stat(&fscache_n_cop_lookup_object);
50210 ret = object->cache->ops->lookup_object(object);
50211 fscache_stat_d(&fscache_n_cop_lookup_object);
50212 @@ -503,7 +503,7 @@ static void fscache_lookup_object(struct fscache_object *object)
50213 if (ret == -ETIMEDOUT) {
50214 /* probably stuck behind another object, so move this one to
50215 * the back of the queue */
50216 - fscache_stat(&fscache_n_object_lookups_timed_out);
50217 + fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
50218 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
50219 }
50220
50221 @@ -526,7 +526,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
50222
50223 spin_lock(&object->lock);
50224 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
50225 - fscache_stat(&fscache_n_object_lookups_negative);
50226 + fscache_stat_unchecked(&fscache_n_object_lookups_negative);
50227
50228 /* transit here to allow write requests to begin stacking up
50229 * and read requests to begin returning ENODATA */
50230 @@ -572,7 +572,7 @@ void fscache_obtained_object(struct fscache_object *object)
50231 * result, in which case there may be data available */
50232 spin_lock(&object->lock);
50233 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
50234 - fscache_stat(&fscache_n_object_lookups_positive);
50235 + fscache_stat_unchecked(&fscache_n_object_lookups_positive);
50236
50237 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
50238
50239 @@ -586,7 +586,7 @@ void fscache_obtained_object(struct fscache_object *object)
50240 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
50241 } else {
50242 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
50243 - fscache_stat(&fscache_n_object_created);
50244 + fscache_stat_unchecked(&fscache_n_object_created);
50245
50246 object->state = FSCACHE_OBJECT_AVAILABLE;
50247 spin_unlock(&object->lock);
50248 @@ -633,7 +633,7 @@ static void fscache_object_available(struct fscache_object *object)
50249 fscache_enqueue_dependents(object);
50250
50251 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
50252 - fscache_stat(&fscache_n_object_avail);
50253 + fscache_stat_unchecked(&fscache_n_object_avail);
50254
50255 _leave("");
50256 }
50257 @@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
50258 enum fscache_checkaux result;
50259
50260 if (!object->cookie->def->check_aux) {
50261 - fscache_stat(&fscache_n_checkaux_none);
50262 + fscache_stat_unchecked(&fscache_n_checkaux_none);
50263 return FSCACHE_CHECKAUX_OKAY;
50264 }
50265
50266 @@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
50267 switch (result) {
50268 /* entry okay as is */
50269 case FSCACHE_CHECKAUX_OKAY:
50270 - fscache_stat(&fscache_n_checkaux_okay);
50271 + fscache_stat_unchecked(&fscache_n_checkaux_okay);
50272 break;
50273
50274 /* entry requires update */
50275 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
50276 - fscache_stat(&fscache_n_checkaux_update);
50277 + fscache_stat_unchecked(&fscache_n_checkaux_update);
50278 break;
50279
50280 /* entry requires deletion */
50281 case FSCACHE_CHECKAUX_OBSOLETE:
50282 - fscache_stat(&fscache_n_checkaux_obsolete);
50283 + fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
50284 break;
50285
50286 default:
50287 diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
50288 index 313e79a..775240f 100644
50289 --- a/fs/fscache/operation.c
50290 +++ b/fs/fscache/operation.c
50291 @@ -16,7 +16,7 @@
50292 #include <linux/seq_file.h>
50293 #include "internal.h"
50294
50295 -atomic_t fscache_op_debug_id;
50296 +atomic_unchecked_t fscache_op_debug_id;
50297 EXPORT_SYMBOL(fscache_op_debug_id);
50298
50299 /**
50300 @@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
50301 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
50302 ASSERTCMP(atomic_read(&op->usage), >, 0);
50303
50304 - fscache_stat(&fscache_n_op_enqueue);
50305 + fscache_stat_unchecked(&fscache_n_op_enqueue);
50306 switch (op->flags & FSCACHE_OP_TYPE) {
50307 case FSCACHE_OP_FAST:
50308 _debug("queue fast");
50309 @@ -76,7 +76,7 @@ static void fscache_run_op(struct fscache_object *object,
50310 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
50311 if (op->processor)
50312 fscache_enqueue_operation(op);
50313 - fscache_stat(&fscache_n_op_run);
50314 + fscache_stat_unchecked(&fscache_n_op_run);
50315 }
50316
50317 /*
50318 @@ -107,11 +107,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
50319 if (object->n_ops > 0) {
50320 atomic_inc(&op->usage);
50321 list_add_tail(&op->pend_link, &object->pending_ops);
50322 - fscache_stat(&fscache_n_op_pend);
50323 + fscache_stat_unchecked(&fscache_n_op_pend);
50324 } else if (!list_empty(&object->pending_ops)) {
50325 atomic_inc(&op->usage);
50326 list_add_tail(&op->pend_link, &object->pending_ops);
50327 - fscache_stat(&fscache_n_op_pend);
50328 + fscache_stat_unchecked(&fscache_n_op_pend);
50329 fscache_start_operations(object);
50330 } else {
50331 ASSERTCMP(object->n_in_progress, ==, 0);
50332 @@ -127,7 +127,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
50333 object->n_exclusive++; /* reads and writes must wait */
50334 atomic_inc(&op->usage);
50335 list_add_tail(&op->pend_link, &object->pending_ops);
50336 - fscache_stat(&fscache_n_op_pend);
50337 + fscache_stat_unchecked(&fscache_n_op_pend);
50338 ret = 0;
50339 } else {
50340 /* not allowed to submit ops in any other state */
50341 @@ -214,11 +214,11 @@ int fscache_submit_op(struct fscache_object *object,
50342 if (object->n_exclusive > 0) {
50343 atomic_inc(&op->usage);
50344 list_add_tail(&op->pend_link, &object->pending_ops);
50345 - fscache_stat(&fscache_n_op_pend);
50346 + fscache_stat_unchecked(&fscache_n_op_pend);
50347 } else if (!list_empty(&object->pending_ops)) {
50348 atomic_inc(&op->usage);
50349 list_add_tail(&op->pend_link, &object->pending_ops);
50350 - fscache_stat(&fscache_n_op_pend);
50351 + fscache_stat_unchecked(&fscache_n_op_pend);
50352 fscache_start_operations(object);
50353 } else {
50354 ASSERTCMP(object->n_exclusive, ==, 0);
50355 @@ -230,12 +230,12 @@ int fscache_submit_op(struct fscache_object *object,
50356 object->n_ops++;
50357 atomic_inc(&op->usage);
50358 list_add_tail(&op->pend_link, &object->pending_ops);
50359 - fscache_stat(&fscache_n_op_pend);
50360 + fscache_stat_unchecked(&fscache_n_op_pend);
50361 ret = 0;
50362 } else if (object->state == FSCACHE_OBJECT_DYING ||
50363 object->state == FSCACHE_OBJECT_LC_DYING ||
50364 object->state == FSCACHE_OBJECT_WITHDRAWING) {
50365 - fscache_stat(&fscache_n_op_rejected);
50366 + fscache_stat_unchecked(&fscache_n_op_rejected);
50367 ret = -ENOBUFS;
50368 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
50369 fscache_report_unexpected_submission(object, op, ostate);
50370 @@ -305,7 +305,7 @@ int fscache_cancel_op(struct fscache_operation *op)
50371
50372 ret = -EBUSY;
50373 if (!list_empty(&op->pend_link)) {
50374 - fscache_stat(&fscache_n_op_cancelled);
50375 + fscache_stat_unchecked(&fscache_n_op_cancelled);
50376 list_del_init(&op->pend_link);
50377 object->n_ops--;
50378 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
50379 @@ -344,7 +344,7 @@ void fscache_put_operation(struct fscache_operation *op)
50380 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
50381 BUG();
50382
50383 - fscache_stat(&fscache_n_op_release);
50384 + fscache_stat_unchecked(&fscache_n_op_release);
50385
50386 if (op->release) {
50387 op->release(op);
50388 @@ -361,7 +361,7 @@ void fscache_put_operation(struct fscache_operation *op)
50389 * lock, and defer it otherwise */
50390 if (!spin_trylock(&object->lock)) {
50391 _debug("defer put");
50392 - fscache_stat(&fscache_n_op_deferred_release);
50393 + fscache_stat_unchecked(&fscache_n_op_deferred_release);
50394
50395 cache = object->cache;
50396 spin_lock(&cache->op_gc_list_lock);
50397 @@ -423,7 +423,7 @@ void fscache_operation_gc(struct work_struct *work)
50398
50399 _debug("GC DEFERRED REL OBJ%x OP%x",
50400 object->debug_id, op->debug_id);
50401 - fscache_stat(&fscache_n_op_gc);
50402 + fscache_stat_unchecked(&fscache_n_op_gc);
50403
50404 ASSERTCMP(atomic_read(&op->usage), ==, 0);
50405
50406 diff --git a/fs/fscache/page.c b/fs/fscache/page.c
50407 index c598ea4..6aac13e 100644
50408 --- a/fs/fscache/page.c
50409 +++ b/fs/fscache/page.c
50410 @@ -59,7 +59,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
50411 val = radix_tree_lookup(&cookie->stores, page->index);
50412 if (!val) {
50413 rcu_read_unlock();
50414 - fscache_stat(&fscache_n_store_vmscan_not_storing);
50415 + fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
50416 __fscache_uncache_page(cookie, page);
50417 return true;
50418 }
50419 @@ -89,11 +89,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
50420 spin_unlock(&cookie->stores_lock);
50421
50422 if (xpage) {
50423 - fscache_stat(&fscache_n_store_vmscan_cancelled);
50424 - fscache_stat(&fscache_n_store_radix_deletes);
50425 + fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
50426 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
50427 ASSERTCMP(xpage, ==, page);
50428 } else {
50429 - fscache_stat(&fscache_n_store_vmscan_gone);
50430 + fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
50431 }
50432
50433 wake_up_bit(&cookie->flags, 0);
50434 @@ -106,7 +106,7 @@ page_busy:
50435 /* we might want to wait here, but that could deadlock the allocator as
50436 * the slow-work threads writing to the cache may all end up sleeping
50437 * on memory allocation */
50438 - fscache_stat(&fscache_n_store_vmscan_busy);
50439 + fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
50440 return false;
50441 }
50442 EXPORT_SYMBOL(__fscache_maybe_release_page);
50443 @@ -130,7 +130,7 @@ static void fscache_end_page_write(struct fscache_object *object,
50444 FSCACHE_COOKIE_STORING_TAG);
50445 if (!radix_tree_tag_get(&cookie->stores, page->index,
50446 FSCACHE_COOKIE_PENDING_TAG)) {
50447 - fscache_stat(&fscache_n_store_radix_deletes);
50448 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
50449 xpage = radix_tree_delete(&cookie->stores, page->index);
50450 }
50451 spin_unlock(&cookie->stores_lock);
50452 @@ -151,7 +151,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
50453
50454 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
50455
50456 - fscache_stat(&fscache_n_attr_changed_calls);
50457 + fscache_stat_unchecked(&fscache_n_attr_changed_calls);
50458
50459 if (fscache_object_is_active(object)) {
50460 fscache_set_op_state(op, "CallFS");
50461 @@ -178,11 +178,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
50462
50463 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
50464
50465 - fscache_stat(&fscache_n_attr_changed);
50466 + fscache_stat_unchecked(&fscache_n_attr_changed);
50467
50468 op = kzalloc(sizeof(*op), GFP_KERNEL);
50469 if (!op) {
50470 - fscache_stat(&fscache_n_attr_changed_nomem);
50471 + fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
50472 _leave(" = -ENOMEM");
50473 return -ENOMEM;
50474 }
50475 @@ -202,7 +202,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
50476 if (fscache_submit_exclusive_op(object, op) < 0)
50477 goto nobufs;
50478 spin_unlock(&cookie->lock);
50479 - fscache_stat(&fscache_n_attr_changed_ok);
50480 + fscache_stat_unchecked(&fscache_n_attr_changed_ok);
50481 fscache_put_operation(op);
50482 _leave(" = 0");
50483 return 0;
50484 @@ -210,7 +210,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
50485 nobufs:
50486 spin_unlock(&cookie->lock);
50487 kfree(op);
50488 - fscache_stat(&fscache_n_attr_changed_nobufs);
50489 + fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
50490 _leave(" = %d", -ENOBUFS);
50491 return -ENOBUFS;
50492 }
50493 @@ -264,7 +264,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
50494 /* allocate a retrieval operation and attempt to submit it */
50495 op = kzalloc(sizeof(*op), GFP_NOIO);
50496 if (!op) {
50497 - fscache_stat(&fscache_n_retrievals_nomem);
50498 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
50499 return NULL;
50500 }
50501
50502 @@ -294,13 +294,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
50503 return 0;
50504 }
50505
50506 - fscache_stat(&fscache_n_retrievals_wait);
50507 + fscache_stat_unchecked(&fscache_n_retrievals_wait);
50508
50509 jif = jiffies;
50510 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
50511 fscache_wait_bit_interruptible,
50512 TASK_INTERRUPTIBLE) != 0) {
50513 - fscache_stat(&fscache_n_retrievals_intr);
50514 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
50515 _leave(" = -ERESTARTSYS");
50516 return -ERESTARTSYS;
50517 }
50518 @@ -318,8 +318,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
50519 */
50520 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
50521 struct fscache_retrieval *op,
50522 - atomic_t *stat_op_waits,
50523 - atomic_t *stat_object_dead)
50524 + atomic_unchecked_t *stat_op_waits,
50525 + atomic_unchecked_t *stat_object_dead)
50526 {
50527 int ret;
50528
50529 @@ -327,7 +327,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
50530 goto check_if_dead;
50531
50532 _debug(">>> WT");
50533 - fscache_stat(stat_op_waits);
50534 + fscache_stat_unchecked(stat_op_waits);
50535 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
50536 fscache_wait_bit_interruptible,
50537 TASK_INTERRUPTIBLE) < 0) {
50538 @@ -344,7 +344,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
50539
50540 check_if_dead:
50541 if (unlikely(fscache_object_is_dead(object))) {
50542 - fscache_stat(stat_object_dead);
50543 + fscache_stat_unchecked(stat_object_dead);
50544 return -ENOBUFS;
50545 }
50546 return 0;
50547 @@ -371,7 +371,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
50548
50549 _enter("%p,%p,,,", cookie, page);
50550
50551 - fscache_stat(&fscache_n_retrievals);
50552 + fscache_stat_unchecked(&fscache_n_retrievals);
50553
50554 if (hlist_empty(&cookie->backing_objects))
50555 goto nobufs;
50556 @@ -405,7 +405,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
50557 goto nobufs_unlock;
50558 spin_unlock(&cookie->lock);
50559
50560 - fscache_stat(&fscache_n_retrieval_ops);
50561 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
50562
50563 /* pin the netfs read context in case we need to do the actual netfs
50564 * read because we've encountered a cache read failure */
50565 @@ -435,15 +435,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
50566
50567 error:
50568 if (ret == -ENOMEM)
50569 - fscache_stat(&fscache_n_retrievals_nomem);
50570 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
50571 else if (ret == -ERESTARTSYS)
50572 - fscache_stat(&fscache_n_retrievals_intr);
50573 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
50574 else if (ret == -ENODATA)
50575 - fscache_stat(&fscache_n_retrievals_nodata);
50576 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
50577 else if (ret < 0)
50578 - fscache_stat(&fscache_n_retrievals_nobufs);
50579 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
50580 else
50581 - fscache_stat(&fscache_n_retrievals_ok);
50582 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
50583
50584 fscache_put_retrieval(op);
50585 _leave(" = %d", ret);
50586 @@ -453,7 +453,7 @@ nobufs_unlock:
50587 spin_unlock(&cookie->lock);
50588 kfree(op);
50589 nobufs:
50590 - fscache_stat(&fscache_n_retrievals_nobufs);
50591 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
50592 _leave(" = -ENOBUFS");
50593 return -ENOBUFS;
50594 }
50595 @@ -491,7 +491,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
50596
50597 _enter("%p,,%d,,,", cookie, *nr_pages);
50598
50599 - fscache_stat(&fscache_n_retrievals);
50600 + fscache_stat_unchecked(&fscache_n_retrievals);
50601
50602 if (hlist_empty(&cookie->backing_objects))
50603 goto nobufs;
50604 @@ -522,7 +522,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
50605 goto nobufs_unlock;
50606 spin_unlock(&cookie->lock);
50607
50608 - fscache_stat(&fscache_n_retrieval_ops);
50609 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
50610
50611 /* pin the netfs read context in case we need to do the actual netfs
50612 * read because we've encountered a cache read failure */
50613 @@ -552,15 +552,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
50614
50615 error:
50616 if (ret == -ENOMEM)
50617 - fscache_stat(&fscache_n_retrievals_nomem);
50618 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
50619 else if (ret == -ERESTARTSYS)
50620 - fscache_stat(&fscache_n_retrievals_intr);
50621 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
50622 else if (ret == -ENODATA)
50623 - fscache_stat(&fscache_n_retrievals_nodata);
50624 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
50625 else if (ret < 0)
50626 - fscache_stat(&fscache_n_retrievals_nobufs);
50627 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
50628 else
50629 - fscache_stat(&fscache_n_retrievals_ok);
50630 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
50631
50632 fscache_put_retrieval(op);
50633 _leave(" = %d", ret);
50634 @@ -570,7 +570,7 @@ nobufs_unlock:
50635 spin_unlock(&cookie->lock);
50636 kfree(op);
50637 nobufs:
50638 - fscache_stat(&fscache_n_retrievals_nobufs);
50639 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
50640 _leave(" = -ENOBUFS");
50641 return -ENOBUFS;
50642 }
50643 @@ -594,7 +594,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
50644
50645 _enter("%p,%p,,,", cookie, page);
50646
50647 - fscache_stat(&fscache_n_allocs);
50648 + fscache_stat_unchecked(&fscache_n_allocs);
50649
50650 if (hlist_empty(&cookie->backing_objects))
50651 goto nobufs;
50652 @@ -621,7 +621,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
50653 goto nobufs_unlock;
50654 spin_unlock(&cookie->lock);
50655
50656 - fscache_stat(&fscache_n_alloc_ops);
50657 + fscache_stat_unchecked(&fscache_n_alloc_ops);
50658
50659 ret = fscache_wait_for_retrieval_activation(
50660 object, op,
50661 @@ -637,11 +637,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
50662
50663 error:
50664 if (ret == -ERESTARTSYS)
50665 - fscache_stat(&fscache_n_allocs_intr);
50666 + fscache_stat_unchecked(&fscache_n_allocs_intr);
50667 else if (ret < 0)
50668 - fscache_stat(&fscache_n_allocs_nobufs);
50669 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
50670 else
50671 - fscache_stat(&fscache_n_allocs_ok);
50672 + fscache_stat_unchecked(&fscache_n_allocs_ok);
50673
50674 fscache_put_retrieval(op);
50675 _leave(" = %d", ret);
50676 @@ -651,7 +651,7 @@ nobufs_unlock:
50677 spin_unlock(&cookie->lock);
50678 kfree(op);
50679 nobufs:
50680 - fscache_stat(&fscache_n_allocs_nobufs);
50681 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
50682 _leave(" = -ENOBUFS");
50683 return -ENOBUFS;
50684 }
50685 @@ -694,7 +694,7 @@ static void fscache_write_op(struct fscache_operation *_op)
50686
50687 spin_lock(&cookie->stores_lock);
50688
50689 - fscache_stat(&fscache_n_store_calls);
50690 + fscache_stat_unchecked(&fscache_n_store_calls);
50691
50692 /* find a page to store */
50693 page = NULL;
50694 @@ -705,7 +705,7 @@ static void fscache_write_op(struct fscache_operation *_op)
50695 page = results[0];
50696 _debug("gang %d [%lx]", n, page->index);
50697 if (page->index > op->store_limit) {
50698 - fscache_stat(&fscache_n_store_pages_over_limit);
50699 + fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
50700 goto superseded;
50701 }
50702
50703 @@ -721,7 +721,7 @@ static void fscache_write_op(struct fscache_operation *_op)
50704
50705 if (page) {
50706 fscache_set_op_state(&op->op, "Store");
50707 - fscache_stat(&fscache_n_store_pages);
50708 + fscache_stat_unchecked(&fscache_n_store_pages);
50709 fscache_stat(&fscache_n_cop_write_page);
50710 ret = object->cache->ops->write_page(op, page);
50711 fscache_stat_d(&fscache_n_cop_write_page);
50712 @@ -792,7 +792,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
50713 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
50714 ASSERT(PageFsCache(page));
50715
50716 - fscache_stat(&fscache_n_stores);
50717 + fscache_stat_unchecked(&fscache_n_stores);
50718
50719 op = kzalloc(sizeof(*op), GFP_NOIO);
50720 if (!op)
50721 @@ -844,7 +844,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
50722 spin_unlock(&cookie->stores_lock);
50723 spin_unlock(&object->lock);
50724
50725 - op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
50726 + op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
50727 op->store_limit = object->store_limit;
50728
50729 if (fscache_submit_op(object, &op->op) < 0)
50730 @@ -852,8 +852,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
50731
50732 spin_unlock(&cookie->lock);
50733 radix_tree_preload_end();
50734 - fscache_stat(&fscache_n_store_ops);
50735 - fscache_stat(&fscache_n_stores_ok);
50736 + fscache_stat_unchecked(&fscache_n_store_ops);
50737 + fscache_stat_unchecked(&fscache_n_stores_ok);
50738
50739 /* the slow work queue now carries its own ref on the object */
50740 fscache_put_operation(&op->op);
50741 @@ -861,14 +861,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
50742 return 0;
50743
50744 already_queued:
50745 - fscache_stat(&fscache_n_stores_again);
50746 + fscache_stat_unchecked(&fscache_n_stores_again);
50747 already_pending:
50748 spin_unlock(&cookie->stores_lock);
50749 spin_unlock(&object->lock);
50750 spin_unlock(&cookie->lock);
50751 radix_tree_preload_end();
50752 kfree(op);
50753 - fscache_stat(&fscache_n_stores_ok);
50754 + fscache_stat_unchecked(&fscache_n_stores_ok);
50755 _leave(" = 0");
50756 return 0;
50757
50758 @@ -886,14 +886,14 @@ nobufs:
50759 spin_unlock(&cookie->lock);
50760 radix_tree_preload_end();
50761 kfree(op);
50762 - fscache_stat(&fscache_n_stores_nobufs);
50763 + fscache_stat_unchecked(&fscache_n_stores_nobufs);
50764 _leave(" = -ENOBUFS");
50765 return -ENOBUFS;
50766
50767 nomem_free:
50768 kfree(op);
50769 nomem:
50770 - fscache_stat(&fscache_n_stores_oom);
50771 + fscache_stat_unchecked(&fscache_n_stores_oom);
50772 _leave(" = -ENOMEM");
50773 return -ENOMEM;
50774 }
50775 @@ -911,7 +911,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
50776 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
50777 ASSERTCMP(page, !=, NULL);
50778
50779 - fscache_stat(&fscache_n_uncaches);
50780 + fscache_stat_unchecked(&fscache_n_uncaches);
50781
50782 /* cache withdrawal may beat us to it */
50783 if (!PageFsCache(page))
50784 @@ -964,7 +964,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
50785 unsigned long loop;
50786
50787 #ifdef CONFIG_FSCACHE_STATS
50788 - atomic_add(pagevec->nr, &fscache_n_marks);
50789 + atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
50790 #endif
50791
50792 for (loop = 0; loop < pagevec->nr; loop++) {
50793 diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
50794 index 46435f3..8cddf18 100644
50795 --- a/fs/fscache/stats.c
50796 +++ b/fs/fscache/stats.c
50797 @@ -18,95 +18,95 @@
50798 /*
50799 * operation counters
50800 */
50801 -atomic_t fscache_n_op_pend;
50802 -atomic_t fscache_n_op_run;
50803 -atomic_t fscache_n_op_enqueue;
50804 -atomic_t fscache_n_op_requeue;
50805 -atomic_t fscache_n_op_deferred_release;
50806 -atomic_t fscache_n_op_release;
50807 -atomic_t fscache_n_op_gc;
50808 -atomic_t fscache_n_op_cancelled;
50809 -atomic_t fscache_n_op_rejected;
50810 +atomic_unchecked_t fscache_n_op_pend;
50811 +atomic_unchecked_t fscache_n_op_run;
50812 +atomic_unchecked_t fscache_n_op_enqueue;
50813 +atomic_unchecked_t fscache_n_op_requeue;
50814 +atomic_unchecked_t fscache_n_op_deferred_release;
50815 +atomic_unchecked_t fscache_n_op_release;
50816 +atomic_unchecked_t fscache_n_op_gc;
50817 +atomic_unchecked_t fscache_n_op_cancelled;
50818 +atomic_unchecked_t fscache_n_op_rejected;
50819
50820 -atomic_t fscache_n_attr_changed;
50821 -atomic_t fscache_n_attr_changed_ok;
50822 -atomic_t fscache_n_attr_changed_nobufs;
50823 -atomic_t fscache_n_attr_changed_nomem;
50824 -atomic_t fscache_n_attr_changed_calls;
50825 +atomic_unchecked_t fscache_n_attr_changed;
50826 +atomic_unchecked_t fscache_n_attr_changed_ok;
50827 +atomic_unchecked_t fscache_n_attr_changed_nobufs;
50828 +atomic_unchecked_t fscache_n_attr_changed_nomem;
50829 +atomic_unchecked_t fscache_n_attr_changed_calls;
50830
50831 -atomic_t fscache_n_allocs;
50832 -atomic_t fscache_n_allocs_ok;
50833 -atomic_t fscache_n_allocs_wait;
50834 -atomic_t fscache_n_allocs_nobufs;
50835 -atomic_t fscache_n_allocs_intr;
50836 -atomic_t fscache_n_allocs_object_dead;
50837 -atomic_t fscache_n_alloc_ops;
50838 -atomic_t fscache_n_alloc_op_waits;
50839 +atomic_unchecked_t fscache_n_allocs;
50840 +atomic_unchecked_t fscache_n_allocs_ok;
50841 +atomic_unchecked_t fscache_n_allocs_wait;
50842 +atomic_unchecked_t fscache_n_allocs_nobufs;
50843 +atomic_unchecked_t fscache_n_allocs_intr;
50844 +atomic_unchecked_t fscache_n_allocs_object_dead;
50845 +atomic_unchecked_t fscache_n_alloc_ops;
50846 +atomic_unchecked_t fscache_n_alloc_op_waits;
50847
50848 -atomic_t fscache_n_retrievals;
50849 -atomic_t fscache_n_retrievals_ok;
50850 -atomic_t fscache_n_retrievals_wait;
50851 -atomic_t fscache_n_retrievals_nodata;
50852 -atomic_t fscache_n_retrievals_nobufs;
50853 -atomic_t fscache_n_retrievals_intr;
50854 -atomic_t fscache_n_retrievals_nomem;
50855 -atomic_t fscache_n_retrievals_object_dead;
50856 -atomic_t fscache_n_retrieval_ops;
50857 -atomic_t fscache_n_retrieval_op_waits;
50858 +atomic_unchecked_t fscache_n_retrievals;
50859 +atomic_unchecked_t fscache_n_retrievals_ok;
50860 +atomic_unchecked_t fscache_n_retrievals_wait;
50861 +atomic_unchecked_t fscache_n_retrievals_nodata;
50862 +atomic_unchecked_t fscache_n_retrievals_nobufs;
50863 +atomic_unchecked_t fscache_n_retrievals_intr;
50864 +atomic_unchecked_t fscache_n_retrievals_nomem;
50865 +atomic_unchecked_t fscache_n_retrievals_object_dead;
50866 +atomic_unchecked_t fscache_n_retrieval_ops;
50867 +atomic_unchecked_t fscache_n_retrieval_op_waits;
50868
50869 -atomic_t fscache_n_stores;
50870 -atomic_t fscache_n_stores_ok;
50871 -atomic_t fscache_n_stores_again;
50872 -atomic_t fscache_n_stores_nobufs;
50873 -atomic_t fscache_n_stores_oom;
50874 -atomic_t fscache_n_store_ops;
50875 -atomic_t fscache_n_store_calls;
50876 -atomic_t fscache_n_store_pages;
50877 -atomic_t fscache_n_store_radix_deletes;
50878 -atomic_t fscache_n_store_pages_over_limit;
50879 +atomic_unchecked_t fscache_n_stores;
50880 +atomic_unchecked_t fscache_n_stores_ok;
50881 +atomic_unchecked_t fscache_n_stores_again;
50882 +atomic_unchecked_t fscache_n_stores_nobufs;
50883 +atomic_unchecked_t fscache_n_stores_oom;
50884 +atomic_unchecked_t fscache_n_store_ops;
50885 +atomic_unchecked_t fscache_n_store_calls;
50886 +atomic_unchecked_t fscache_n_store_pages;
50887 +atomic_unchecked_t fscache_n_store_radix_deletes;
50888 +atomic_unchecked_t fscache_n_store_pages_over_limit;
50889
50890 -atomic_t fscache_n_store_vmscan_not_storing;
50891 -atomic_t fscache_n_store_vmscan_gone;
50892 -atomic_t fscache_n_store_vmscan_busy;
50893 -atomic_t fscache_n_store_vmscan_cancelled;
50894 +atomic_unchecked_t fscache_n_store_vmscan_not_storing;
50895 +atomic_unchecked_t fscache_n_store_vmscan_gone;
50896 +atomic_unchecked_t fscache_n_store_vmscan_busy;
50897 +atomic_unchecked_t fscache_n_store_vmscan_cancelled;
50898
50899 -atomic_t fscache_n_marks;
50900 -atomic_t fscache_n_uncaches;
50901 +atomic_unchecked_t fscache_n_marks;
50902 +atomic_unchecked_t fscache_n_uncaches;
50903
50904 -atomic_t fscache_n_acquires;
50905 -atomic_t fscache_n_acquires_null;
50906 -atomic_t fscache_n_acquires_no_cache;
50907 -atomic_t fscache_n_acquires_ok;
50908 -atomic_t fscache_n_acquires_nobufs;
50909 -atomic_t fscache_n_acquires_oom;
50910 +atomic_unchecked_t fscache_n_acquires;
50911 +atomic_unchecked_t fscache_n_acquires_null;
50912 +atomic_unchecked_t fscache_n_acquires_no_cache;
50913 +atomic_unchecked_t fscache_n_acquires_ok;
50914 +atomic_unchecked_t fscache_n_acquires_nobufs;
50915 +atomic_unchecked_t fscache_n_acquires_oom;
50916
50917 -atomic_t fscache_n_updates;
50918 -atomic_t fscache_n_updates_null;
50919 -atomic_t fscache_n_updates_run;
50920 +atomic_unchecked_t fscache_n_updates;
50921 +atomic_unchecked_t fscache_n_updates_null;
50922 +atomic_unchecked_t fscache_n_updates_run;
50923
50924 -atomic_t fscache_n_relinquishes;
50925 -atomic_t fscache_n_relinquishes_null;
50926 -atomic_t fscache_n_relinquishes_waitcrt;
50927 -atomic_t fscache_n_relinquishes_retire;
50928 +atomic_unchecked_t fscache_n_relinquishes;
50929 +atomic_unchecked_t fscache_n_relinquishes_null;
50930 +atomic_unchecked_t fscache_n_relinquishes_waitcrt;
50931 +atomic_unchecked_t fscache_n_relinquishes_retire;
50932
50933 -atomic_t fscache_n_cookie_index;
50934 -atomic_t fscache_n_cookie_data;
50935 -atomic_t fscache_n_cookie_special;
50936 +atomic_unchecked_t fscache_n_cookie_index;
50937 +atomic_unchecked_t fscache_n_cookie_data;
50938 +atomic_unchecked_t fscache_n_cookie_special;
50939
50940 -atomic_t fscache_n_object_alloc;
50941 -atomic_t fscache_n_object_no_alloc;
50942 -atomic_t fscache_n_object_lookups;
50943 -atomic_t fscache_n_object_lookups_negative;
50944 -atomic_t fscache_n_object_lookups_positive;
50945 -atomic_t fscache_n_object_lookups_timed_out;
50946 -atomic_t fscache_n_object_created;
50947 -atomic_t fscache_n_object_avail;
50948 -atomic_t fscache_n_object_dead;
50949 +atomic_unchecked_t fscache_n_object_alloc;
50950 +atomic_unchecked_t fscache_n_object_no_alloc;
50951 +atomic_unchecked_t fscache_n_object_lookups;
50952 +atomic_unchecked_t fscache_n_object_lookups_negative;
50953 +atomic_unchecked_t fscache_n_object_lookups_positive;
50954 +atomic_unchecked_t fscache_n_object_lookups_timed_out;
50955 +atomic_unchecked_t fscache_n_object_created;
50956 +atomic_unchecked_t fscache_n_object_avail;
50957 +atomic_unchecked_t fscache_n_object_dead;
50958
50959 -atomic_t fscache_n_checkaux_none;
50960 -atomic_t fscache_n_checkaux_okay;
50961 -atomic_t fscache_n_checkaux_update;
50962 -atomic_t fscache_n_checkaux_obsolete;
50963 +atomic_unchecked_t fscache_n_checkaux_none;
50964 +atomic_unchecked_t fscache_n_checkaux_okay;
50965 +atomic_unchecked_t fscache_n_checkaux_update;
50966 +atomic_unchecked_t fscache_n_checkaux_obsolete;
50967
50968 atomic_t fscache_n_cop_alloc_object;
50969 atomic_t fscache_n_cop_lookup_object;
50970 @@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
50971 seq_puts(m, "FS-Cache statistics\n");
50972
50973 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
50974 - atomic_read(&fscache_n_cookie_index),
50975 - atomic_read(&fscache_n_cookie_data),
50976 - atomic_read(&fscache_n_cookie_special));
50977 + atomic_read_unchecked(&fscache_n_cookie_index),
50978 + atomic_read_unchecked(&fscache_n_cookie_data),
50979 + atomic_read_unchecked(&fscache_n_cookie_special));
50980
50981 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
50982 - atomic_read(&fscache_n_object_alloc),
50983 - atomic_read(&fscache_n_object_no_alloc),
50984 - atomic_read(&fscache_n_object_avail),
50985 - atomic_read(&fscache_n_object_dead));
50986 + atomic_read_unchecked(&fscache_n_object_alloc),
50987 + atomic_read_unchecked(&fscache_n_object_no_alloc),
50988 + atomic_read_unchecked(&fscache_n_object_avail),
50989 + atomic_read_unchecked(&fscache_n_object_dead));
50990 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
50991 - atomic_read(&fscache_n_checkaux_none),
50992 - atomic_read(&fscache_n_checkaux_okay),
50993 - atomic_read(&fscache_n_checkaux_update),
50994 - atomic_read(&fscache_n_checkaux_obsolete));
50995 + atomic_read_unchecked(&fscache_n_checkaux_none),
50996 + atomic_read_unchecked(&fscache_n_checkaux_okay),
50997 + atomic_read_unchecked(&fscache_n_checkaux_update),
50998 + atomic_read_unchecked(&fscache_n_checkaux_obsolete));
50999
51000 seq_printf(m, "Pages : mrk=%u unc=%u\n",
51001 - atomic_read(&fscache_n_marks),
51002 - atomic_read(&fscache_n_uncaches));
51003 + atomic_read_unchecked(&fscache_n_marks),
51004 + atomic_read_unchecked(&fscache_n_uncaches));
51005
51006 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
51007 " oom=%u\n",
51008 - atomic_read(&fscache_n_acquires),
51009 - atomic_read(&fscache_n_acquires_null),
51010 - atomic_read(&fscache_n_acquires_no_cache),
51011 - atomic_read(&fscache_n_acquires_ok),
51012 - atomic_read(&fscache_n_acquires_nobufs),
51013 - atomic_read(&fscache_n_acquires_oom));
51014 + atomic_read_unchecked(&fscache_n_acquires),
51015 + atomic_read_unchecked(&fscache_n_acquires_null),
51016 + atomic_read_unchecked(&fscache_n_acquires_no_cache),
51017 + atomic_read_unchecked(&fscache_n_acquires_ok),
51018 + atomic_read_unchecked(&fscache_n_acquires_nobufs),
51019 + atomic_read_unchecked(&fscache_n_acquires_oom));
51020
51021 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
51022 - atomic_read(&fscache_n_object_lookups),
51023 - atomic_read(&fscache_n_object_lookups_negative),
51024 - atomic_read(&fscache_n_object_lookups_positive),
51025 - atomic_read(&fscache_n_object_lookups_timed_out),
51026 - atomic_read(&fscache_n_object_created));
51027 + atomic_read_unchecked(&fscache_n_object_lookups),
51028 + atomic_read_unchecked(&fscache_n_object_lookups_negative),
51029 + atomic_read_unchecked(&fscache_n_object_lookups_positive),
51030 + atomic_read_unchecked(&fscache_n_object_lookups_timed_out),
51031 + atomic_read_unchecked(&fscache_n_object_created));
51032
51033 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
51034 - atomic_read(&fscache_n_updates),
51035 - atomic_read(&fscache_n_updates_null),
51036 - atomic_read(&fscache_n_updates_run));
51037 + atomic_read_unchecked(&fscache_n_updates),
51038 + atomic_read_unchecked(&fscache_n_updates_null),
51039 + atomic_read_unchecked(&fscache_n_updates_run));
51040
51041 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
51042 - atomic_read(&fscache_n_relinquishes),
51043 - atomic_read(&fscache_n_relinquishes_null),
51044 - atomic_read(&fscache_n_relinquishes_waitcrt),
51045 - atomic_read(&fscache_n_relinquishes_retire));
51046 + atomic_read_unchecked(&fscache_n_relinquishes),
51047 + atomic_read_unchecked(&fscache_n_relinquishes_null),
51048 + atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
51049 + atomic_read_unchecked(&fscache_n_relinquishes_retire));
51050
51051 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
51052 - atomic_read(&fscache_n_attr_changed),
51053 - atomic_read(&fscache_n_attr_changed_ok),
51054 - atomic_read(&fscache_n_attr_changed_nobufs),
51055 - atomic_read(&fscache_n_attr_changed_nomem),
51056 - atomic_read(&fscache_n_attr_changed_calls));
51057 + atomic_read_unchecked(&fscache_n_attr_changed),
51058 + atomic_read_unchecked(&fscache_n_attr_changed_ok),
51059 + atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
51060 + atomic_read_unchecked(&fscache_n_attr_changed_nomem),
51061 + atomic_read_unchecked(&fscache_n_attr_changed_calls));
51062
51063 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
51064 - atomic_read(&fscache_n_allocs),
51065 - atomic_read(&fscache_n_allocs_ok),
51066 - atomic_read(&fscache_n_allocs_wait),
51067 - atomic_read(&fscache_n_allocs_nobufs),
51068 - atomic_read(&fscache_n_allocs_intr));
51069 + atomic_read_unchecked(&fscache_n_allocs),
51070 + atomic_read_unchecked(&fscache_n_allocs_ok),
51071 + atomic_read_unchecked(&fscache_n_allocs_wait),
51072 + atomic_read_unchecked(&fscache_n_allocs_nobufs),
51073 + atomic_read_unchecked(&fscache_n_allocs_intr));
51074 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
51075 - atomic_read(&fscache_n_alloc_ops),
51076 - atomic_read(&fscache_n_alloc_op_waits),
51077 - atomic_read(&fscache_n_allocs_object_dead));
51078 + atomic_read_unchecked(&fscache_n_alloc_ops),
51079 + atomic_read_unchecked(&fscache_n_alloc_op_waits),
51080 + atomic_read_unchecked(&fscache_n_allocs_object_dead));
51081
51082 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
51083 " int=%u oom=%u\n",
51084 - atomic_read(&fscache_n_retrievals),
51085 - atomic_read(&fscache_n_retrievals_ok),
51086 - atomic_read(&fscache_n_retrievals_wait),
51087 - atomic_read(&fscache_n_retrievals_nodata),
51088 - atomic_read(&fscache_n_retrievals_nobufs),
51089 - atomic_read(&fscache_n_retrievals_intr),
51090 - atomic_read(&fscache_n_retrievals_nomem));
51091 + atomic_read_unchecked(&fscache_n_retrievals),
51092 + atomic_read_unchecked(&fscache_n_retrievals_ok),
51093 + atomic_read_unchecked(&fscache_n_retrievals_wait),
51094 + atomic_read_unchecked(&fscache_n_retrievals_nodata),
51095 + atomic_read_unchecked(&fscache_n_retrievals_nobufs),
51096 + atomic_read_unchecked(&fscache_n_retrievals_intr),
51097 + atomic_read_unchecked(&fscache_n_retrievals_nomem));
51098 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
51099 - atomic_read(&fscache_n_retrieval_ops),
51100 - atomic_read(&fscache_n_retrieval_op_waits),
51101 - atomic_read(&fscache_n_retrievals_object_dead));
51102 + atomic_read_unchecked(&fscache_n_retrieval_ops),
51103 + atomic_read_unchecked(&fscache_n_retrieval_op_waits),
51104 + atomic_read_unchecked(&fscache_n_retrievals_object_dead));
51105
51106 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
51107 - atomic_read(&fscache_n_stores),
51108 - atomic_read(&fscache_n_stores_ok),
51109 - atomic_read(&fscache_n_stores_again),
51110 - atomic_read(&fscache_n_stores_nobufs),
51111 - atomic_read(&fscache_n_stores_oom));
51112 + atomic_read_unchecked(&fscache_n_stores),
51113 + atomic_read_unchecked(&fscache_n_stores_ok),
51114 + atomic_read_unchecked(&fscache_n_stores_again),
51115 + atomic_read_unchecked(&fscache_n_stores_nobufs),
51116 + atomic_read_unchecked(&fscache_n_stores_oom));
51117 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
51118 - atomic_read(&fscache_n_store_ops),
51119 - atomic_read(&fscache_n_store_calls),
51120 - atomic_read(&fscache_n_store_pages),
51121 - atomic_read(&fscache_n_store_radix_deletes),
51122 - atomic_read(&fscache_n_store_pages_over_limit));
51123 + atomic_read_unchecked(&fscache_n_store_ops),
51124 + atomic_read_unchecked(&fscache_n_store_calls),
51125 + atomic_read_unchecked(&fscache_n_store_pages),
51126 + atomic_read_unchecked(&fscache_n_store_radix_deletes),
51127 + atomic_read_unchecked(&fscache_n_store_pages_over_limit));
51128
51129 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
51130 - atomic_read(&fscache_n_store_vmscan_not_storing),
51131 - atomic_read(&fscache_n_store_vmscan_gone),
51132 - atomic_read(&fscache_n_store_vmscan_busy),
51133 - atomic_read(&fscache_n_store_vmscan_cancelled));
51134 + atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
51135 + atomic_read_unchecked(&fscache_n_store_vmscan_gone),
51136 + atomic_read_unchecked(&fscache_n_store_vmscan_busy),
51137 + atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
51138
51139 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
51140 - atomic_read(&fscache_n_op_pend),
51141 - atomic_read(&fscache_n_op_run),
51142 - atomic_read(&fscache_n_op_enqueue),
51143 - atomic_read(&fscache_n_op_cancelled),
51144 - atomic_read(&fscache_n_op_rejected));
51145 + atomic_read_unchecked(&fscache_n_op_pend),
51146 + atomic_read_unchecked(&fscache_n_op_run),
51147 + atomic_read_unchecked(&fscache_n_op_enqueue),
51148 + atomic_read_unchecked(&fscache_n_op_cancelled),
51149 + atomic_read_unchecked(&fscache_n_op_rejected));
51150 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
51151 - atomic_read(&fscache_n_op_deferred_release),
51152 - atomic_read(&fscache_n_op_release),
51153 - atomic_read(&fscache_n_op_gc));
51154 + atomic_read_unchecked(&fscache_n_op_deferred_release),
51155 + atomic_read_unchecked(&fscache_n_op_release),
51156 + atomic_read_unchecked(&fscache_n_op_gc));
51157
51158 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
51159 atomic_read(&fscache_n_cop_alloc_object),
51160 diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
51161 index de792dc..448b532 100644
51162 --- a/fs/fuse/cuse.c
51163 +++ b/fs/fuse/cuse.c
51164 @@ -576,10 +576,12 @@ static int __init cuse_init(void)
51165 INIT_LIST_HEAD(&cuse_conntbl[i]);
51166
51167 /* inherit and extend fuse_dev_operations */
51168 - cuse_channel_fops = fuse_dev_operations;
51169 - cuse_channel_fops.owner = THIS_MODULE;
51170 - cuse_channel_fops.open = cuse_channel_open;
51171 - cuse_channel_fops.release = cuse_channel_release;
51172 + pax_open_kernel();
51173 + memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
51174 + *(void **)&cuse_channel_fops.owner = THIS_MODULE;
51175 + *(void **)&cuse_channel_fops.open = cuse_channel_open;
51176 + *(void **)&cuse_channel_fops.release = cuse_channel_release;
51177 + pax_close_kernel();
51178
51179 cuse_class = class_create(THIS_MODULE, "cuse");
51180 if (IS_ERR(cuse_class))
51181 diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
51182 index 1facb39..7f48557 100644
51183 --- a/fs/fuse/dev.c
51184 +++ b/fs/fuse/dev.c
51185 @@ -885,7 +885,7 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
51186 {
51187 struct fuse_notify_inval_entry_out outarg;
51188 int err = -EINVAL;
51189 - char buf[FUSE_NAME_MAX+1];
51190 + char *buf = NULL;
51191 struct qstr name;
51192
51193 if (size < sizeof(outarg))
51194 @@ -899,6 +899,11 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
51195 if (outarg.namelen > FUSE_NAME_MAX)
51196 goto err;
51197
51198 + err = -ENOMEM;
51199 + buf = kmalloc(FUSE_NAME_MAX+1, GFP_KERNEL);
51200 + if (!buf)
51201 + goto err;
51202 +
51203 err = -EINVAL;
51204 if (size != sizeof(outarg) + outarg.namelen + 1)
51205 goto err;
51206 @@ -914,17 +919,15 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
51207
51208 down_read(&fc->killsb);
51209 err = -ENOENT;
51210 - if (!fc->sb)
51211 - goto err_unlock;
51212 -
51213 - err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
51214 -
51215 -err_unlock:
51216 + if (fc->sb)
51217 + err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
51218 up_read(&fc->killsb);
51219 + kfree(buf);
51220 return err;
51221
51222 err:
51223 fuse_copy_finish(cs);
51224 + kfree(buf);
51225 return err;
51226 }
51227
51228 diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
51229 index 4787ae6..73efff7 100644
51230 --- a/fs/fuse/dir.c
51231 +++ b/fs/fuse/dir.c
51232 @@ -1127,7 +1127,7 @@ static char *read_link(struct dentry *dentry)
51233 return link;
51234 }
51235
51236 -static void free_link(char *link)
51237 +static void free_link(const char *link)
51238 {
51239 if (!IS_ERR(link))
51240 free_page((unsigned long) link);
51241 diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c
51242 index 247436c..e650ccb 100644
51243 --- a/fs/gfs2/ops_inode.c
51244 +++ b/fs/gfs2/ops_inode.c
51245 @@ -752,6 +752,8 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
51246 unsigned int x;
51247 int error;
51248
51249 + pax_track_stack();
51250 +
51251 if (ndentry->d_inode) {
51252 nip = GFS2_I(ndentry->d_inode);
51253 if (ip == nip)
51254 diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
51255 index 4463297..4fed53b 100644
51256 --- a/fs/gfs2/sys.c
51257 +++ b/fs/gfs2/sys.c
51258 @@ -49,7 +49,7 @@ static ssize_t gfs2_attr_store(struct kobject *kobj, struct attribute *attr,
51259 return a->store ? a->store(sdp, buf, len) : len;
51260 }
51261
51262 -static struct sysfs_ops gfs2_attr_ops = {
51263 +static const struct sysfs_ops gfs2_attr_ops = {
51264 .show = gfs2_attr_show,
51265 .store = gfs2_attr_store,
51266 };
51267 @@ -584,7 +584,7 @@ static int gfs2_uevent(struct kset *kset, struct kobject *kobj,
51268 return 0;
51269 }
51270
51271 -static struct kset_uevent_ops gfs2_uevent_ops = {
51272 +static const struct kset_uevent_ops gfs2_uevent_ops = {
51273 .uevent = gfs2_uevent,
51274 };
51275
51276 diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c
51277 index f6874ac..7cd98a8 100644
51278 --- a/fs/hfsplus/catalog.c
51279 +++ b/fs/hfsplus/catalog.c
51280 @@ -157,6 +157,8 @@ int hfsplus_find_cat(struct super_block *sb, u32 cnid,
51281 int err;
51282 u16 type;
51283
51284 + pax_track_stack();
51285 +
51286 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
51287 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
51288 if (err)
51289 @@ -186,6 +188,8 @@ int hfsplus_create_cat(u32 cnid, struct inode *dir, struct qstr *str, struct ino
51290 int entry_size;
51291 int err;
51292
51293 + pax_track_stack();
51294 +
51295 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n", str->name, cnid, inode->i_nlink);
51296 sb = dir->i_sb;
51297 hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
51298 @@ -318,6 +322,8 @@ int hfsplus_rename_cat(u32 cnid,
51299 int entry_size, type;
51300 int err = 0;
51301
51302 + pax_track_stack();
51303 +
51304 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n", cnid, src_dir->i_ino, src_name->name,
51305 dst_dir->i_ino, dst_name->name);
51306 sb = src_dir->i_sb;
51307 diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
51308 index 5f40236..dac3421 100644
51309 --- a/fs/hfsplus/dir.c
51310 +++ b/fs/hfsplus/dir.c
51311 @@ -121,6 +121,8 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
51312 struct hfsplus_readdir_data *rd;
51313 u16 type;
51314
51315 + pax_track_stack();
51316 +
51317 if (filp->f_pos >= inode->i_size)
51318 return 0;
51319
51320 diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
51321 index 1bcf597..905a251 100644
51322 --- a/fs/hfsplus/inode.c
51323 +++ b/fs/hfsplus/inode.c
51324 @@ -399,6 +399,8 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
51325 int res = 0;
51326 u16 type;
51327
51328 + pax_track_stack();
51329 +
51330 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
51331
51332 HFSPLUS_I(inode).dev = 0;
51333 @@ -461,6 +463,8 @@ int hfsplus_cat_write_inode(struct inode *inode)
51334 struct hfs_find_data fd;
51335 hfsplus_cat_entry entry;
51336
51337 + pax_track_stack();
51338 +
51339 if (HFSPLUS_IS_RSRC(inode))
51340 main_inode = HFSPLUS_I(inode).rsrc_inode;
51341
51342 diff --git a/fs/hfsplus/ioctl.c b/fs/hfsplus/ioctl.c
51343 index f457d2c..7ef4ad5 100644
51344 --- a/fs/hfsplus/ioctl.c
51345 +++ b/fs/hfsplus/ioctl.c
51346 @@ -101,6 +101,8 @@ int hfsplus_setxattr(struct dentry *dentry, const char *name,
51347 struct hfsplus_cat_file *file;
51348 int res;
51349
51350 + pax_track_stack();
51351 +
51352 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
51353 return -EOPNOTSUPP;
51354
51355 @@ -143,6 +145,8 @@ ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name,
51356 struct hfsplus_cat_file *file;
51357 ssize_t res = 0;
51358
51359 + pax_track_stack();
51360 +
51361 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
51362 return -EOPNOTSUPP;
51363
51364 diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
51365 index 43022f3..7298079 100644
51366 --- a/fs/hfsplus/super.c
51367 +++ b/fs/hfsplus/super.c
51368 @@ -312,6 +312,8 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
51369 struct nls_table *nls = NULL;
51370 int err = -EINVAL;
51371
51372 + pax_track_stack();
51373 +
51374 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
51375 if (!sbi)
51376 return -ENOMEM;
51377 diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
51378 index 87a1258..5694d91 100644
51379 --- a/fs/hugetlbfs/inode.c
51380 +++ b/fs/hugetlbfs/inode.c
51381 @@ -909,7 +909,7 @@ static struct file_system_type hugetlbfs_fs_type = {
51382 .kill_sb = kill_litter_super,
51383 };
51384
51385 -static struct vfsmount *hugetlbfs_vfsmount;
51386 +struct vfsmount *hugetlbfs_vfsmount;
51387
51388 static int can_do_hugetlb_shm(void)
51389 {
51390 diff --git a/fs/ioctl.c b/fs/ioctl.c
51391 index 6c75110..19d2c3c 100644
51392 --- a/fs/ioctl.c
51393 +++ b/fs/ioctl.c
51394 @@ -97,7 +97,7 @@ int fiemap_fill_next_extent(struct fiemap_extent_info *fieinfo, u64 logical,
51395 u64 phys, u64 len, u32 flags)
51396 {
51397 struct fiemap_extent extent;
51398 - struct fiemap_extent *dest = fieinfo->fi_extents_start;
51399 + struct fiemap_extent __user *dest = fieinfo->fi_extents_start;
51400
51401 /* only count the extents */
51402 if (fieinfo->fi_extents_max == 0) {
51403 @@ -207,7 +207,7 @@ static int ioctl_fiemap(struct file *filp, unsigned long arg)
51404
51405 fieinfo.fi_flags = fiemap.fm_flags;
51406 fieinfo.fi_extents_max = fiemap.fm_extent_count;
51407 - fieinfo.fi_extents_start = (struct fiemap_extent *)(arg + sizeof(fiemap));
51408 + fieinfo.fi_extents_start = (struct fiemap_extent __user *)(arg + sizeof(fiemap));
51409
51410 if (fiemap.fm_extent_count != 0 &&
51411 !access_ok(VERIFY_WRITE, fieinfo.fi_extents_start,
51412 @@ -220,7 +220,7 @@ static int ioctl_fiemap(struct file *filp, unsigned long arg)
51413 error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start, len);
51414 fiemap.fm_flags = fieinfo.fi_flags;
51415 fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped;
51416 - if (copy_to_user((char *)arg, &fiemap, sizeof(fiemap)))
51417 + if (copy_to_user((__force char __user *)arg, &fiemap, sizeof(fiemap)))
51418 error = -EFAULT;
51419
51420 return error;
51421 diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
51422 index b0435dd..81ee0be 100644
51423 --- a/fs/jbd/checkpoint.c
51424 +++ b/fs/jbd/checkpoint.c
51425 @@ -348,6 +348,8 @@ int log_do_checkpoint(journal_t *journal)
51426 tid_t this_tid;
51427 int result;
51428
51429 + pax_track_stack();
51430 +
51431 jbd_debug(1, "Start checkpoint\n");
51432
51433 /*
51434 diff --git a/fs/jffs2/compr_rtime.c b/fs/jffs2/compr_rtime.c
51435 index 546d153..736896c 100644
51436 --- a/fs/jffs2/compr_rtime.c
51437 +++ b/fs/jffs2/compr_rtime.c
51438 @@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned char *data_in,
51439 int outpos = 0;
51440 int pos=0;
51441
51442 + pax_track_stack();
51443 +
51444 memset(positions,0,sizeof(positions));
51445
51446 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
51447 @@ -79,6 +81,8 @@ static int jffs2_rtime_decompress(unsigned char *data_in,
51448 int outpos = 0;
51449 int pos=0;
51450
51451 + pax_track_stack();
51452 +
51453 memset(positions,0,sizeof(positions));
51454
51455 while (outpos<destlen) {
51456 diff --git a/fs/jffs2/compr_rubin.c b/fs/jffs2/compr_rubin.c
51457 index 170d289..3254b98 100644
51458 --- a/fs/jffs2/compr_rubin.c
51459 +++ b/fs/jffs2/compr_rubin.c
51460 @@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsigned char *data_in,
51461 int ret;
51462 uint32_t mysrclen, mydstlen;
51463
51464 + pax_track_stack();
51465 +
51466 mysrclen = *sourcelen;
51467 mydstlen = *dstlen - 8;
51468
51469 diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
51470 index b47679b..00d65d3 100644
51471 --- a/fs/jffs2/erase.c
51472 +++ b/fs/jffs2/erase.c
51473 @@ -434,7 +434,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
51474 struct jffs2_unknown_node marker = {
51475 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
51476 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
51477 - .totlen = cpu_to_je32(c->cleanmarker_size)
51478 + .totlen = cpu_to_je32(c->cleanmarker_size),
51479 + .hdr_crc = cpu_to_je32(0)
51480 };
51481
51482 jffs2_prealloc_raw_node_refs(c, jeb, 1);
51483 diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
51484 index 5ef7bac..4fd1e3c 100644
51485 --- a/fs/jffs2/wbuf.c
51486 +++ b/fs/jffs2/wbuf.c
51487 @@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
51488 {
51489 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
51490 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
51491 - .totlen = constant_cpu_to_je32(8)
51492 + .totlen = constant_cpu_to_je32(8),
51493 + .hdr_crc = constant_cpu_to_je32(0)
51494 };
51495
51496 /*
51497 diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c
51498 index 082e844..52012a1 100644
51499 --- a/fs/jffs2/xattr.c
51500 +++ b/fs/jffs2/xattr.c
51501 @@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct jffs2_sb_info *c)
51502
51503 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
51504
51505 + pax_track_stack();
51506 +
51507 /* Phase.1 : Merge same xref */
51508 for (i=0; i < XREF_TMPHASH_SIZE; i++)
51509 xref_tmphash[i] = NULL;
51510 diff --git a/fs/jfs/super.c b/fs/jfs/super.c
51511 index 2234c73..f6e6e6b 100644
51512 --- a/fs/jfs/super.c
51513 +++ b/fs/jfs/super.c
51514 @@ -793,7 +793,7 @@ static int __init init_jfs_fs(void)
51515
51516 jfs_inode_cachep =
51517 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
51518 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
51519 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
51520 init_once);
51521 if (jfs_inode_cachep == NULL)
51522 return -ENOMEM;
51523 diff --git a/fs/libfs.c b/fs/libfs.c
51524 index ba36e93..3153fce 100644
51525 --- a/fs/libfs.c
51526 +++ b/fs/libfs.c
51527 @@ -157,12 +157,20 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
51528
51529 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
51530 struct dentry *next;
51531 + char d_name[sizeof(next->d_iname)];
51532 + const unsigned char *name;
51533 +
51534 next = list_entry(p, struct dentry, d_u.d_child);
51535 if (d_unhashed(next) || !next->d_inode)
51536 continue;
51537
51538 spin_unlock(&dcache_lock);
51539 - if (filldir(dirent, next->d_name.name,
51540 + name = next->d_name.name;
51541 + if (name == next->d_iname) {
51542 + memcpy(d_name, name, next->d_name.len);
51543 + name = d_name;
51544 + }
51545 + if (filldir(dirent, name,
51546 next->d_name.len, filp->f_pos,
51547 next->d_inode->i_ino,
51548 dt_type(next->d_inode)) < 0)
51549 diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
51550 index c325a83..d15b07b 100644
51551 --- a/fs/lockd/clntproc.c
51552 +++ b/fs/lockd/clntproc.c
51553 @@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
51554 /*
51555 * Cookie counter for NLM requests
51556 */
51557 -static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
51558 +static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
51559
51560 void nlmclnt_next_cookie(struct nlm_cookie *c)
51561 {
51562 - u32 cookie = atomic_inc_return(&nlm_cookie);
51563 + u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
51564
51565 memcpy(c->data, &cookie, 4);
51566 c->len=4;
51567 @@ -621,6 +621,8 @@ nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl)
51568 struct nlm_rqst reqst, *req;
51569 int status;
51570
51571 + pax_track_stack();
51572 +
51573 req = &reqst;
51574 memset(req, 0, sizeof(*req));
51575 locks_init_lock(&req->a_args.lock.fl);
51576 diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
51577 index 1a54ae1..6a16c27 100644
51578 --- a/fs/lockd/svc.c
51579 +++ b/fs/lockd/svc.c
51580 @@ -43,7 +43,7 @@
51581
51582 static struct svc_program nlmsvc_program;
51583
51584 -struct nlmsvc_binding * nlmsvc_ops;
51585 +const struct nlmsvc_binding * nlmsvc_ops;
51586 EXPORT_SYMBOL_GPL(nlmsvc_ops);
51587
51588 static DEFINE_MUTEX(nlmsvc_mutex);
51589 diff --git a/fs/locks.c b/fs/locks.c
51590 index a8794f2..4041e55 100644
51591 --- a/fs/locks.c
51592 +++ b/fs/locks.c
51593 @@ -145,10 +145,28 @@ static LIST_HEAD(blocked_list);
51594
51595 static struct kmem_cache *filelock_cache __read_mostly;
51596
51597 +static void locks_init_lock_always(struct file_lock *fl)
51598 +{
51599 + fl->fl_next = NULL;
51600 + fl->fl_fasync = NULL;
51601 + fl->fl_owner = NULL;
51602 + fl->fl_pid = 0;
51603 + fl->fl_nspid = NULL;
51604 + fl->fl_file = NULL;
51605 + fl->fl_flags = 0;
51606 + fl->fl_type = 0;
51607 + fl->fl_start = fl->fl_end = 0;
51608 +}
51609 +
51610 /* Allocate an empty lock structure. */
51611 static struct file_lock *locks_alloc_lock(void)
51612 {
51613 - return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
51614 + struct file_lock *fl = kmem_cache_alloc(filelock_cache, GFP_KERNEL);
51615 +
51616 + if (fl)
51617 + locks_init_lock_always(fl);
51618 +
51619 + return fl;
51620 }
51621
51622 void locks_release_private(struct file_lock *fl)
51623 @@ -183,17 +201,9 @@ void locks_init_lock(struct file_lock *fl)
51624 INIT_LIST_HEAD(&fl->fl_link);
51625 INIT_LIST_HEAD(&fl->fl_block);
51626 init_waitqueue_head(&fl->fl_wait);
51627 - fl->fl_next = NULL;
51628 - fl->fl_fasync = NULL;
51629 - fl->fl_owner = NULL;
51630 - fl->fl_pid = 0;
51631 - fl->fl_nspid = NULL;
51632 - fl->fl_file = NULL;
51633 - fl->fl_flags = 0;
51634 - fl->fl_type = 0;
51635 - fl->fl_start = fl->fl_end = 0;
51636 fl->fl_ops = NULL;
51637 fl->fl_lmops = NULL;
51638 + locks_init_lock_always(fl);
51639 }
51640
51641 EXPORT_SYMBOL(locks_init_lock);
51642 @@ -2007,16 +2017,16 @@ void locks_remove_flock(struct file *filp)
51643 return;
51644
51645 if (filp->f_op && filp->f_op->flock) {
51646 - struct file_lock fl = {
51647 + struct file_lock flock = {
51648 .fl_pid = current->tgid,
51649 .fl_file = filp,
51650 .fl_flags = FL_FLOCK,
51651 .fl_type = F_UNLCK,
51652 .fl_end = OFFSET_MAX,
51653 };
51654 - filp->f_op->flock(filp, F_SETLKW, &fl);
51655 - if (fl.fl_ops && fl.fl_ops->fl_release_private)
51656 - fl.fl_ops->fl_release_private(&fl);
51657 + filp->f_op->flock(filp, F_SETLKW, &flock);
51658 + if (flock.fl_ops && flock.fl_ops->fl_release_private)
51659 + flock.fl_ops->fl_release_private(&flock);
51660 }
51661
51662 lock_kernel();
51663 diff --git a/fs/mbcache.c b/fs/mbcache.c
51664 index ec88ff3..b843a82 100644
51665 --- a/fs/mbcache.c
51666 +++ b/fs/mbcache.c
51667 @@ -266,9 +266,9 @@ mb_cache_create(const char *name, struct mb_cache_op *cache_op,
51668 if (!cache)
51669 goto fail;
51670 cache->c_name = name;
51671 - cache->c_op.free = NULL;
51672 + *(void **)&cache->c_op.free = NULL;
51673 if (cache_op)
51674 - cache->c_op.free = cache_op->free;
51675 + *(void **)&cache->c_op.free = cache_op->free;
51676 atomic_set(&cache->c_entry_count, 0);
51677 cache->c_bucket_bits = bucket_bits;
51678 #ifdef MB_CACHE_INDEXES_COUNT
51679 diff --git a/fs/namei.c b/fs/namei.c
51680 index b0afbd4..8d065a1 100644
51681 --- a/fs/namei.c
51682 +++ b/fs/namei.c
51683 @@ -224,6 +224,14 @@ int generic_permission(struct inode *inode, int mask,
51684 return ret;
51685
51686 /*
51687 + * Searching includes executable on directories, else just read.
51688 + */
51689 + mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
51690 + if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
51691 + if (capable(CAP_DAC_READ_SEARCH))
51692 + return 0;
51693 +
51694 + /*
51695 * Read/write DACs are always overridable.
51696 * Executable DACs are overridable if at least one exec bit is set.
51697 */
51698 @@ -231,14 +239,6 @@ int generic_permission(struct inode *inode, int mask,
51699 if (capable(CAP_DAC_OVERRIDE))
51700 return 0;
51701
51702 - /*
51703 - * Searching includes executable on directories, else just read.
51704 - */
51705 - mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
51706 - if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
51707 - if (capable(CAP_DAC_READ_SEARCH))
51708 - return 0;
51709 -
51710 return -EACCES;
51711 }
51712
51713 @@ -458,7 +458,8 @@ static int exec_permission_lite(struct inode *inode)
51714 if (!ret)
51715 goto ok;
51716
51717 - if (capable(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH))
51718 + if (capable_nolog(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH) ||
51719 + capable(CAP_DAC_OVERRIDE))
51720 goto ok;
51721
51722 return ret;
51723 @@ -638,7 +639,7 @@ static __always_inline int __do_follow_link(struct path *path, struct nameidata
51724 cookie = dentry->d_inode->i_op->follow_link(dentry, nd);
51725 error = PTR_ERR(cookie);
51726 if (!IS_ERR(cookie)) {
51727 - char *s = nd_get_link(nd);
51728 + const char *s = nd_get_link(nd);
51729 error = 0;
51730 if (s)
51731 error = __vfs_follow_link(nd, s);
51732 @@ -669,6 +670,13 @@ static inline int do_follow_link(struct path *path, struct nameidata *nd)
51733 err = security_inode_follow_link(path->dentry, nd);
51734 if (err)
51735 goto loop;
51736 +
51737 + if (gr_handle_follow_link(path->dentry->d_parent->d_inode,
51738 + path->dentry->d_inode, path->dentry, nd->path.mnt)) {
51739 + err = -EACCES;
51740 + goto loop;
51741 + }
51742 +
51743 current->link_count++;
51744 current->total_link_count++;
51745 nd->depth++;
51746 @@ -1016,11 +1024,19 @@ return_reval:
51747 break;
51748 }
51749 return_base:
51750 + if (!(nd->flags & (LOOKUP_CONTINUE | LOOKUP_PARENT)) &&
51751 + !gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
51752 + path_put(&nd->path);
51753 + return -ENOENT;
51754 + }
51755 return 0;
51756 out_dput:
51757 path_put_conditional(&next, nd);
51758 break;
51759 }
51760 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
51761 + err = -ENOENT;
51762 +
51763 path_put(&nd->path);
51764 return_err:
51765 return err;
51766 @@ -1091,13 +1107,20 @@ static int do_path_lookup(int dfd, const char *name,
51767 int retval = path_init(dfd, name, flags, nd);
51768 if (!retval)
51769 retval = path_walk(name, nd);
51770 - if (unlikely(!retval && !audit_dummy_context() && nd->path.dentry &&
51771 - nd->path.dentry->d_inode))
51772 - audit_inode(name, nd->path.dentry);
51773 +
51774 + if (likely(!retval)) {
51775 + if (nd->path.dentry && nd->path.dentry->d_inode) {
51776 + if (*name != '/' && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
51777 + retval = -ENOENT;
51778 + if (!audit_dummy_context())
51779 + audit_inode(name, nd->path.dentry);
51780 + }
51781 + }
51782 if (nd->root.mnt) {
51783 path_put(&nd->root);
51784 nd->root.mnt = NULL;
51785 }
51786 +
51787 return retval;
51788 }
51789
51790 @@ -1576,6 +1599,20 @@ int may_open(struct path *path, int acc_mode, int flag)
51791 if (error)
51792 goto err_out;
51793
51794 +
51795 + if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
51796 + error = -EPERM;
51797 + goto err_out;
51798 + }
51799 + if (gr_handle_rawio(inode)) {
51800 + error = -EPERM;
51801 + goto err_out;
51802 + }
51803 + if (!gr_acl_handle_open(dentry, path->mnt, acc_mode)) {
51804 + error = -EACCES;
51805 + goto err_out;
51806 + }
51807 +
51808 if (flag & O_TRUNC) {
51809 error = get_write_access(inode);
51810 if (error)
51811 @@ -1620,6 +1657,17 @@ static int __open_namei_create(struct nameidata *nd, struct path *path,
51812 {
51813 int error;
51814 struct dentry *dir = nd->path.dentry;
51815 + int acc_mode = ACC_MODE(flag);
51816 +
51817 + if (flag & O_TRUNC)
51818 + acc_mode |= MAY_WRITE;
51819 + if (flag & O_APPEND)
51820 + acc_mode |= MAY_APPEND;
51821 +
51822 + if (!gr_acl_handle_creat(path->dentry, dir, nd->path.mnt, flag, acc_mode, mode)) {
51823 + error = -EACCES;
51824 + goto out_unlock;
51825 + }
51826
51827 if (!IS_POSIXACL(dir->d_inode))
51828 mode &= ~current_umask();
51829 @@ -1627,6 +1675,8 @@ static int __open_namei_create(struct nameidata *nd, struct path *path,
51830 if (error)
51831 goto out_unlock;
51832 error = vfs_create(dir->d_inode, path->dentry, mode, nd);
51833 + if (!error)
51834 + gr_handle_create(path->dentry, nd->path.mnt);
51835 out_unlock:
51836 mutex_unlock(&dir->d_inode->i_mutex);
51837 dput(nd->path.dentry);
51838 @@ -1709,6 +1759,22 @@ struct file *do_filp_open(int dfd, const char *pathname,
51839 &nd, flag);
51840 if (error)
51841 return ERR_PTR(error);
51842 +
51843 + if (gr_handle_rofs_blockwrite(nd.path.dentry, nd.path.mnt, acc_mode)) {
51844 + error = -EPERM;
51845 + goto exit;
51846 + }
51847 +
51848 + if (gr_handle_rawio(nd.path.dentry->d_inode)) {
51849 + error = -EPERM;
51850 + goto exit;
51851 + }
51852 +
51853 + if (!gr_acl_handle_open(nd.path.dentry, nd.path.mnt, acc_mode)) {
51854 + error = -EACCES;
51855 + goto exit;
51856 + }
51857 +
51858 goto ok;
51859 }
51860
51861 @@ -1795,6 +1861,19 @@ do_last:
51862 /*
51863 * It already exists.
51864 */
51865 +
51866 + if (!gr_acl_handle_hidden_file(path.dentry, path.mnt)) {
51867 + error = -ENOENT;
51868 + goto exit_mutex_unlock;
51869 + }
51870 +
51871 + /* only check if O_CREAT is specified, all other checks need
51872 + to go into may_open */
51873 + if (gr_handle_fifo(path.dentry, path.mnt, dir, flag, acc_mode)) {
51874 + error = -EACCES;
51875 + goto exit_mutex_unlock;
51876 + }
51877 +
51878 mutex_unlock(&dir->d_inode->i_mutex);
51879 audit_inode(pathname, path.dentry);
51880
51881 @@ -1887,6 +1966,13 @@ do_link:
51882 error = security_inode_follow_link(path.dentry, &nd);
51883 if (error)
51884 goto exit_dput;
51885 +
51886 + if (gr_handle_follow_link(path.dentry->d_parent->d_inode, path.dentry->d_inode,
51887 + path.dentry, nd.path.mnt)) {
51888 + error = -EACCES;
51889 + goto exit_dput;
51890 + }
51891 +
51892 error = __do_follow_link(&path, &nd);
51893 if (error) {
51894 /* Does someone understand code flow here? Or it is only
51895 @@ -1984,6 +2070,10 @@ struct dentry *lookup_create(struct nameidata *nd, int is_dir)
51896 }
51897 return dentry;
51898 eexist:
51899 + if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
51900 + dput(dentry);
51901 + return ERR_PTR(-ENOENT);
51902 + }
51903 dput(dentry);
51904 dentry = ERR_PTR(-EEXIST);
51905 fail:
51906 @@ -2061,6 +2151,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
51907 error = may_mknod(mode);
51908 if (error)
51909 goto out_dput;
51910 +
51911 + if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
51912 + error = -EPERM;
51913 + goto out_dput;
51914 + }
51915 +
51916 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
51917 + error = -EACCES;
51918 + goto out_dput;
51919 + }
51920 +
51921 error = mnt_want_write(nd.path.mnt);
51922 if (error)
51923 goto out_dput;
51924 @@ -2081,6 +2182,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
51925 }
51926 out_drop_write:
51927 mnt_drop_write(nd.path.mnt);
51928 +
51929 + if (!error)
51930 + gr_handle_create(dentry, nd.path.mnt);
51931 out_dput:
51932 dput(dentry);
51933 out_unlock:
51934 @@ -2134,6 +2238,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
51935 if (IS_ERR(dentry))
51936 goto out_unlock;
51937
51938 + if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
51939 + error = -EACCES;
51940 + goto out_dput;
51941 + }
51942 +
51943 if (!IS_POSIXACL(nd.path.dentry->d_inode))
51944 mode &= ~current_umask();
51945 error = mnt_want_write(nd.path.mnt);
51946 @@ -2145,6 +2254,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
51947 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
51948 out_drop_write:
51949 mnt_drop_write(nd.path.mnt);
51950 +
51951 + if (!error)
51952 + gr_handle_create(dentry, nd.path.mnt);
51953 +
51954 out_dput:
51955 dput(dentry);
51956 out_unlock:
51957 @@ -2226,6 +2339,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
51958 char * name;
51959 struct dentry *dentry;
51960 struct nameidata nd;
51961 + ino_t saved_ino = 0;
51962 + dev_t saved_dev = 0;
51963
51964 error = user_path_parent(dfd, pathname, &nd, &name);
51965 if (error)
51966 @@ -2250,6 +2365,17 @@ static long do_rmdir(int dfd, const char __user *pathname)
51967 error = PTR_ERR(dentry);
51968 if (IS_ERR(dentry))
51969 goto exit2;
51970 +
51971 + if (dentry->d_inode != NULL) {
51972 + saved_ino = dentry->d_inode->i_ino;
51973 + saved_dev = gr_get_dev_from_dentry(dentry);
51974 +
51975 + if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
51976 + error = -EACCES;
51977 + goto exit3;
51978 + }
51979 + }
51980 +
51981 error = mnt_want_write(nd.path.mnt);
51982 if (error)
51983 goto exit3;
51984 @@ -2257,6 +2383,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
51985 if (error)
51986 goto exit4;
51987 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
51988 + if (!error && (saved_dev || saved_ino))
51989 + gr_handle_delete(saved_ino, saved_dev);
51990 exit4:
51991 mnt_drop_write(nd.path.mnt);
51992 exit3:
51993 @@ -2318,6 +2446,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
51994 struct dentry *dentry;
51995 struct nameidata nd;
51996 struct inode *inode = NULL;
51997 + ino_t saved_ino = 0;
51998 + dev_t saved_dev = 0;
51999
52000 error = user_path_parent(dfd, pathname, &nd, &name);
52001 if (error)
52002 @@ -2337,8 +2467,19 @@ static long do_unlinkat(int dfd, const char __user *pathname)
52003 if (nd.last.name[nd.last.len])
52004 goto slashes;
52005 inode = dentry->d_inode;
52006 - if (inode)
52007 + if (inode) {
52008 + if (inode->i_nlink <= 1) {
52009 + saved_ino = inode->i_ino;
52010 + saved_dev = gr_get_dev_from_dentry(dentry);
52011 + }
52012 +
52013 atomic_inc(&inode->i_count);
52014 +
52015 + if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
52016 + error = -EACCES;
52017 + goto exit2;
52018 + }
52019 + }
52020 error = mnt_want_write(nd.path.mnt);
52021 if (error)
52022 goto exit2;
52023 @@ -2346,6 +2487,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
52024 if (error)
52025 goto exit3;
52026 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
52027 + if (!error && (saved_ino || saved_dev))
52028 + gr_handle_delete(saved_ino, saved_dev);
52029 exit3:
52030 mnt_drop_write(nd.path.mnt);
52031 exit2:
52032 @@ -2424,6 +2567,11 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
52033 if (IS_ERR(dentry))
52034 goto out_unlock;
52035
52036 + if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
52037 + error = -EACCES;
52038 + goto out_dput;
52039 + }
52040 +
52041 error = mnt_want_write(nd.path.mnt);
52042 if (error)
52043 goto out_dput;
52044 @@ -2431,6 +2579,8 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
52045 if (error)
52046 goto out_drop_write;
52047 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
52048 + if (!error)
52049 + gr_handle_create(dentry, nd.path.mnt);
52050 out_drop_write:
52051 mnt_drop_write(nd.path.mnt);
52052 out_dput:
52053 @@ -2524,6 +2674,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
52054 error = PTR_ERR(new_dentry);
52055 if (IS_ERR(new_dentry))
52056 goto out_unlock;
52057 +
52058 + if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
52059 + old_path.dentry->d_inode,
52060 + old_path.dentry->d_inode->i_mode, to)) {
52061 + error = -EACCES;
52062 + goto out_dput;
52063 + }
52064 +
52065 + if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
52066 + old_path.dentry, old_path.mnt, to)) {
52067 + error = -EACCES;
52068 + goto out_dput;
52069 + }
52070 +
52071 error = mnt_want_write(nd.path.mnt);
52072 if (error)
52073 goto out_dput;
52074 @@ -2531,6 +2695,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
52075 if (error)
52076 goto out_drop_write;
52077 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
52078 + if (!error)
52079 + gr_handle_create(new_dentry, nd.path.mnt);
52080 out_drop_write:
52081 mnt_drop_write(nd.path.mnt);
52082 out_dput:
52083 @@ -2708,6 +2874,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
52084 char *to;
52085 int error;
52086
52087 + pax_track_stack();
52088 +
52089 error = user_path_parent(olddfd, oldname, &oldnd, &from);
52090 if (error)
52091 goto exit;
52092 @@ -2764,6 +2932,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
52093 if (new_dentry == trap)
52094 goto exit5;
52095
52096 + error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
52097 + old_dentry, old_dir->d_inode, oldnd.path.mnt,
52098 + to);
52099 + if (error)
52100 + goto exit5;
52101 +
52102 error = mnt_want_write(oldnd.path.mnt);
52103 if (error)
52104 goto exit5;
52105 @@ -2773,6 +2947,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
52106 goto exit6;
52107 error = vfs_rename(old_dir->d_inode, old_dentry,
52108 new_dir->d_inode, new_dentry);
52109 + if (!error)
52110 + gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
52111 + new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
52112 exit6:
52113 mnt_drop_write(oldnd.path.mnt);
52114 exit5:
52115 @@ -2798,6 +2975,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
52116
52117 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
52118 {
52119 + char tmpbuf[64];
52120 + const char *newlink;
52121 int len;
52122
52123 len = PTR_ERR(link);
52124 @@ -2807,7 +2986,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
52125 len = strlen(link);
52126 if (len > (unsigned) buflen)
52127 len = buflen;
52128 - if (copy_to_user(buffer, link, len))
52129 +
52130 + if (len < sizeof(tmpbuf)) {
52131 + memcpy(tmpbuf, link, len);
52132 + newlink = tmpbuf;
52133 + } else
52134 + newlink = link;
52135 +
52136 + if (copy_to_user(buffer, newlink, len))
52137 len = -EFAULT;
52138 out:
52139 return len;
52140 diff --git a/fs/namespace.c b/fs/namespace.c
52141 index 2beb0fb..11a95a5 100644
52142 --- a/fs/namespace.c
52143 +++ b/fs/namespace.c
52144 @@ -1083,6 +1083,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
52145 if (!(sb->s_flags & MS_RDONLY))
52146 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
52147 up_write(&sb->s_umount);
52148 +
52149 + gr_log_remount(mnt->mnt_devname, retval);
52150 +
52151 return retval;
52152 }
52153
52154 @@ -1104,6 +1107,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
52155 security_sb_umount_busy(mnt);
52156 up_write(&namespace_sem);
52157 release_mounts(&umount_list);
52158 +
52159 + gr_log_unmount(mnt->mnt_devname, retval);
52160 +
52161 return retval;
52162 }
52163
52164 @@ -1962,6 +1968,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
52165 if (retval)
52166 goto dput_out;
52167
52168 + if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
52169 + retval = -EPERM;
52170 + goto dput_out;
52171 + }
52172 +
52173 + if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
52174 + retval = -EPERM;
52175 + goto dput_out;
52176 + }
52177 +
52178 if (flags & MS_REMOUNT)
52179 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
52180 data_page);
52181 @@ -1976,6 +1992,9 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
52182 dev_name, data_page);
52183 dput_out:
52184 path_put(&path);
52185 +
52186 + gr_log_mount(dev_name, dir_name, retval);
52187 +
52188 return retval;
52189 }
52190
52191 @@ -2182,6 +2201,12 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
52192 goto out1;
52193 }
52194
52195 + if (gr_handle_chroot_pivot()) {
52196 + error = -EPERM;
52197 + path_put(&old);
52198 + goto out1;
52199 + }
52200 +
52201 read_lock(&current->fs->lock);
52202 root = current->fs->root;
52203 path_get(&current->fs->root);
52204 diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
52205 index b8b5b30..2bd9ccb 100644
52206 --- a/fs/ncpfs/dir.c
52207 +++ b/fs/ncpfs/dir.c
52208 @@ -275,6 +275,8 @@ __ncp_lookup_validate(struct dentry *dentry)
52209 int res, val = 0, len;
52210 __u8 __name[NCP_MAXPATHLEN + 1];
52211
52212 + pax_track_stack();
52213 +
52214 parent = dget_parent(dentry);
52215 dir = parent->d_inode;
52216
52217 @@ -799,6 +801,8 @@ static struct dentry *ncp_lookup(struct inode *dir, struct dentry *dentry, struc
52218 int error, res, len;
52219 __u8 __name[NCP_MAXPATHLEN + 1];
52220
52221 + pax_track_stack();
52222 +
52223 lock_kernel();
52224 error = -EIO;
52225 if (!ncp_conn_valid(server))
52226 @@ -883,10 +887,12 @@ int ncp_create_new(struct inode *dir, struct dentry *dentry, int mode,
52227 int error, result, len;
52228 int opmode;
52229 __u8 __name[NCP_MAXPATHLEN + 1];
52230 -
52231 +
52232 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
52233 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
52234
52235 + pax_track_stack();
52236 +
52237 error = -EIO;
52238 lock_kernel();
52239 if (!ncp_conn_valid(server))
52240 @@ -952,6 +958,8 @@ static int ncp_mkdir(struct inode *dir, struct dentry *dentry, int mode)
52241 int error, len;
52242 __u8 __name[NCP_MAXPATHLEN + 1];
52243
52244 + pax_track_stack();
52245 +
52246 DPRINTK("ncp_mkdir: making %s/%s\n",
52247 dentry->d_parent->d_name.name, dentry->d_name.name);
52248
52249 @@ -960,6 +968,8 @@ static int ncp_mkdir(struct inode *dir, struct dentry *dentry, int mode)
52250 if (!ncp_conn_valid(server))
52251 goto out;
52252
52253 + pax_track_stack();
52254 +
52255 ncp_age_dentry(server, dentry);
52256 len = sizeof(__name);
52257 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
52258 @@ -1114,6 +1124,8 @@ static int ncp_rename(struct inode *old_dir, struct dentry *old_dentry,
52259 int old_len, new_len;
52260 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
52261
52262 + pax_track_stack();
52263 +
52264 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
52265 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
52266 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
52267 diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
52268 index cf98da1..da890a9 100644
52269 --- a/fs/ncpfs/inode.c
52270 +++ b/fs/ncpfs/inode.c
52271 @@ -445,6 +445,8 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
52272 #endif
52273 struct ncp_entry_info finfo;
52274
52275 + pax_track_stack();
52276 +
52277 data.wdog_pid = NULL;
52278 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
52279 if (!server)
52280 diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
52281 index bfaef7b..e9d03ca 100644
52282 --- a/fs/nfs/inode.c
52283 +++ b/fs/nfs/inode.c
52284 @@ -156,7 +156,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
52285 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
52286 nfsi->attrtimeo_timestamp = jiffies;
52287
52288 - memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
52289 + memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
52290 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
52291 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
52292 else
52293 @@ -973,16 +973,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
52294 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
52295 }
52296
52297 -static atomic_long_t nfs_attr_generation_counter;
52298 +static atomic_long_unchecked_t nfs_attr_generation_counter;
52299
52300 static unsigned long nfs_read_attr_generation_counter(void)
52301 {
52302 - return atomic_long_read(&nfs_attr_generation_counter);
52303 + return atomic_long_read_unchecked(&nfs_attr_generation_counter);
52304 }
52305
52306 unsigned long nfs_inc_attr_generation_counter(void)
52307 {
52308 - return atomic_long_inc_return(&nfs_attr_generation_counter);
52309 + return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
52310 }
52311
52312 void nfs_fattr_init(struct nfs_fattr *fattr)
52313 diff --git a/fs/nfsd/lockd.c b/fs/nfsd/lockd.c
52314 index cc2f505..f6a236f 100644
52315 --- a/fs/nfsd/lockd.c
52316 +++ b/fs/nfsd/lockd.c
52317 @@ -66,7 +66,7 @@ nlm_fclose(struct file *filp)
52318 fput(filp);
52319 }
52320
52321 -static struct nlmsvc_binding nfsd_nlm_ops = {
52322 +static const struct nlmsvc_binding nfsd_nlm_ops = {
52323 .fopen = nlm_fopen, /* open file for locking */
52324 .fclose = nlm_fclose, /* close file */
52325 };
52326 diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
52327 index cfc3391..dcc083a 100644
52328 --- a/fs/nfsd/nfs4state.c
52329 +++ b/fs/nfsd/nfs4state.c
52330 @@ -3459,6 +3459,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
52331 unsigned int cmd;
52332 int err;
52333
52334 + pax_track_stack();
52335 +
52336 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
52337 (long long) lock->lk_offset,
52338 (long long) lock->lk_length);
52339 diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
52340 index 4a82a96..0d5fb49 100644
52341 --- a/fs/nfsd/nfs4xdr.c
52342 +++ b/fs/nfsd/nfs4xdr.c
52343 @@ -1751,6 +1751,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
52344 struct nfsd4_compoundres *resp = rqstp->rq_resp;
52345 u32 minorversion = resp->cstate.minorversion;
52346
52347 + pax_track_stack();
52348 +
52349 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
52350 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
52351 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
52352 diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
52353 index 2e09588..596421d 100644
52354 --- a/fs/nfsd/vfs.c
52355 +++ b/fs/nfsd/vfs.c
52356 @@ -937,7 +937,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
52357 } else {
52358 oldfs = get_fs();
52359 set_fs(KERNEL_DS);
52360 - host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
52361 + host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
52362 set_fs(oldfs);
52363 }
52364
52365 @@ -1060,7 +1060,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
52366
52367 /* Write the data. */
52368 oldfs = get_fs(); set_fs(KERNEL_DS);
52369 - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
52370 + host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
52371 set_fs(oldfs);
52372 if (host_err < 0)
52373 goto out_nfserr;
52374 @@ -1542,7 +1542,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
52375 */
52376
52377 oldfs = get_fs(); set_fs(KERNEL_DS);
52378 - host_err = inode->i_op->readlink(dentry, buf, *lenp);
52379 + host_err = inode->i_op->readlink(dentry, (char __force_user *)buf, *lenp);
52380 set_fs(oldfs);
52381
52382 if (host_err < 0)
52383 diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
52384 index f6af760..d0adf34 100644
52385 --- a/fs/nilfs2/ioctl.c
52386 +++ b/fs/nilfs2/ioctl.c
52387 @@ -480,7 +480,7 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
52388 unsigned int cmd, void __user *argp)
52389 {
52390 struct nilfs_argv argv[5];
52391 - const static size_t argsz[5] = {
52392 + static const size_t argsz[5] = {
52393 sizeof(struct nilfs_vdesc),
52394 sizeof(struct nilfs_period),
52395 sizeof(__u64),
52396 @@ -522,6 +522,9 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
52397 if (argv[n].v_nmembs > nsegs * nilfs->ns_blocks_per_segment)
52398 goto out_free;
52399
52400 + if (argv[n].v_nmembs >= UINT_MAX / argv[n].v_size)
52401 + goto out_free;
52402 +
52403 len = argv[n].v_size * argv[n].v_nmembs;
52404 base = (void __user *)(unsigned long)argv[n].v_base;
52405 if (len == 0) {
52406 diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c
52407 index 7e54e52..9337248 100644
52408 --- a/fs/notify/dnotify/dnotify.c
52409 +++ b/fs/notify/dnotify/dnotify.c
52410 @@ -173,7 +173,7 @@ static void dnotify_free_mark(struct fsnotify_mark_entry *entry)
52411 kmem_cache_free(dnotify_mark_entry_cache, dnentry);
52412 }
52413
52414 -static struct fsnotify_ops dnotify_fsnotify_ops = {
52415 +static const struct fsnotify_ops dnotify_fsnotify_ops = {
52416 .handle_event = dnotify_handle_event,
52417 .should_send_event = dnotify_should_send_event,
52418 .free_group_priv = NULL,
52419 diff --git a/fs/notify/notification.c b/fs/notify/notification.c
52420 index b8bf53b..c518688 100644
52421 --- a/fs/notify/notification.c
52422 +++ b/fs/notify/notification.c
52423 @@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
52424 * get set to 0 so it will never get 'freed'
52425 */
52426 static struct fsnotify_event q_overflow_event;
52427 -static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
52428 +static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
52429
52430 /**
52431 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
52432 @@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
52433 */
52434 u32 fsnotify_get_cookie(void)
52435 {
52436 - return atomic_inc_return(&fsnotify_sync_cookie);
52437 + return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
52438 }
52439 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
52440
52441 diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
52442 index 5a9e344..0f8cd28 100644
52443 --- a/fs/ntfs/dir.c
52444 +++ b/fs/ntfs/dir.c
52445 @@ -1328,7 +1328,7 @@ find_next_index_buffer:
52446 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
52447 ~(s64)(ndir->itype.index.block_size - 1)));
52448 /* Bounds checks. */
52449 - if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
52450 + if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
52451 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
52452 "inode 0x%lx or driver bug.", vdir->i_ino);
52453 goto err_out;
52454 diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
52455 index 663c0e3..b6868e9 100644
52456 --- a/fs/ntfs/file.c
52457 +++ b/fs/ntfs/file.c
52458 @@ -2243,6 +2243,6 @@ const struct inode_operations ntfs_file_inode_ops = {
52459 #endif /* NTFS_RW */
52460 };
52461
52462 -const struct file_operations ntfs_empty_file_ops = {};
52463 +const struct file_operations ntfs_empty_file_ops __read_only;
52464
52465 -const struct inode_operations ntfs_empty_inode_ops = {};
52466 +const struct inode_operations ntfs_empty_inode_ops __read_only;
52467 diff --git a/fs/ocfs2/cluster/masklog.c b/fs/ocfs2/cluster/masklog.c
52468 index 1cd2934..880b5d2 100644
52469 --- a/fs/ocfs2/cluster/masklog.c
52470 +++ b/fs/ocfs2/cluster/masklog.c
52471 @@ -135,7 +135,7 @@ static ssize_t mlog_store(struct kobject *obj, struct attribute *attr,
52472 return mlog_mask_store(mlog_attr->mask, buf, count);
52473 }
52474
52475 -static struct sysfs_ops mlog_attr_ops = {
52476 +static const struct sysfs_ops mlog_attr_ops = {
52477 .show = mlog_show,
52478 .store = mlog_store,
52479 };
52480 diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
52481 index ac10f83..2cd2607 100644
52482 --- a/fs/ocfs2/localalloc.c
52483 +++ b/fs/ocfs2/localalloc.c
52484 @@ -1188,7 +1188,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
52485 goto bail;
52486 }
52487
52488 - atomic_inc(&osb->alloc_stats.moves);
52489 + atomic_inc_unchecked(&osb->alloc_stats.moves);
52490
52491 status = 0;
52492 bail:
52493 diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
52494 index f010b22..9f9ed34 100644
52495 --- a/fs/ocfs2/namei.c
52496 +++ b/fs/ocfs2/namei.c
52497 @@ -1043,6 +1043,8 @@ static int ocfs2_rename(struct inode *old_dir,
52498 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
52499 struct ocfs2_dir_lookup_result target_insert = { NULL, };
52500
52501 + pax_track_stack();
52502 +
52503 /* At some point it might be nice to break this function up a
52504 * bit. */
52505
52506 diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
52507 index d963d86..914cfbd 100644
52508 --- a/fs/ocfs2/ocfs2.h
52509 +++ b/fs/ocfs2/ocfs2.h
52510 @@ -217,11 +217,11 @@ enum ocfs2_vol_state
52511
52512 struct ocfs2_alloc_stats
52513 {
52514 - atomic_t moves;
52515 - atomic_t local_data;
52516 - atomic_t bitmap_data;
52517 - atomic_t bg_allocs;
52518 - atomic_t bg_extends;
52519 + atomic_unchecked_t moves;
52520 + atomic_unchecked_t local_data;
52521 + atomic_unchecked_t bitmap_data;
52522 + atomic_unchecked_t bg_allocs;
52523 + atomic_unchecked_t bg_extends;
52524 };
52525
52526 enum ocfs2_local_alloc_state
52527 diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
52528 index 79b5dac..d322952 100644
52529 --- a/fs/ocfs2/suballoc.c
52530 +++ b/fs/ocfs2/suballoc.c
52531 @@ -623,7 +623,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
52532 mlog_errno(status);
52533 goto bail;
52534 }
52535 - atomic_inc(&osb->alloc_stats.bg_extends);
52536 + atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
52537
52538 /* You should never ask for this much metadata */
52539 BUG_ON(bits_wanted >
52540 @@ -1654,7 +1654,7 @@ int ocfs2_claim_metadata(struct ocfs2_super *osb,
52541 mlog_errno(status);
52542 goto bail;
52543 }
52544 - atomic_inc(&osb->alloc_stats.bg_allocs);
52545 + atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
52546
52547 *blkno_start = bg_blkno + (u64) *suballoc_bit_start;
52548 ac->ac_bits_given += (*num_bits);
52549 @@ -1728,7 +1728,7 @@ int ocfs2_claim_new_inode(struct ocfs2_super *osb,
52550 mlog_errno(status);
52551 goto bail;
52552 }
52553 - atomic_inc(&osb->alloc_stats.bg_allocs);
52554 + atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
52555
52556 BUG_ON(num_bits != 1);
52557
52558 @@ -1830,7 +1830,7 @@ int __ocfs2_claim_clusters(struct ocfs2_super *osb,
52559 cluster_start,
52560 num_clusters);
52561 if (!status)
52562 - atomic_inc(&osb->alloc_stats.local_data);
52563 + atomic_inc_unchecked(&osb->alloc_stats.local_data);
52564 } else {
52565 if (min_clusters > (osb->bitmap_cpg - 1)) {
52566 /* The only paths asking for contiguousness
52567 @@ -1858,7 +1858,7 @@ int __ocfs2_claim_clusters(struct ocfs2_super *osb,
52568 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
52569 bg_blkno,
52570 bg_bit_off);
52571 - atomic_inc(&osb->alloc_stats.bitmap_data);
52572 + atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
52573 }
52574 }
52575 if (status < 0) {
52576 diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
52577 index 9f55be4..a3f8048 100644
52578 --- a/fs/ocfs2/super.c
52579 +++ b/fs/ocfs2/super.c
52580 @@ -284,11 +284,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
52581 "%10s => GlobalAllocs: %d LocalAllocs: %d "
52582 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
52583 "Stats",
52584 - atomic_read(&osb->alloc_stats.bitmap_data),
52585 - atomic_read(&osb->alloc_stats.local_data),
52586 - atomic_read(&osb->alloc_stats.bg_allocs),
52587 - atomic_read(&osb->alloc_stats.moves),
52588 - atomic_read(&osb->alloc_stats.bg_extends));
52589 + atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
52590 + atomic_read_unchecked(&osb->alloc_stats.local_data),
52591 + atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
52592 + atomic_read_unchecked(&osb->alloc_stats.moves),
52593 + atomic_read_unchecked(&osb->alloc_stats.bg_extends));
52594
52595 out += snprintf(buf + out, len - out,
52596 "%10s => State: %u Descriptor: %llu Size: %u bits "
52597 @@ -2002,11 +2002,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
52598 spin_lock_init(&osb->osb_xattr_lock);
52599 ocfs2_init_inode_steal_slot(osb);
52600
52601 - atomic_set(&osb->alloc_stats.moves, 0);
52602 - atomic_set(&osb->alloc_stats.local_data, 0);
52603 - atomic_set(&osb->alloc_stats.bitmap_data, 0);
52604 - atomic_set(&osb->alloc_stats.bg_allocs, 0);
52605 - atomic_set(&osb->alloc_stats.bg_extends, 0);
52606 + atomic_set_unchecked(&osb->alloc_stats.moves, 0);
52607 + atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
52608 + atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
52609 + atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
52610 + atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
52611
52612 /* Copy the blockcheck stats from the superblock probe */
52613 osb->osb_ecc_stats = *stats;
52614 diff --git a/fs/open.c b/fs/open.c
52615 index 4f01e06..2a8057a 100644
52616 --- a/fs/open.c
52617 +++ b/fs/open.c
52618 @@ -275,6 +275,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
52619 error = locks_verify_truncate(inode, NULL, length);
52620 if (!error)
52621 error = security_path_truncate(&path, length, 0);
52622 +
52623 + if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
52624 + error = -EACCES;
52625 +
52626 if (!error) {
52627 vfs_dq_init(inode);
52628 error = do_truncate(path.dentry, length, 0, NULL);
52629 @@ -511,6 +515,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
52630 if (__mnt_is_readonly(path.mnt))
52631 res = -EROFS;
52632
52633 + if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
52634 + res = -EACCES;
52635 +
52636 out_path_release:
52637 path_put(&path);
52638 out:
52639 @@ -537,6 +544,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
52640 if (error)
52641 goto dput_and_out;
52642
52643 + gr_log_chdir(path.dentry, path.mnt);
52644 +
52645 set_fs_pwd(current->fs, &path);
52646
52647 dput_and_out:
52648 @@ -563,6 +572,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
52649 goto out_putf;
52650
52651 error = inode_permission(inode, MAY_EXEC | MAY_ACCESS);
52652 +
52653 + if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
52654 + error = -EPERM;
52655 +
52656 + if (!error)
52657 + gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
52658 +
52659 if (!error)
52660 set_fs_pwd(current->fs, &file->f_path);
52661 out_putf:
52662 @@ -588,7 +604,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
52663 if (!capable(CAP_SYS_CHROOT))
52664 goto dput_and_out;
52665
52666 + if (gr_handle_chroot_chroot(path.dentry, path.mnt))
52667 + goto dput_and_out;
52668 +
52669 set_fs_root(current->fs, &path);
52670 +
52671 + gr_handle_chroot_chdir(&path);
52672 +
52673 error = 0;
52674 dput_and_out:
52675 path_put(&path);
52676 @@ -596,66 +618,57 @@ out:
52677 return error;
52678 }
52679
52680 +static int chmod_common(struct path *path, umode_t mode)
52681 +{
52682 + struct inode *inode = path->dentry->d_inode;
52683 + struct iattr newattrs;
52684 + int error;
52685 +
52686 + error = mnt_want_write(path->mnt);
52687 + if (error)
52688 + return error;
52689 + mutex_lock(&inode->i_mutex);
52690 + if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
52691 + error = -EACCES;
52692 + goto out_unlock;
52693 + }
52694 + if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
52695 + error = -EPERM;
52696 + goto out_unlock;
52697 + }
52698 + newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
52699 + newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
52700 + error = notify_change(path->dentry, &newattrs);
52701 +out_unlock:
52702 + mutex_unlock(&inode->i_mutex);
52703 + mnt_drop_write(path->mnt);
52704 + return error;
52705 +}
52706 +
52707 SYSCALL_DEFINE2(fchmod, unsigned int, fd, mode_t, mode)
52708 {
52709 - struct inode * inode;
52710 - struct dentry * dentry;
52711 struct file * file;
52712 int err = -EBADF;
52713 - struct iattr newattrs;
52714
52715 file = fget(fd);
52716 - if (!file)
52717 - goto out;
52718 -
52719 - dentry = file->f_path.dentry;
52720 - inode = dentry->d_inode;
52721 -
52722 - audit_inode(NULL, dentry);
52723 -
52724 - err = mnt_want_write_file(file);
52725 - if (err)
52726 - goto out_putf;
52727 - mutex_lock(&inode->i_mutex);
52728 - if (mode == (mode_t) -1)
52729 - mode = inode->i_mode;
52730 - newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
52731 - newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
52732 - err = notify_change(dentry, &newattrs);
52733 - mutex_unlock(&inode->i_mutex);
52734 - mnt_drop_write(file->f_path.mnt);
52735 -out_putf:
52736 - fput(file);
52737 -out:
52738 + if (file) {
52739 + audit_inode(NULL, file->f_path.dentry);
52740 + err = chmod_common(&file->f_path, mode);
52741 + fput(file);
52742 + }
52743 return err;
52744 }
52745
52746 SYSCALL_DEFINE3(fchmodat, int, dfd, const char __user *, filename, mode_t, mode)
52747 {
52748 struct path path;
52749 - struct inode *inode;
52750 int error;
52751 - struct iattr newattrs;
52752
52753 error = user_path_at(dfd, filename, LOOKUP_FOLLOW, &path);
52754 - if (error)
52755 - goto out;
52756 - inode = path.dentry->d_inode;
52757 -
52758 - error = mnt_want_write(path.mnt);
52759 - if (error)
52760 - goto dput_and_out;
52761 - mutex_lock(&inode->i_mutex);
52762 - if (mode == (mode_t) -1)
52763 - mode = inode->i_mode;
52764 - newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
52765 - newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
52766 - error = notify_change(path.dentry, &newattrs);
52767 - mutex_unlock(&inode->i_mutex);
52768 - mnt_drop_write(path.mnt);
52769 -dput_and_out:
52770 - path_put(&path);
52771 -out:
52772 + if (!error) {
52773 + error = chmod_common(&path, mode);
52774 + path_put(&path);
52775 + }
52776 return error;
52777 }
52778
52779 @@ -664,12 +677,15 @@ SYSCALL_DEFINE2(chmod, const char __user *, filename, mode_t, mode)
52780 return sys_fchmodat(AT_FDCWD, filename, mode);
52781 }
52782
52783 -static int chown_common(struct dentry * dentry, uid_t user, gid_t group)
52784 +static int chown_common(struct dentry * dentry, uid_t user, gid_t group, struct vfsmount *mnt)
52785 {
52786 struct inode *inode = dentry->d_inode;
52787 int error;
52788 struct iattr newattrs;
52789
52790 + if (!gr_acl_handle_chown(dentry, mnt))
52791 + return -EACCES;
52792 +
52793 newattrs.ia_valid = ATTR_CTIME;
52794 if (user != (uid_t) -1) {
52795 newattrs.ia_valid |= ATTR_UID;
52796 @@ -700,7 +716,7 @@ SYSCALL_DEFINE3(chown, const char __user *, filename, uid_t, user, gid_t, group)
52797 error = mnt_want_write(path.mnt);
52798 if (error)
52799 goto out_release;
52800 - error = chown_common(path.dentry, user, group);
52801 + error = chown_common(path.dentry, user, group, path.mnt);
52802 mnt_drop_write(path.mnt);
52803 out_release:
52804 path_put(&path);
52805 @@ -725,7 +741,7 @@ SYSCALL_DEFINE5(fchownat, int, dfd, const char __user *, filename, uid_t, user,
52806 error = mnt_want_write(path.mnt);
52807 if (error)
52808 goto out_release;
52809 - error = chown_common(path.dentry, user, group);
52810 + error = chown_common(path.dentry, user, group, path.mnt);
52811 mnt_drop_write(path.mnt);
52812 out_release:
52813 path_put(&path);
52814 @@ -744,7 +760,7 @@ SYSCALL_DEFINE3(lchown, const char __user *, filename, uid_t, user, gid_t, group
52815 error = mnt_want_write(path.mnt);
52816 if (error)
52817 goto out_release;
52818 - error = chown_common(path.dentry, user, group);
52819 + error = chown_common(path.dentry, user, group, path.mnt);
52820 mnt_drop_write(path.mnt);
52821 out_release:
52822 path_put(&path);
52823 @@ -767,7 +783,7 @@ SYSCALL_DEFINE3(fchown, unsigned int, fd, uid_t, user, gid_t, group)
52824 goto out_fput;
52825 dentry = file->f_path.dentry;
52826 audit_inode(NULL, dentry);
52827 - error = chown_common(dentry, user, group);
52828 + error = chown_common(dentry, user, group, file->f_path.mnt);
52829 mnt_drop_write(file->f_path.mnt);
52830 out_fput:
52831 fput(file);
52832 @@ -1036,7 +1052,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, int mode)
52833 if (!IS_ERR(tmp)) {
52834 fd = get_unused_fd_flags(flags);
52835 if (fd >= 0) {
52836 - struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
52837 + struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
52838 if (IS_ERR(f)) {
52839 put_unused_fd(fd);
52840 fd = PTR_ERR(f);
52841 diff --git a/fs/partitions/efi.c b/fs/partitions/efi.c
52842 index 6ab70f4..f4103d1 100644
52843 --- a/fs/partitions/efi.c
52844 +++ b/fs/partitions/efi.c
52845 @@ -231,14 +231,14 @@ alloc_read_gpt_entries(struct block_device *bdev, gpt_header *gpt)
52846 if (!bdev || !gpt)
52847 return NULL;
52848
52849 + if (!le32_to_cpu(gpt->num_partition_entries))
52850 + return NULL;
52851 + pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
52852 + if (!pte)
52853 + return NULL;
52854 +
52855 count = le32_to_cpu(gpt->num_partition_entries) *
52856 le32_to_cpu(gpt->sizeof_partition_entry);
52857 - if (!count)
52858 - return NULL;
52859 - pte = kzalloc(count, GFP_KERNEL);
52860 - if (!pte)
52861 - return NULL;
52862 -
52863 if (read_lba(bdev, le64_to_cpu(gpt->partition_entry_lba),
52864 (u8 *) pte,
52865 count) < count) {
52866 diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c
52867 index dd6efdb..3babc6c 100644
52868 --- a/fs/partitions/ldm.c
52869 +++ b/fs/partitions/ldm.c
52870 @@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
52871 ldm_error ("A VBLK claims to have %d parts.", num);
52872 return false;
52873 }
52874 +
52875 if (rec >= num) {
52876 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
52877 return false;
52878 @@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
52879 goto found;
52880 }
52881
52882 - f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
52883 + f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
52884 if (!f) {
52885 ldm_crit ("Out of memory.");
52886 return false;
52887 diff --git a/fs/partitions/mac.c b/fs/partitions/mac.c
52888 index 5765198..7f8e9e0 100644
52889 --- a/fs/partitions/mac.c
52890 +++ b/fs/partitions/mac.c
52891 @@ -59,11 +59,11 @@ int mac_partition(struct parsed_partitions *state, struct block_device *bdev)
52892 return 0; /* not a MacOS disk */
52893 }
52894 blocks_in_map = be32_to_cpu(part->map_count);
52895 - if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
52896 - put_dev_sector(sect);
52897 - return 0;
52898 - }
52899 printk(" [mac]");
52900 + if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
52901 + put_dev_sector(sect);
52902 + return 0;
52903 + }
52904 for (slot = 1; slot <= blocks_in_map; ++slot) {
52905 int pos = slot * secsize;
52906 put_dev_sector(sect);
52907 diff --git a/fs/pipe.c b/fs/pipe.c
52908 index d0cc080..8a6f211 100644
52909 --- a/fs/pipe.c
52910 +++ b/fs/pipe.c
52911 @@ -401,9 +401,9 @@ redo:
52912 }
52913 if (bufs) /* More to do? */
52914 continue;
52915 - if (!pipe->writers)
52916 + if (!atomic_read(&pipe->writers))
52917 break;
52918 - if (!pipe->waiting_writers) {
52919 + if (!atomic_read(&pipe->waiting_writers)) {
52920 /* syscall merging: Usually we must not sleep
52921 * if O_NONBLOCK is set, or if we got some data.
52922 * But if a writer sleeps in kernel space, then
52923 @@ -462,7 +462,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
52924 mutex_lock(&inode->i_mutex);
52925 pipe = inode->i_pipe;
52926
52927 - if (!pipe->readers) {
52928 + if (!atomic_read(&pipe->readers)) {
52929 send_sig(SIGPIPE, current, 0);
52930 ret = -EPIPE;
52931 goto out;
52932 @@ -511,7 +511,7 @@ redo1:
52933 for (;;) {
52934 int bufs;
52935
52936 - if (!pipe->readers) {
52937 + if (!atomic_read(&pipe->readers)) {
52938 send_sig(SIGPIPE, current, 0);
52939 if (!ret)
52940 ret = -EPIPE;
52941 @@ -597,9 +597,9 @@ redo2:
52942 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
52943 do_wakeup = 0;
52944 }
52945 - pipe->waiting_writers++;
52946 + atomic_inc(&pipe->waiting_writers);
52947 pipe_wait(pipe);
52948 - pipe->waiting_writers--;
52949 + atomic_dec(&pipe->waiting_writers);
52950 }
52951 out:
52952 mutex_unlock(&inode->i_mutex);
52953 @@ -666,7 +666,7 @@ pipe_poll(struct file *filp, poll_table *wait)
52954 mask = 0;
52955 if (filp->f_mode & FMODE_READ) {
52956 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
52957 - if (!pipe->writers && filp->f_version != pipe->w_counter)
52958 + if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
52959 mask |= POLLHUP;
52960 }
52961
52962 @@ -676,7 +676,7 @@ pipe_poll(struct file *filp, poll_table *wait)
52963 * Most Unices do not set POLLERR for FIFOs but on Linux they
52964 * behave exactly like pipes for poll().
52965 */
52966 - if (!pipe->readers)
52967 + if (!atomic_read(&pipe->readers))
52968 mask |= POLLERR;
52969 }
52970
52971 @@ -690,10 +690,10 @@ pipe_release(struct inode *inode, int decr, int decw)
52972
52973 mutex_lock(&inode->i_mutex);
52974 pipe = inode->i_pipe;
52975 - pipe->readers -= decr;
52976 - pipe->writers -= decw;
52977 + atomic_sub(decr, &pipe->readers);
52978 + atomic_sub(decw, &pipe->writers);
52979
52980 - if (!pipe->readers && !pipe->writers) {
52981 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
52982 free_pipe_info(inode);
52983 } else {
52984 wake_up_interruptible_sync(&pipe->wait);
52985 @@ -783,7 +783,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
52986
52987 if (inode->i_pipe) {
52988 ret = 0;
52989 - inode->i_pipe->readers++;
52990 + atomic_inc(&inode->i_pipe->readers);
52991 }
52992
52993 mutex_unlock(&inode->i_mutex);
52994 @@ -800,7 +800,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
52995
52996 if (inode->i_pipe) {
52997 ret = 0;
52998 - inode->i_pipe->writers++;
52999 + atomic_inc(&inode->i_pipe->writers);
53000 }
53001
53002 mutex_unlock(&inode->i_mutex);
53003 @@ -818,9 +818,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
53004 if (inode->i_pipe) {
53005 ret = 0;
53006 if (filp->f_mode & FMODE_READ)
53007 - inode->i_pipe->readers++;
53008 + atomic_inc(&inode->i_pipe->readers);
53009 if (filp->f_mode & FMODE_WRITE)
53010 - inode->i_pipe->writers++;
53011 + atomic_inc(&inode->i_pipe->writers);
53012 }
53013
53014 mutex_unlock(&inode->i_mutex);
53015 @@ -905,7 +905,7 @@ void free_pipe_info(struct inode *inode)
53016 inode->i_pipe = NULL;
53017 }
53018
53019 -static struct vfsmount *pipe_mnt __read_mostly;
53020 +struct vfsmount *pipe_mnt __read_mostly;
53021 static int pipefs_delete_dentry(struct dentry *dentry)
53022 {
53023 /*
53024 @@ -945,7 +945,8 @@ static struct inode * get_pipe_inode(void)
53025 goto fail_iput;
53026 inode->i_pipe = pipe;
53027
53028 - pipe->readers = pipe->writers = 1;
53029 + atomic_set(&pipe->readers, 1);
53030 + atomic_set(&pipe->writers, 1);
53031 inode->i_fop = &rdwr_pipefifo_fops;
53032
53033 /*
53034 diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
53035 index 50f8f06..c5755df 100644
53036 --- a/fs/proc/Kconfig
53037 +++ b/fs/proc/Kconfig
53038 @@ -30,12 +30,12 @@ config PROC_FS
53039
53040 config PROC_KCORE
53041 bool "/proc/kcore support" if !ARM
53042 - depends on PROC_FS && MMU
53043 + depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
53044
53045 config PROC_VMCORE
53046 bool "/proc/vmcore support (EXPERIMENTAL)"
53047 - depends on PROC_FS && CRASH_DUMP
53048 - default y
53049 + depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
53050 + default n
53051 help
53052 Exports the dump image of crashed kernel in ELF format.
53053
53054 @@ -59,8 +59,8 @@ config PROC_SYSCTL
53055 limited in memory.
53056
53057 config PROC_PAGE_MONITOR
53058 - default y
53059 - depends on PROC_FS && MMU
53060 + default n
53061 + depends on PROC_FS && MMU && !GRKERNSEC
53062 bool "Enable /proc page monitoring" if EMBEDDED
53063 help
53064 Various /proc files exist to monitor process memory utilization:
53065 diff --git a/fs/proc/array.c b/fs/proc/array.c
53066 index c5ef152..24a1b87 100644
53067 --- a/fs/proc/array.c
53068 +++ b/fs/proc/array.c
53069 @@ -60,6 +60,7 @@
53070 #include <linux/tty.h>
53071 #include <linux/string.h>
53072 #include <linux/mman.h>
53073 +#include <linux/grsecurity.h>
53074 #include <linux/proc_fs.h>
53075 #include <linux/ioport.h>
53076 #include <linux/uaccess.h>
53077 @@ -321,6 +322,21 @@ static inline void task_context_switch_counts(struct seq_file *m,
53078 p->nivcsw);
53079 }
53080
53081 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
53082 +static inline void task_pax(struct seq_file *m, struct task_struct *p)
53083 +{
53084 + if (p->mm)
53085 + seq_printf(m, "PaX:\t%c%c%c%c%c\n",
53086 + p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
53087 + p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
53088 + p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
53089 + p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
53090 + p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
53091 + else
53092 + seq_printf(m, "PaX:\t-----\n");
53093 +}
53094 +#endif
53095 +
53096 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
53097 struct pid *pid, struct task_struct *task)
53098 {
53099 @@ -337,9 +353,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
53100 task_cap(m, task);
53101 cpuset_task_status_allowed(m, task);
53102 task_context_switch_counts(m, task);
53103 +
53104 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
53105 + task_pax(m, task);
53106 +#endif
53107 +
53108 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
53109 + task_grsec_rbac(m, task);
53110 +#endif
53111 +
53112 return 0;
53113 }
53114
53115 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53116 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
53117 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
53118 + _mm->pax_flags & MF_PAX_SEGMEXEC))
53119 +#endif
53120 +
53121 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
53122 struct pid *pid, struct task_struct *task, int whole)
53123 {
53124 @@ -358,9 +389,18 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
53125 cputime_t cutime, cstime, utime, stime;
53126 cputime_t cgtime, gtime;
53127 unsigned long rsslim = 0;
53128 - char tcomm[sizeof(task->comm)];
53129 + char tcomm[sizeof(task->comm)] = { 0 };
53130 unsigned long flags;
53131
53132 + pax_track_stack();
53133 +
53134 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53135 + if (current->exec_id != m->exec_id) {
53136 + gr_log_badprocpid("stat");
53137 + return 0;
53138 + }
53139 +#endif
53140 +
53141 state = *get_task_state(task);
53142 vsize = eip = esp = 0;
53143 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
53144 @@ -433,6 +473,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
53145 gtime = task_gtime(task);
53146 }
53147
53148 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53149 + if (PAX_RAND_FLAGS(mm)) {
53150 + eip = 0;
53151 + esp = 0;
53152 + wchan = 0;
53153 + }
53154 +#endif
53155 +#ifdef CONFIG_GRKERNSEC_HIDESYM
53156 + wchan = 0;
53157 + eip =0;
53158 + esp =0;
53159 +#endif
53160 +
53161 /* scale priority and nice values from timeslices to -20..20 */
53162 /* to make it look like a "normal" Unix priority/nice value */
53163 priority = task_prio(task);
53164 @@ -473,9 +526,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
53165 vsize,
53166 mm ? get_mm_rss(mm) : 0,
53167 rsslim,
53168 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53169 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
53170 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
53171 + PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
53172 +#else
53173 mm ? (permitted ? mm->start_code : 1) : 0,
53174 mm ? (permitted ? mm->end_code : 1) : 0,
53175 (permitted && mm) ? mm->start_stack : 0,
53176 +#endif
53177 esp,
53178 eip,
53179 /* The signal information here is obsolete.
53180 @@ -519,6 +578,13 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
53181 int size = 0, resident = 0, shared = 0, text = 0, lib = 0, data = 0;
53182 struct mm_struct *mm = get_task_mm(task);
53183
53184 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53185 + if (current->exec_id != m->exec_id) {
53186 + gr_log_badprocpid("statm");
53187 + return 0;
53188 + }
53189 +#endif
53190 +
53191 if (mm) {
53192 size = task_statm(mm, &shared, &text, &data, &resident);
53193 mmput(mm);
53194 @@ -528,3 +594,18 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
53195
53196 return 0;
53197 }
53198 +
53199 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
53200 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
53201 +{
53202 + u32 curr_ip = 0;
53203 + unsigned long flags;
53204 +
53205 + if (lock_task_sighand(task, &flags)) {
53206 + curr_ip = task->signal->curr_ip;
53207 + unlock_task_sighand(task, &flags);
53208 + }
53209 +
53210 + return sprintf(buffer, "%pI4\n", &curr_ip);
53211 +}
53212 +#endif
53213 diff --git a/fs/proc/base.c b/fs/proc/base.c
53214 index 67f7dc0..a86ad9a 100644
53215 --- a/fs/proc/base.c
53216 +++ b/fs/proc/base.c
53217 @@ -102,6 +102,22 @@ struct pid_entry {
53218 union proc_op op;
53219 };
53220
53221 +struct getdents_callback {
53222 + struct linux_dirent __user * current_dir;
53223 + struct linux_dirent __user * previous;
53224 + struct file * file;
53225 + int count;
53226 + int error;
53227 +};
53228 +
53229 +static int gr_fake_filldir(void * __buf, const char *name, int namlen,
53230 + loff_t offset, u64 ino, unsigned int d_type)
53231 +{
53232 + struct getdents_callback * buf = (struct getdents_callback *) __buf;
53233 + buf->error = -EINVAL;
53234 + return 0;
53235 +}
53236 +
53237 #define NOD(NAME, MODE, IOP, FOP, OP) { \
53238 .name = (NAME), \
53239 .len = sizeof(NAME) - 1, \
53240 @@ -213,6 +229,9 @@ static int check_mem_permission(struct task_struct *task)
53241 if (task == current)
53242 return 0;
53243
53244 + if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
53245 + return -EPERM;
53246 +
53247 /*
53248 * If current is actively ptrace'ing, and would also be
53249 * permitted to freshly attach with ptrace now, permit it.
53250 @@ -260,6 +279,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
53251 if (!mm->arg_end)
53252 goto out_mm; /* Shh! No looking before we're done */
53253
53254 + if (gr_acl_handle_procpidmem(task))
53255 + goto out_mm;
53256 +
53257 len = mm->arg_end - mm->arg_start;
53258
53259 if (len > PAGE_SIZE)
53260 @@ -287,12 +309,28 @@ out:
53261 return res;
53262 }
53263
53264 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53265 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
53266 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
53267 + _mm->pax_flags & MF_PAX_SEGMEXEC))
53268 +#endif
53269 +
53270 static int proc_pid_auxv(struct task_struct *task, char *buffer)
53271 {
53272 int res = 0;
53273 struct mm_struct *mm = get_task_mm(task);
53274 if (mm) {
53275 unsigned int nwords = 0;
53276 +
53277 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53278 + /* allow if we're currently ptracing this task */
53279 + if (PAX_RAND_FLAGS(mm) &&
53280 + (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
53281 + mmput(mm);
53282 + return 0;
53283 + }
53284 +#endif
53285 +
53286 do {
53287 nwords += 2;
53288 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
53289 @@ -306,7 +344,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
53290 }
53291
53292
53293 -#ifdef CONFIG_KALLSYMS
53294 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53295 /*
53296 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
53297 * Returns the resolved symbol. If that fails, simply return the address.
53298 @@ -345,7 +383,7 @@ static void unlock_trace(struct task_struct *task)
53299 mutex_unlock(&task->cred_guard_mutex);
53300 }
53301
53302 -#ifdef CONFIG_STACKTRACE
53303 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53304
53305 #define MAX_STACK_TRACE_DEPTH 64
53306
53307 @@ -545,7 +583,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
53308 return count;
53309 }
53310
53311 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
53312 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
53313 static int proc_pid_syscall(struct task_struct *task, char *buffer)
53314 {
53315 long nr;
53316 @@ -574,7 +612,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
53317 /************************************************************************/
53318
53319 /* permission checks */
53320 -static int proc_fd_access_allowed(struct inode *inode)
53321 +static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
53322 {
53323 struct task_struct *task;
53324 int allowed = 0;
53325 @@ -584,7 +622,10 @@ static int proc_fd_access_allowed(struct inode *inode)
53326 */
53327 task = get_proc_task(inode);
53328 if (task) {
53329 - allowed = ptrace_may_access(task, PTRACE_MODE_READ);
53330 + if (log)
53331 + allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
53332 + else
53333 + allowed = ptrace_may_access(task, PTRACE_MODE_READ);
53334 put_task_struct(task);
53335 }
53336 return allowed;
53337 @@ -806,9 +847,16 @@ static const struct file_operations proc_single_file_operations = {
53338 static int mem_open(struct inode* inode, struct file* file)
53339 {
53340 file->private_data = (void*)((long)current->self_exec_id);
53341 +
53342 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53343 + file->f_version = current->exec_id;
53344 +#endif
53345 +
53346 return 0;
53347 }
53348
53349 +static int task_dumpable(struct task_struct *task);
53350 +
53351 static ssize_t mem_read(struct file * file, char __user * buf,
53352 size_t count, loff_t *ppos)
53353 {
53354 @@ -818,6 +866,13 @@ static ssize_t mem_read(struct file * file, char __user * buf,
53355 int ret = -ESRCH;
53356 struct mm_struct *mm;
53357
53358 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53359 + if (file->f_version != current->exec_id) {
53360 + gr_log_badprocpid("mem");
53361 + return 0;
53362 + }
53363 +#endif
53364 +
53365 if (!task)
53366 goto out_no_task;
53367
53368 @@ -963,6 +1018,9 @@ static ssize_t environ_read(struct file *file, char __user *buf,
53369 if (!task)
53370 goto out_no_task;
53371
53372 + if (gr_acl_handle_procpidmem(task))
53373 + goto out;
53374 +
53375 if (!ptrace_may_access(task, PTRACE_MODE_READ))
53376 goto out;
53377
53378 @@ -1377,7 +1435,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
53379 path_put(&nd->path);
53380
53381 /* Are we allowed to snoop on the tasks file descriptors? */
53382 - if (!proc_fd_access_allowed(inode))
53383 + if (!proc_fd_access_allowed(inode,0))
53384 goto out;
53385
53386 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
53387 @@ -1417,8 +1475,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
53388 struct path path;
53389
53390 /* Are we allowed to snoop on the tasks file descriptors? */
53391 - if (!proc_fd_access_allowed(inode))
53392 - goto out;
53393 + /* logging this is needed for learning on chromium to work properly,
53394 + but we don't want to flood the logs from 'ps' which does a readlink
53395 + on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
53396 + CAP_SYS_PTRACE as it's not necessary for its basic functionality
53397 + */
53398 + if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
53399 + if (!proc_fd_access_allowed(inode,0))
53400 + goto out;
53401 + } else {
53402 + if (!proc_fd_access_allowed(inode,1))
53403 + goto out;
53404 + }
53405
53406 error = PROC_I(inode)->op.proc_get_link(inode, &path);
53407 if (error)
53408 @@ -1483,7 +1551,11 @@ static struct inode *proc_pid_make_inode(struct super_block * sb, struct task_st
53409 rcu_read_lock();
53410 cred = __task_cred(task);
53411 inode->i_uid = cred->euid;
53412 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
53413 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
53414 +#else
53415 inode->i_gid = cred->egid;
53416 +#endif
53417 rcu_read_unlock();
53418 }
53419 security_task_to_inode(task, inode);
53420 @@ -1501,6 +1573,9 @@ static int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat
53421 struct inode *inode = dentry->d_inode;
53422 struct task_struct *task;
53423 const struct cred *cred;
53424 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53425 + const struct cred *tmpcred = current_cred();
53426 +#endif
53427
53428 generic_fillattr(inode, stat);
53429
53430 @@ -1508,13 +1583,41 @@ static int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat
53431 stat->uid = 0;
53432 stat->gid = 0;
53433 task = pid_task(proc_pid(inode), PIDTYPE_PID);
53434 +
53435 + if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
53436 + rcu_read_unlock();
53437 + return -ENOENT;
53438 + }
53439 +
53440 if (task) {
53441 + cred = __task_cred(task);
53442 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53443 + if (!tmpcred->uid || (tmpcred->uid == cred->uid)
53444 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
53445 + || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
53446 +#endif
53447 + ) {
53448 +#endif
53449 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
53450 +#ifdef CONFIG_GRKERNSEC_PROC_USER
53451 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
53452 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53453 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
53454 +#endif
53455 task_dumpable(task)) {
53456 - cred = __task_cred(task);
53457 stat->uid = cred->euid;
53458 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
53459 + stat->gid = CONFIG_GRKERNSEC_PROC_GID;
53460 +#else
53461 stat->gid = cred->egid;
53462 +#endif
53463 }
53464 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53465 + } else {
53466 + rcu_read_unlock();
53467 + return -ENOENT;
53468 + }
53469 +#endif
53470 }
53471 rcu_read_unlock();
53472 return 0;
53473 @@ -1545,11 +1648,20 @@ static int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
53474
53475 if (task) {
53476 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
53477 +#ifdef CONFIG_GRKERNSEC_PROC_USER
53478 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
53479 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53480 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
53481 +#endif
53482 task_dumpable(task)) {
53483 rcu_read_lock();
53484 cred = __task_cred(task);
53485 inode->i_uid = cred->euid;
53486 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
53487 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
53488 +#else
53489 inode->i_gid = cred->egid;
53490 +#endif
53491 rcu_read_unlock();
53492 } else {
53493 inode->i_uid = 0;
53494 @@ -1670,7 +1782,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
53495 int fd = proc_fd(inode);
53496
53497 if (task) {
53498 - files = get_files_struct(task);
53499 + if (!gr_acl_handle_procpidmem(task))
53500 + files = get_files_struct(task);
53501 put_task_struct(task);
53502 }
53503 if (files) {
53504 @@ -1922,12 +2035,22 @@ static const struct file_operations proc_fd_operations = {
53505 static int proc_fd_permission(struct inode *inode, int mask)
53506 {
53507 int rv;
53508 + struct task_struct *task;
53509
53510 rv = generic_permission(inode, mask, NULL);
53511 - if (rv == 0)
53512 - return 0;
53513 +
53514 if (task_pid(current) == proc_pid(inode))
53515 rv = 0;
53516 +
53517 + task = get_proc_task(inode);
53518 + if (task == NULL)
53519 + return rv;
53520 +
53521 + if (gr_acl_handle_procpidmem(task))
53522 + rv = -EACCES;
53523 +
53524 + put_task_struct(task);
53525 +
53526 return rv;
53527 }
53528
53529 @@ -2036,6 +2159,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
53530 if (!task)
53531 goto out_no_task;
53532
53533 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
53534 + goto out;
53535 +
53536 /*
53537 * Yes, it does not scale. And it should not. Don't add
53538 * new entries into /proc/<tgid>/ without very good reasons.
53539 @@ -2080,6 +2206,9 @@ static int proc_pident_readdir(struct file *filp,
53540 if (!task)
53541 goto out_no_task;
53542
53543 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
53544 + goto out;
53545 +
53546 ret = 0;
53547 i = filp->f_pos;
53548 switch (i) {
53549 @@ -2347,7 +2476,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
53550 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
53551 void *cookie)
53552 {
53553 - char *s = nd_get_link(nd);
53554 + const char *s = nd_get_link(nd);
53555 if (!IS_ERR(s))
53556 __putname(s);
53557 }
53558 @@ -2553,7 +2682,7 @@ static const struct pid_entry tgid_base_stuff[] = {
53559 #ifdef CONFIG_SCHED_DEBUG
53560 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
53561 #endif
53562 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
53563 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
53564 INF("syscall", S_IRUGO, proc_pid_syscall),
53565 #endif
53566 INF("cmdline", S_IRUGO, proc_pid_cmdline),
53567 @@ -2578,10 +2707,10 @@ static const struct pid_entry tgid_base_stuff[] = {
53568 #ifdef CONFIG_SECURITY
53569 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
53570 #endif
53571 -#ifdef CONFIG_KALLSYMS
53572 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53573 INF("wchan", S_IRUGO, proc_pid_wchan),
53574 #endif
53575 -#ifdef CONFIG_STACKTRACE
53576 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53577 ONE("stack", S_IRUGO, proc_pid_stack),
53578 #endif
53579 #ifdef CONFIG_SCHEDSTATS
53580 @@ -2611,6 +2740,9 @@ static const struct pid_entry tgid_base_stuff[] = {
53581 #ifdef CONFIG_TASK_IO_ACCOUNTING
53582 INF("io", S_IRUSR, proc_tgid_io_accounting),
53583 #endif
53584 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
53585 + INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
53586 +#endif
53587 };
53588
53589 static int proc_tgid_base_readdir(struct file * filp,
53590 @@ -2735,7 +2867,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
53591 if (!inode)
53592 goto out;
53593
53594 +#ifdef CONFIG_GRKERNSEC_PROC_USER
53595 + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
53596 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53597 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
53598 + inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
53599 +#else
53600 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
53601 +#endif
53602 inode->i_op = &proc_tgid_base_inode_operations;
53603 inode->i_fop = &proc_tgid_base_operations;
53604 inode->i_flags|=S_IMMUTABLE;
53605 @@ -2777,7 +2916,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
53606 if (!task)
53607 goto out;
53608
53609 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
53610 + goto out_put_task;
53611 +
53612 result = proc_pid_instantiate(dir, dentry, task, NULL);
53613 +out_put_task:
53614 put_task_struct(task);
53615 out:
53616 return result;
53617 @@ -2842,6 +2985,11 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
53618 {
53619 unsigned int nr;
53620 struct task_struct *reaper;
53621 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53622 + const struct cred *tmpcred = current_cred();
53623 + const struct cred *itercred;
53624 +#endif
53625 + filldir_t __filldir = filldir;
53626 struct tgid_iter iter;
53627 struct pid_namespace *ns;
53628
53629 @@ -2865,8 +3013,27 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
53630 for (iter = next_tgid(ns, iter);
53631 iter.task;
53632 iter.tgid += 1, iter = next_tgid(ns, iter)) {
53633 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53634 + rcu_read_lock();
53635 + itercred = __task_cred(iter.task);
53636 +#endif
53637 + if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
53638 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53639 + || (tmpcred->uid && (itercred->uid != tmpcred->uid)
53640 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
53641 + && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
53642 +#endif
53643 + )
53644 +#endif
53645 + )
53646 + __filldir = &gr_fake_filldir;
53647 + else
53648 + __filldir = filldir;
53649 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53650 + rcu_read_unlock();
53651 +#endif
53652 filp->f_pos = iter.tgid + TGID_OFFSET;
53653 - if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
53654 + if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
53655 put_task_struct(iter.task);
53656 goto out;
53657 }
53658 @@ -2892,7 +3059,7 @@ static const struct pid_entry tid_base_stuff[] = {
53659 #ifdef CONFIG_SCHED_DEBUG
53660 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
53661 #endif
53662 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
53663 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
53664 INF("syscall", S_IRUGO, proc_pid_syscall),
53665 #endif
53666 INF("cmdline", S_IRUGO, proc_pid_cmdline),
53667 @@ -2916,10 +3083,10 @@ static const struct pid_entry tid_base_stuff[] = {
53668 #ifdef CONFIG_SECURITY
53669 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
53670 #endif
53671 -#ifdef CONFIG_KALLSYMS
53672 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53673 INF("wchan", S_IRUGO, proc_pid_wchan),
53674 #endif
53675 -#ifdef CONFIG_STACKTRACE
53676 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53677 ONE("stack", S_IRUGO, proc_pid_stack),
53678 #endif
53679 #ifdef CONFIG_SCHEDSTATS
53680 diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
53681 index 82676e3..5f8518a 100644
53682 --- a/fs/proc/cmdline.c
53683 +++ b/fs/proc/cmdline.c
53684 @@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
53685
53686 static int __init proc_cmdline_init(void)
53687 {
53688 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
53689 + proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
53690 +#else
53691 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
53692 +#endif
53693 return 0;
53694 }
53695 module_init(proc_cmdline_init);
53696 diff --git a/fs/proc/devices.c b/fs/proc/devices.c
53697 index 59ee7da..469b4b6 100644
53698 --- a/fs/proc/devices.c
53699 +++ b/fs/proc/devices.c
53700 @@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
53701
53702 static int __init proc_devices_init(void)
53703 {
53704 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
53705 + proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
53706 +#else
53707 proc_create("devices", 0, NULL, &proc_devinfo_operations);
53708 +#endif
53709 return 0;
53710 }
53711 module_init(proc_devices_init);
53712 diff --git a/fs/proc/inode.c b/fs/proc/inode.c
53713 index d78ade3..81767f9 100644
53714 --- a/fs/proc/inode.c
53715 +++ b/fs/proc/inode.c
53716 @@ -18,12 +18,19 @@
53717 #include <linux/module.h>
53718 #include <linux/smp_lock.h>
53719 #include <linux/sysctl.h>
53720 +#include <linux/grsecurity.h>
53721
53722 #include <asm/system.h>
53723 #include <asm/uaccess.h>
53724
53725 #include "internal.h"
53726
53727 +#ifdef CONFIG_PROC_SYSCTL
53728 +extern const struct inode_operations proc_sys_inode_operations;
53729 +extern const struct inode_operations proc_sys_dir_operations;
53730 +#endif
53731 +
53732 +
53733 struct proc_dir_entry *de_get(struct proc_dir_entry *de)
53734 {
53735 atomic_inc(&de->count);
53736 @@ -62,6 +69,13 @@ static void proc_delete_inode(struct inode *inode)
53737 de_put(de);
53738 if (PROC_I(inode)->sysctl)
53739 sysctl_head_put(PROC_I(inode)->sysctl);
53740 +
53741 +#ifdef CONFIG_PROC_SYSCTL
53742 + if (inode->i_op == &proc_sys_inode_operations ||
53743 + inode->i_op == &proc_sys_dir_operations)
53744 + gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
53745 +#endif
53746 +
53747 clear_inode(inode);
53748 }
53749
53750 @@ -457,7 +471,11 @@ struct inode *proc_get_inode(struct super_block *sb, unsigned int ino,
53751 if (de->mode) {
53752 inode->i_mode = de->mode;
53753 inode->i_uid = de->uid;
53754 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
53755 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
53756 +#else
53757 inode->i_gid = de->gid;
53758 +#endif
53759 }
53760 if (de->size)
53761 inode->i_size = de->size;
53762 diff --git a/fs/proc/internal.h b/fs/proc/internal.h
53763 index 753ca37..26bcf3b 100644
53764 --- a/fs/proc/internal.h
53765 +++ b/fs/proc/internal.h
53766 @@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
53767 struct pid *pid, struct task_struct *task);
53768 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
53769 struct pid *pid, struct task_struct *task);
53770 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
53771 +extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
53772 +#endif
53773 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
53774
53775 extern const struct file_operations proc_maps_operations;
53776 diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
53777 index b442dac..aab29cb 100644
53778 --- a/fs/proc/kcore.c
53779 +++ b/fs/proc/kcore.c
53780 @@ -320,6 +320,8 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
53781 off_t offset = 0;
53782 struct kcore_list *m;
53783
53784 + pax_track_stack();
53785 +
53786 /* setup ELF header */
53787 elf = (struct elfhdr *) bufp;
53788 bufp += sizeof(struct elfhdr);
53789 @@ -477,9 +479,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
53790 * the addresses in the elf_phdr on our list.
53791 */
53792 start = kc_offset_to_vaddr(*fpos - elf_buflen);
53793 - if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
53794 + tsz = PAGE_SIZE - (start & ~PAGE_MASK);
53795 + if (tsz > buflen)
53796 tsz = buflen;
53797 -
53798 +
53799 while (buflen) {
53800 struct kcore_list *m;
53801
53802 @@ -508,20 +511,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
53803 kfree(elf_buf);
53804 } else {
53805 if (kern_addr_valid(start)) {
53806 - unsigned long n;
53807 + char *elf_buf;
53808 + mm_segment_t oldfs;
53809
53810 - n = copy_to_user(buffer, (char *)start, tsz);
53811 - /*
53812 - * We cannot distingush between fault on source
53813 - * and fault on destination. When this happens
53814 - * we clear too and hope it will trigger the
53815 - * EFAULT again.
53816 - */
53817 - if (n) {
53818 - if (clear_user(buffer + tsz - n,
53819 - n))
53820 + elf_buf = kmalloc(tsz, GFP_KERNEL);
53821 + if (!elf_buf)
53822 + return -ENOMEM;
53823 + oldfs = get_fs();
53824 + set_fs(KERNEL_DS);
53825 + if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
53826 + set_fs(oldfs);
53827 + if (copy_to_user(buffer, elf_buf, tsz)) {
53828 + kfree(elf_buf);
53829 return -EFAULT;
53830 + }
53831 }
53832 + set_fs(oldfs);
53833 + kfree(elf_buf);
53834 } else {
53835 if (clear_user(buffer, tsz))
53836 return -EFAULT;
53837 @@ -541,6 +547,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
53838
53839 static int open_kcore(struct inode *inode, struct file *filp)
53840 {
53841 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
53842 + return -EPERM;
53843 +#endif
53844 if (!capable(CAP_SYS_RAWIO))
53845 return -EPERM;
53846 if (kcore_need_update)
53847 diff --git a/fs/proc/kmsg.c b/fs/proc/kmsg.c
53848 index 7ca7834..cfe90a4 100644
53849 --- a/fs/proc/kmsg.c
53850 +++ b/fs/proc/kmsg.c
53851 @@ -12,37 +12,37 @@
53852 #include <linux/poll.h>
53853 #include <linux/proc_fs.h>
53854 #include <linux/fs.h>
53855 +#include <linux/syslog.h>
53856
53857 #include <asm/uaccess.h>
53858 #include <asm/io.h>
53859
53860 extern wait_queue_head_t log_wait;
53861
53862 -extern int do_syslog(int type, char __user *bug, int count);
53863 -
53864 static int kmsg_open(struct inode * inode, struct file * file)
53865 {
53866 - return do_syslog(1,NULL,0);
53867 + return do_syslog(SYSLOG_ACTION_OPEN, NULL, 0, SYSLOG_FROM_FILE);
53868 }
53869
53870 static int kmsg_release(struct inode * inode, struct file * file)
53871 {
53872 - (void) do_syslog(0,NULL,0);
53873 + (void) do_syslog(SYSLOG_ACTION_CLOSE, NULL, 0, SYSLOG_FROM_FILE);
53874 return 0;
53875 }
53876
53877 static ssize_t kmsg_read(struct file *file, char __user *buf,
53878 size_t count, loff_t *ppos)
53879 {
53880 - if ((file->f_flags & O_NONBLOCK) && !do_syslog(9, NULL, 0))
53881 + if ((file->f_flags & O_NONBLOCK) &&
53882 + !do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_FILE))
53883 return -EAGAIN;
53884 - return do_syslog(2, buf, count);
53885 + return do_syslog(SYSLOG_ACTION_READ, buf, count, SYSLOG_FROM_FILE);
53886 }
53887
53888 static unsigned int kmsg_poll(struct file *file, poll_table *wait)
53889 {
53890 poll_wait(file, &log_wait, wait);
53891 - if (do_syslog(9, NULL, 0))
53892 + if (do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_FILE))
53893 return POLLIN | POLLRDNORM;
53894 return 0;
53895 }
53896 diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
53897 index a65239c..ad1182a 100644
53898 --- a/fs/proc/meminfo.c
53899 +++ b/fs/proc/meminfo.c
53900 @@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
53901 unsigned long pages[NR_LRU_LISTS];
53902 int lru;
53903
53904 + pax_track_stack();
53905 +
53906 /*
53907 * display in kilobytes.
53908 */
53909 @@ -149,7 +151,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
53910 vmi.used >> 10,
53911 vmi.largest_chunk >> 10
53912 #ifdef CONFIG_MEMORY_FAILURE
53913 - ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
53914 + ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
53915 #endif
53916 );
53917
53918 diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
53919 index 9fe7d7e..cdb62c9 100644
53920 --- a/fs/proc/nommu.c
53921 +++ b/fs/proc/nommu.c
53922 @@ -67,7 +67,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
53923 if (len < 1)
53924 len = 1;
53925 seq_printf(m, "%*c", len, ' ');
53926 - seq_path(m, &file->f_path, "");
53927 + seq_path(m, &file->f_path, "\n\\");
53928 }
53929
53930 seq_putc(m, '\n');
53931 diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
53932 index 04d1270..25e1173 100644
53933 --- a/fs/proc/proc_net.c
53934 +++ b/fs/proc/proc_net.c
53935 @@ -104,6 +104,17 @@ static struct net *get_proc_task_net(struct inode *dir)
53936 struct task_struct *task;
53937 struct nsproxy *ns;
53938 struct net *net = NULL;
53939 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53940 + const struct cred *cred = current_cred();
53941 +#endif
53942 +
53943 +#ifdef CONFIG_GRKERNSEC_PROC_USER
53944 + if (cred->fsuid)
53945 + return net;
53946 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53947 + if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
53948 + return net;
53949 +#endif
53950
53951 rcu_read_lock();
53952 task = pid_task(proc_pid(dir), PIDTYPE_PID);
53953 diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
53954 index f667e8a..55f4d96 100644
53955 --- a/fs/proc/proc_sysctl.c
53956 +++ b/fs/proc/proc_sysctl.c
53957 @@ -7,11 +7,13 @@
53958 #include <linux/security.h>
53959 #include "internal.h"
53960
53961 +extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
53962 +
53963 static const struct dentry_operations proc_sys_dentry_operations;
53964 static const struct file_operations proc_sys_file_operations;
53965 -static const struct inode_operations proc_sys_inode_operations;
53966 +const struct inode_operations proc_sys_inode_operations;
53967 static const struct file_operations proc_sys_dir_file_operations;
53968 -static const struct inode_operations proc_sys_dir_operations;
53969 +const struct inode_operations proc_sys_dir_operations;
53970
53971 static struct inode *proc_sys_make_inode(struct super_block *sb,
53972 struct ctl_table_header *head, struct ctl_table *table)
53973 @@ -109,6 +111,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
53974 if (!p)
53975 goto out;
53976
53977 + if (gr_handle_sysctl(p, MAY_EXEC))
53978 + goto out;
53979 +
53980 err = ERR_PTR(-ENOMEM);
53981 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
53982 if (h)
53983 @@ -119,6 +124,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
53984
53985 err = NULL;
53986 dentry->d_op = &proc_sys_dentry_operations;
53987 +
53988 + gr_handle_proc_create(dentry, inode);
53989 +
53990 d_add(dentry, inode);
53991
53992 out:
53993 @@ -200,6 +208,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
53994 return -ENOMEM;
53995 } else {
53996 child->d_op = &proc_sys_dentry_operations;
53997 +
53998 + gr_handle_proc_create(child, inode);
53999 +
54000 d_add(child, inode);
54001 }
54002 } else {
54003 @@ -228,6 +239,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
54004 if (*pos < file->f_pos)
54005 continue;
54006
54007 + if (gr_handle_sysctl(table, 0))
54008 + continue;
54009 +
54010 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
54011 if (res)
54012 return res;
54013 @@ -344,6 +358,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
54014 if (IS_ERR(head))
54015 return PTR_ERR(head);
54016
54017 + if (table && gr_handle_sysctl(table, MAY_EXEC))
54018 + return -ENOENT;
54019 +
54020 generic_fillattr(inode, stat);
54021 if (table)
54022 stat->mode = (stat->mode & S_IFMT) | table->mode;
54023 @@ -358,17 +375,18 @@ static const struct file_operations proc_sys_file_operations = {
54024 };
54025
54026 static const struct file_operations proc_sys_dir_file_operations = {
54027 + .read = generic_read_dir,
54028 .readdir = proc_sys_readdir,
54029 .llseek = generic_file_llseek,
54030 };
54031
54032 -static const struct inode_operations proc_sys_inode_operations = {
54033 +const struct inode_operations proc_sys_inode_operations = {
54034 .permission = proc_sys_permission,
54035 .setattr = proc_sys_setattr,
54036 .getattr = proc_sys_getattr,
54037 };
54038
54039 -static const struct inode_operations proc_sys_dir_operations = {
54040 +const struct inode_operations proc_sys_dir_operations = {
54041 .lookup = proc_sys_lookup,
54042 .permission = proc_sys_permission,
54043 .setattr = proc_sys_setattr,
54044 diff --git a/fs/proc/root.c b/fs/proc/root.c
54045 index b080b79..d957e63 100644
54046 --- a/fs/proc/root.c
54047 +++ b/fs/proc/root.c
54048 @@ -134,7 +134,15 @@ void __init proc_root_init(void)
54049 #ifdef CONFIG_PROC_DEVICETREE
54050 proc_device_tree_init();
54051 #endif
54052 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
54053 +#ifdef CONFIG_GRKERNSEC_PROC_USER
54054 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
54055 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54056 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
54057 +#endif
54058 +#else
54059 proc_mkdir("bus", NULL);
54060 +#endif
54061 proc_sys_init();
54062 }
54063
54064 diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
54065 index 3b7b82a..4b420b0 100644
54066 --- a/fs/proc/task_mmu.c
54067 +++ b/fs/proc/task_mmu.c
54068 @@ -8,6 +8,7 @@
54069 #include <linux/mempolicy.h>
54070 #include <linux/swap.h>
54071 #include <linux/swapops.h>
54072 +#include <linux/grsecurity.h>
54073
54074 #include <asm/elf.h>
54075 #include <asm/uaccess.h>
54076 @@ -46,15 +47,26 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
54077 "VmStk:\t%8lu kB\n"
54078 "VmExe:\t%8lu kB\n"
54079 "VmLib:\t%8lu kB\n"
54080 - "VmPTE:\t%8lu kB\n",
54081 - hiwater_vm << (PAGE_SHIFT-10),
54082 + "VmPTE:\t%8lu kB\n"
54083 +
54084 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
54085 + "CsBase:\t%8lx\nCsLim:\t%8lx\n"
54086 +#endif
54087 +
54088 + ,hiwater_vm << (PAGE_SHIFT-10),
54089 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
54090 mm->locked_vm << (PAGE_SHIFT-10),
54091 hiwater_rss << (PAGE_SHIFT-10),
54092 total_rss << (PAGE_SHIFT-10),
54093 data << (PAGE_SHIFT-10),
54094 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
54095 - (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
54096 + (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10
54097 +
54098 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
54099 + , mm->context.user_cs_base, mm->context.user_cs_limit
54100 +#endif
54101 +
54102 + );
54103 }
54104
54105 unsigned long task_vsize(struct mm_struct *mm)
54106 @@ -175,7 +187,8 @@ static void m_stop(struct seq_file *m, void *v)
54107 struct proc_maps_private *priv = m->private;
54108 struct vm_area_struct *vma = v;
54109
54110 - vma_stop(priv, vma);
54111 + if (!IS_ERR(vma))
54112 + vma_stop(priv, vma);
54113 if (priv->task)
54114 put_task_struct(priv->task);
54115 }
54116 @@ -199,6 +212,12 @@ static int do_maps_open(struct inode *inode, struct file *file,
54117 return ret;
54118 }
54119
54120 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54121 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
54122 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
54123 + _mm->pax_flags & MF_PAX_SEGMEXEC))
54124 +#endif
54125 +
54126 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
54127 {
54128 struct mm_struct *mm = vma->vm_mm;
54129 @@ -206,7 +225,6 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
54130 int flags = vma->vm_flags;
54131 unsigned long ino = 0;
54132 unsigned long long pgoff = 0;
54133 - unsigned long start;
54134 dev_t dev = 0;
54135 int len;
54136
54137 @@ -217,20 +235,23 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
54138 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
54139 }
54140
54141 - /* We don't show the stack guard page in /proc/maps */
54142 - start = vma->vm_start;
54143 - if (vma->vm_flags & VM_GROWSDOWN)
54144 - if (!vma_stack_continue(vma->vm_prev, vma->vm_start))
54145 - start += PAGE_SIZE;
54146 -
54147 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
54148 - start,
54149 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54150 + PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start,
54151 + PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end,
54152 +#else
54153 + vma->vm_start,
54154 vma->vm_end,
54155 +#endif
54156 flags & VM_READ ? 'r' : '-',
54157 flags & VM_WRITE ? 'w' : '-',
54158 flags & VM_EXEC ? 'x' : '-',
54159 flags & VM_MAYSHARE ? 's' : 'p',
54160 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54161 + PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
54162 +#else
54163 pgoff,
54164 +#endif
54165 MAJOR(dev), MINOR(dev), ino, &len);
54166
54167 /*
54168 @@ -239,7 +260,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
54169 */
54170 if (file) {
54171 pad_len_spaces(m, len);
54172 - seq_path(m, &file->f_path, "\n");
54173 + seq_path(m, &file->f_path, "\n\\");
54174 } else {
54175 const char *name = arch_vma_name(vma);
54176 if (!name) {
54177 @@ -247,8 +268,9 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
54178 if (vma->vm_start <= mm->brk &&
54179 vma->vm_end >= mm->start_brk) {
54180 name = "[heap]";
54181 - } else if (vma->vm_start <= mm->start_stack &&
54182 - vma->vm_end >= mm->start_stack) {
54183 + } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
54184 + (vma->vm_start <= mm->start_stack &&
54185 + vma->vm_end >= mm->start_stack)) {
54186 name = "[stack]";
54187 }
54188 } else {
54189 @@ -269,6 +291,13 @@ static int show_map(struct seq_file *m, void *v)
54190 struct proc_maps_private *priv = m->private;
54191 struct task_struct *task = priv->task;
54192
54193 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54194 + if (current->exec_id != m->exec_id) {
54195 + gr_log_badprocpid("maps");
54196 + return 0;
54197 + }
54198 +#endif
54199 +
54200 show_map_vma(m, vma);
54201
54202 if (m->count < m->size) /* vma is copied successfully */
54203 @@ -390,10 +419,23 @@ static int show_smap(struct seq_file *m, void *v)
54204 .private = &mss,
54205 };
54206
54207 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54208 + if (current->exec_id != m->exec_id) {
54209 + gr_log_badprocpid("smaps");
54210 + return 0;
54211 + }
54212 +#endif
54213 memset(&mss, 0, sizeof mss);
54214 - mss.vma = vma;
54215 - if (vma->vm_mm && !is_vm_hugetlb_page(vma))
54216 - walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
54217 +
54218 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54219 + if (!PAX_RAND_FLAGS(vma->vm_mm)) {
54220 +#endif
54221 + mss.vma = vma;
54222 + if (vma->vm_mm && !is_vm_hugetlb_page(vma))
54223 + walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
54224 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54225 + }
54226 +#endif
54227
54228 show_map_vma(m, vma);
54229
54230 @@ -409,7 +451,11 @@ static int show_smap(struct seq_file *m, void *v)
54231 "Swap: %8lu kB\n"
54232 "KernelPageSize: %8lu kB\n"
54233 "MMUPageSize: %8lu kB\n",
54234 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54235 + PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
54236 +#else
54237 (vma->vm_end - vma->vm_start) >> 10,
54238 +#endif
54239 mss.resident >> 10,
54240 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
54241 mss.shared_clean >> 10,
54242 diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
54243 index 8f5c05d..c99c76d 100644
54244 --- a/fs/proc/task_nommu.c
54245 +++ b/fs/proc/task_nommu.c
54246 @@ -50,7 +50,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
54247 else
54248 bytes += kobjsize(mm);
54249
54250 - if (current->fs && current->fs->users > 1)
54251 + if (current->fs && atomic_read(&current->fs->users) > 1)
54252 sbytes += kobjsize(current->fs);
54253 else
54254 bytes += kobjsize(current->fs);
54255 @@ -154,7 +154,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
54256 if (len < 1)
54257 len = 1;
54258 seq_printf(m, "%*c", len, ' ');
54259 - seq_path(m, &file->f_path, "");
54260 + seq_path(m, &file->f_path, "\n\\");
54261 }
54262
54263 seq_putc(m, '\n');
54264 diff --git a/fs/readdir.c b/fs/readdir.c
54265 index 7723401..30059a6 100644
54266 --- a/fs/readdir.c
54267 +++ b/fs/readdir.c
54268 @@ -16,6 +16,7 @@
54269 #include <linux/security.h>
54270 #include <linux/syscalls.h>
54271 #include <linux/unistd.h>
54272 +#include <linux/namei.h>
54273
54274 #include <asm/uaccess.h>
54275
54276 @@ -67,6 +68,7 @@ struct old_linux_dirent {
54277
54278 struct readdir_callback {
54279 struct old_linux_dirent __user * dirent;
54280 + struct file * file;
54281 int result;
54282 };
54283
54284 @@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
54285 buf->result = -EOVERFLOW;
54286 return -EOVERFLOW;
54287 }
54288 +
54289 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
54290 + return 0;
54291 +
54292 buf->result++;
54293 dirent = buf->dirent;
54294 if (!access_ok(VERIFY_WRITE, dirent,
54295 @@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
54296
54297 buf.result = 0;
54298 buf.dirent = dirent;
54299 + buf.file = file;
54300
54301 error = vfs_readdir(file, fillonedir, &buf);
54302 if (buf.result)
54303 @@ -142,6 +149,7 @@ struct linux_dirent {
54304 struct getdents_callback {
54305 struct linux_dirent __user * current_dir;
54306 struct linux_dirent __user * previous;
54307 + struct file * file;
54308 int count;
54309 int error;
54310 };
54311 @@ -162,6 +170,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
54312 buf->error = -EOVERFLOW;
54313 return -EOVERFLOW;
54314 }
54315 +
54316 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
54317 + return 0;
54318 +
54319 dirent = buf->previous;
54320 if (dirent) {
54321 if (__put_user(offset, &dirent->d_off))
54322 @@ -209,6 +221,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
54323 buf.previous = NULL;
54324 buf.count = count;
54325 buf.error = 0;
54326 + buf.file = file;
54327
54328 error = vfs_readdir(file, filldir, &buf);
54329 if (error >= 0)
54330 @@ -228,6 +241,7 @@ out:
54331 struct getdents_callback64 {
54332 struct linux_dirent64 __user * current_dir;
54333 struct linux_dirent64 __user * previous;
54334 + struct file *file;
54335 int count;
54336 int error;
54337 };
54338 @@ -242,6 +256,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
54339 buf->error = -EINVAL; /* only used if we fail.. */
54340 if (reclen > buf->count)
54341 return -EINVAL;
54342 +
54343 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
54344 + return 0;
54345 +
54346 dirent = buf->previous;
54347 if (dirent) {
54348 if (__put_user(offset, &dirent->d_off))
54349 @@ -289,6 +307,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
54350
54351 buf.current_dir = dirent;
54352 buf.previous = NULL;
54353 + buf.file = file;
54354 buf.count = count;
54355 buf.error = 0;
54356
54357 @@ -297,7 +316,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
54358 error = buf.error;
54359 lastdirent = buf.previous;
54360 if (lastdirent) {
54361 - typeof(lastdirent->d_off) d_off = file->f_pos;
54362 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
54363 if (__put_user(d_off, &lastdirent->d_off))
54364 error = -EFAULT;
54365 else
54366 diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c
54367 index d42c30c..4fd8718 100644
54368 --- a/fs/reiserfs/dir.c
54369 +++ b/fs/reiserfs/dir.c
54370 @@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentry *dentry, void *dirent,
54371 struct reiserfs_dir_entry de;
54372 int ret = 0;
54373
54374 + pax_track_stack();
54375 +
54376 reiserfs_write_lock(inode->i_sb);
54377
54378 reiserfs_check_lock_depth(inode->i_sb, "readdir");
54379 diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
54380 index 128d3f7..8840d44 100644
54381 --- a/fs/reiserfs/do_balan.c
54382 +++ b/fs/reiserfs/do_balan.c
54383 @@ -2058,7 +2058,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
54384 return;
54385 }
54386
54387 - atomic_inc(&(fs_generation(tb->tb_sb)));
54388 + atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
54389 do_balance_starts(tb);
54390
54391 /* balance leaf returns 0 except if combining L R and S into
54392 diff --git a/fs/reiserfs/item_ops.c b/fs/reiserfs/item_ops.c
54393 index 72cb1cc..d0e3181 100644
54394 --- a/fs/reiserfs/item_ops.c
54395 +++ b/fs/reiserfs/item_ops.c
54396 @@ -102,7 +102,7 @@ static void sd_print_vi(struct virtual_item *vi)
54397 vi->vi_index, vi->vi_type, vi->vi_ih);
54398 }
54399
54400 -static struct item_operations stat_data_ops = {
54401 +static const struct item_operations stat_data_ops = {
54402 .bytes_number = sd_bytes_number,
54403 .decrement_key = sd_decrement_key,
54404 .is_left_mergeable = sd_is_left_mergeable,
54405 @@ -196,7 +196,7 @@ static void direct_print_vi(struct virtual_item *vi)
54406 vi->vi_index, vi->vi_type, vi->vi_ih);
54407 }
54408
54409 -static struct item_operations direct_ops = {
54410 +static const struct item_operations direct_ops = {
54411 .bytes_number = direct_bytes_number,
54412 .decrement_key = direct_decrement_key,
54413 .is_left_mergeable = direct_is_left_mergeable,
54414 @@ -341,7 +341,7 @@ static void indirect_print_vi(struct virtual_item *vi)
54415 vi->vi_index, vi->vi_type, vi->vi_ih);
54416 }
54417
54418 -static struct item_operations indirect_ops = {
54419 +static const struct item_operations indirect_ops = {
54420 .bytes_number = indirect_bytes_number,
54421 .decrement_key = indirect_decrement_key,
54422 .is_left_mergeable = indirect_is_left_mergeable,
54423 @@ -628,7 +628,7 @@ static void direntry_print_vi(struct virtual_item *vi)
54424 printk("\n");
54425 }
54426
54427 -static struct item_operations direntry_ops = {
54428 +static const struct item_operations direntry_ops = {
54429 .bytes_number = direntry_bytes_number,
54430 .decrement_key = direntry_decrement_key,
54431 .is_left_mergeable = direntry_is_left_mergeable,
54432 @@ -724,7 +724,7 @@ static void errcatch_print_vi(struct virtual_item *vi)
54433 "Invalid item type observed, run fsck ASAP");
54434 }
54435
54436 -static struct item_operations errcatch_ops = {
54437 +static const struct item_operations errcatch_ops = {
54438 errcatch_bytes_number,
54439 errcatch_decrement_key,
54440 errcatch_is_left_mergeable,
54441 @@ -746,7 +746,7 @@ static struct item_operations errcatch_ops = {
54442 #error Item types must use disk-format assigned values.
54443 #endif
54444
54445 -struct item_operations *item_ops[TYPE_ANY + 1] = {
54446 +const struct item_operations * const item_ops[TYPE_ANY + 1] = {
54447 &stat_data_ops,
54448 &indirect_ops,
54449 &direct_ops,
54450 diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
54451 index b5fe0aa..e0e25c4 100644
54452 --- a/fs/reiserfs/journal.c
54453 +++ b/fs/reiserfs/journal.c
54454 @@ -2329,6 +2329,8 @@ static struct buffer_head *reiserfs_breada(struct block_device *dev,
54455 struct buffer_head *bh;
54456 int i, j;
54457
54458 + pax_track_stack();
54459 +
54460 bh = __getblk(dev, block, bufsize);
54461 if (buffer_uptodate(bh))
54462 return (bh);
54463 diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
54464 index 2715791..b8996db 100644
54465 --- a/fs/reiserfs/namei.c
54466 +++ b/fs/reiserfs/namei.c
54467 @@ -1214,6 +1214,8 @@ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry,
54468 unsigned long savelink = 1;
54469 struct timespec ctime;
54470
54471 + pax_track_stack();
54472 +
54473 /* three balancings: (1) old name removal, (2) new name insertion
54474 and (3) maybe "save" link insertion
54475 stat data updates: (1) old directory,
54476 diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
54477 index 9229e55..3d2e3b7 100644
54478 --- a/fs/reiserfs/procfs.c
54479 +++ b/fs/reiserfs/procfs.c
54480 @@ -123,7 +123,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
54481 "SMALL_TAILS " : "NO_TAILS ",
54482 replay_only(sb) ? "REPLAY_ONLY " : "",
54483 convert_reiserfs(sb) ? "CONV " : "",
54484 - atomic_read(&r->s_generation_counter),
54485 + atomic_read_unchecked(&r->s_generation_counter),
54486 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
54487 SF(s_do_balance), SF(s_unneeded_left_neighbor),
54488 SF(s_good_search_by_key_reada), SF(s_bmaps),
54489 @@ -309,6 +309,8 @@ static int show_journal(struct seq_file *m, struct super_block *sb)
54490 struct journal_params *jp = &rs->s_v1.s_journal;
54491 char b[BDEVNAME_SIZE];
54492
54493 + pax_track_stack();
54494 +
54495 seq_printf(m, /* on-disk fields */
54496 "jp_journal_1st_block: \t%i\n"
54497 "jp_journal_dev: \t%s[%x]\n"
54498 diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
54499 index d036ee5..4c7dca1 100644
54500 --- a/fs/reiserfs/stree.c
54501 +++ b/fs/reiserfs/stree.c
54502 @@ -1159,6 +1159,8 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th,
54503 int iter = 0;
54504 #endif
54505
54506 + pax_track_stack();
54507 +
54508 BUG_ON(!th->t_trans_id);
54509
54510 init_tb_struct(th, &s_del_balance, sb, path,
54511 @@ -1296,6 +1298,8 @@ void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th,
54512 int retval;
54513 int quota_cut_bytes = 0;
54514
54515 + pax_track_stack();
54516 +
54517 BUG_ON(!th->t_trans_id);
54518
54519 le_key2cpu_key(&cpu_key, key);
54520 @@ -1525,6 +1529,8 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th,
54521 int quota_cut_bytes;
54522 loff_t tail_pos = 0;
54523
54524 + pax_track_stack();
54525 +
54526 BUG_ON(!th->t_trans_id);
54527
54528 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
54529 @@ -1920,6 +1926,8 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree
54530 int retval;
54531 int fs_gen;
54532
54533 + pax_track_stack();
54534 +
54535 BUG_ON(!th->t_trans_id);
54536
54537 fs_gen = get_generation(inode->i_sb);
54538 @@ -2007,6 +2015,8 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th,
54539 int fs_gen = 0;
54540 int quota_bytes = 0;
54541
54542 + pax_track_stack();
54543 +
54544 BUG_ON(!th->t_trans_id);
54545
54546 if (inode) { /* Do we count quotas for item? */
54547 diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
54548 index 7cb1285..c726cd0 100644
54549 --- a/fs/reiserfs/super.c
54550 +++ b/fs/reiserfs/super.c
54551 @@ -916,6 +916,8 @@ static int reiserfs_parse_options(struct super_block *s, char *options, /* strin
54552 {.option_name = NULL}
54553 };
54554
54555 + pax_track_stack();
54556 +
54557 *blocks = 0;
54558 if (!options || !*options)
54559 /* use default configuration: create tails, journaling on, no
54560 diff --git a/fs/select.c b/fs/select.c
54561 index fd38ce2..f5381b8 100644
54562 --- a/fs/select.c
54563 +++ b/fs/select.c
54564 @@ -20,6 +20,7 @@
54565 #include <linux/module.h>
54566 #include <linux/slab.h>
54567 #include <linux/poll.h>
54568 +#include <linux/security.h>
54569 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
54570 #include <linux/file.h>
54571 #include <linux/fdtable.h>
54572 @@ -401,6 +402,8 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
54573 int retval, i, timed_out = 0;
54574 unsigned long slack = 0;
54575
54576 + pax_track_stack();
54577 +
54578 rcu_read_lock();
54579 retval = max_select_fd(n, fds);
54580 rcu_read_unlock();
54581 @@ -529,6 +532,8 @@ int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
54582 /* Allocate small arguments on the stack to save memory and be faster */
54583 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
54584
54585 + pax_track_stack();
54586 +
54587 ret = -EINVAL;
54588 if (n < 0)
54589 goto out_nofds;
54590 @@ -821,6 +826,9 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
54591 struct poll_list *walk = head;
54592 unsigned long todo = nfds;
54593
54594 + pax_track_stack();
54595 +
54596 + gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
54597 if (nfds > current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
54598 return -EINVAL;
54599
54600 diff --git a/fs/seq_file.c b/fs/seq_file.c
54601 index eae7d9d..4ddabe2 100644
54602 --- a/fs/seq_file.c
54603 +++ b/fs/seq_file.c
54604 @@ -9,6 +9,7 @@
54605 #include <linux/module.h>
54606 #include <linux/seq_file.h>
54607 #include <linux/slab.h>
54608 +#include <linux/sched.h>
54609
54610 #include <asm/uaccess.h>
54611 #include <asm/page.h>
54612 @@ -40,6 +41,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
54613 memset(p, 0, sizeof(*p));
54614 mutex_init(&p->lock);
54615 p->op = op;
54616 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54617 + p->exec_id = current->exec_id;
54618 +#endif
54619
54620 /*
54621 * Wrappers around seq_open(e.g. swaps_open) need to be
54622 @@ -76,7 +80,8 @@ static int traverse(struct seq_file *m, loff_t offset)
54623 return 0;
54624 }
54625 if (!m->buf) {
54626 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
54627 + m->size = PAGE_SIZE;
54628 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
54629 if (!m->buf)
54630 return -ENOMEM;
54631 }
54632 @@ -116,7 +121,8 @@ static int traverse(struct seq_file *m, loff_t offset)
54633 Eoverflow:
54634 m->op->stop(m, p);
54635 kfree(m->buf);
54636 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
54637 + m->size <<= 1;
54638 + m->buf = kmalloc(m->size, GFP_KERNEL);
54639 return !m->buf ? -ENOMEM : -EAGAIN;
54640 }
54641
54642 @@ -169,7 +175,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
54643 m->version = file->f_version;
54644 /* grab buffer if we didn't have one */
54645 if (!m->buf) {
54646 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
54647 + m->size = PAGE_SIZE;
54648 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
54649 if (!m->buf)
54650 goto Enomem;
54651 }
54652 @@ -210,7 +217,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
54653 goto Fill;
54654 m->op->stop(m, p);
54655 kfree(m->buf);
54656 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
54657 + m->size <<= 1;
54658 + m->buf = kmalloc(m->size, GFP_KERNEL);
54659 if (!m->buf)
54660 goto Enomem;
54661 m->count = 0;
54662 @@ -551,7 +559,7 @@ static void single_stop(struct seq_file *p, void *v)
54663 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
54664 void *data)
54665 {
54666 - struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
54667 + seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
54668 int res = -ENOMEM;
54669
54670 if (op) {
54671 diff --git a/fs/smbfs/proc.c b/fs/smbfs/proc.c
54672 index 71c29b6..54694dd 100644
54673 --- a/fs/smbfs/proc.c
54674 +++ b/fs/smbfs/proc.c
54675 @@ -266,9 +266,9 @@ int smb_setcodepage(struct smb_sb_info *server, struct smb_nls_codepage *cp)
54676
54677 out:
54678 if (server->local_nls != NULL && server->remote_nls != NULL)
54679 - server->ops->convert = convert_cp;
54680 + *(void **)&server->ops->convert = convert_cp;
54681 else
54682 - server->ops->convert = convert_memcpy;
54683 + *(void **)&server->ops->convert = convert_memcpy;
54684
54685 smb_unlock_server(server);
54686 return n;
54687 @@ -933,9 +933,9 @@ smb_newconn(struct smb_sb_info *server, struct smb_conn_opt *opt)
54688
54689 /* FIXME: the win9x code wants to modify these ... (seek/trunc bug) */
54690 if (server->mnt->flags & SMB_MOUNT_OLDATTR) {
54691 - server->ops->getattr = smb_proc_getattr_core;
54692 + *(void **)&server->ops->getattr = smb_proc_getattr_core;
54693 } else if (server->mnt->flags & SMB_MOUNT_DIRATTR) {
54694 - server->ops->getattr = smb_proc_getattr_ff;
54695 + *(void **)&server->ops->getattr = smb_proc_getattr_ff;
54696 }
54697
54698 /* Decode server capabilities */
54699 @@ -3439,7 +3439,7 @@ out:
54700 static void
54701 install_ops(struct smb_ops *dst, struct smb_ops *src)
54702 {
54703 - memcpy(dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
54704 + memcpy((void *)dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
54705 }
54706
54707 /* < LANMAN2 */
54708 diff --git a/fs/smbfs/symlink.c b/fs/smbfs/symlink.c
54709 index 00b2909..2ace383 100644
54710 --- a/fs/smbfs/symlink.c
54711 +++ b/fs/smbfs/symlink.c
54712 @@ -55,7 +55,7 @@ static void *smb_follow_link(struct dentry *dentry, struct nameidata *nd)
54713
54714 static void smb_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
54715 {
54716 - char *s = nd_get_link(nd);
54717 + const char *s = nd_get_link(nd);
54718 if (!IS_ERR(s))
54719 __putname(s);
54720 }
54721 diff --git a/fs/splice.c b/fs/splice.c
54722 index bb92b7c..5aa72b0 100644
54723 --- a/fs/splice.c
54724 +++ b/fs/splice.c
54725 @@ -185,7 +185,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
54726 pipe_lock(pipe);
54727
54728 for (;;) {
54729 - if (!pipe->readers) {
54730 + if (!atomic_read(&pipe->readers)) {
54731 send_sig(SIGPIPE, current, 0);
54732 if (!ret)
54733 ret = -EPIPE;
54734 @@ -239,9 +239,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
54735 do_wakeup = 0;
54736 }
54737
54738 - pipe->waiting_writers++;
54739 + atomic_inc(&pipe->waiting_writers);
54740 pipe_wait(pipe);
54741 - pipe->waiting_writers--;
54742 + atomic_dec(&pipe->waiting_writers);
54743 }
54744
54745 pipe_unlock(pipe);
54746 @@ -285,6 +285,8 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
54747 .spd_release = spd_release_page,
54748 };
54749
54750 + pax_track_stack();
54751 +
54752 index = *ppos >> PAGE_CACHE_SHIFT;
54753 loff = *ppos & ~PAGE_CACHE_MASK;
54754 req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
54755 @@ -521,7 +523,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
54756 old_fs = get_fs();
54757 set_fs(get_ds());
54758 /* The cast to a user pointer is valid due to the set_fs() */
54759 - res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
54760 + res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
54761 set_fs(old_fs);
54762
54763 return res;
54764 @@ -536,7 +538,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
54765 old_fs = get_fs();
54766 set_fs(get_ds());
54767 /* The cast to a user pointer is valid due to the set_fs() */
54768 - res = vfs_write(file, (const char __user *)buf, count, &pos);
54769 + res = vfs_write(file, (const char __force_user *)buf, count, &pos);
54770 set_fs(old_fs);
54771
54772 return res;
54773 @@ -565,6 +567,8 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
54774 .spd_release = spd_release_page,
54775 };
54776
54777 + pax_track_stack();
54778 +
54779 index = *ppos >> PAGE_CACHE_SHIFT;
54780 offset = *ppos & ~PAGE_CACHE_MASK;
54781 nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
54782 @@ -578,7 +582,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
54783 goto err;
54784
54785 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
54786 - vec[i].iov_base = (void __user *) page_address(page);
54787 + vec[i].iov_base = (__force void __user *) page_address(page);
54788 vec[i].iov_len = this_len;
54789 pages[i] = page;
54790 spd.nr_pages++;
54791 @@ -800,10 +804,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
54792 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
54793 {
54794 while (!pipe->nrbufs) {
54795 - if (!pipe->writers)
54796 + if (!atomic_read(&pipe->writers))
54797 return 0;
54798
54799 - if (!pipe->waiting_writers && sd->num_spliced)
54800 + if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
54801 return 0;
54802
54803 if (sd->flags & SPLICE_F_NONBLOCK)
54804 @@ -1140,7 +1144,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
54805 * out of the pipe right after the splice_to_pipe(). So set
54806 * PIPE_READERS appropriately.
54807 */
54808 - pipe->readers = 1;
54809 + atomic_set(&pipe->readers, 1);
54810
54811 current->splice_pipe = pipe;
54812 }
54813 @@ -1593,6 +1597,8 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
54814 .spd_release = spd_release_page,
54815 };
54816
54817 + pax_track_stack();
54818 +
54819 pipe = pipe_info(file->f_path.dentry->d_inode);
54820 if (!pipe)
54821 return -EBADF;
54822 @@ -1701,9 +1707,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
54823 ret = -ERESTARTSYS;
54824 break;
54825 }
54826 - if (!pipe->writers)
54827 + if (!atomic_read(&pipe->writers))
54828 break;
54829 - if (!pipe->waiting_writers) {
54830 + if (!atomic_read(&pipe->waiting_writers)) {
54831 if (flags & SPLICE_F_NONBLOCK) {
54832 ret = -EAGAIN;
54833 break;
54834 @@ -1735,7 +1741,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
54835 pipe_lock(pipe);
54836
54837 while (pipe->nrbufs >= PIPE_BUFFERS) {
54838 - if (!pipe->readers) {
54839 + if (!atomic_read(&pipe->readers)) {
54840 send_sig(SIGPIPE, current, 0);
54841 ret = -EPIPE;
54842 break;
54843 @@ -1748,9 +1754,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
54844 ret = -ERESTARTSYS;
54845 break;
54846 }
54847 - pipe->waiting_writers++;
54848 + atomic_inc(&pipe->waiting_writers);
54849 pipe_wait(pipe);
54850 - pipe->waiting_writers--;
54851 + atomic_dec(&pipe->waiting_writers);
54852 }
54853
54854 pipe_unlock(pipe);
54855 @@ -1786,14 +1792,14 @@ retry:
54856 pipe_double_lock(ipipe, opipe);
54857
54858 do {
54859 - if (!opipe->readers) {
54860 + if (!atomic_read(&opipe->readers)) {
54861 send_sig(SIGPIPE, current, 0);
54862 if (!ret)
54863 ret = -EPIPE;
54864 break;
54865 }
54866
54867 - if (!ipipe->nrbufs && !ipipe->writers)
54868 + if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
54869 break;
54870
54871 /*
54872 @@ -1893,7 +1899,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
54873 pipe_double_lock(ipipe, opipe);
54874
54875 do {
54876 - if (!opipe->readers) {
54877 + if (!atomic_read(&opipe->readers)) {
54878 send_sig(SIGPIPE, current, 0);
54879 if (!ret)
54880 ret = -EPIPE;
54881 @@ -1938,7 +1944,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
54882 * return EAGAIN if we have the potential of some data in the
54883 * future, otherwise just return 0
54884 */
54885 - if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
54886 + if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
54887 ret = -EAGAIN;
54888
54889 pipe_unlock(ipipe);
54890 diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
54891 index e020183..18d64b4 100644
54892 --- a/fs/sysfs/dir.c
54893 +++ b/fs/sysfs/dir.c
54894 @@ -678,6 +678,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
54895 struct sysfs_dirent *sd;
54896 int rc;
54897
54898 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
54899 + const char *parent_name = parent_sd->s_name;
54900 +
54901 + mode = S_IFDIR | S_IRWXU;
54902 +
54903 + if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
54904 + (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
54905 + (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse"))) ||
54906 + (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
54907 + mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
54908 +#endif
54909 +
54910 /* allocate */
54911 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
54912 if (!sd)
54913 diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
54914 index 7118a38..70af853 100644
54915 --- a/fs/sysfs/file.c
54916 +++ b/fs/sysfs/file.c
54917 @@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
54918
54919 struct sysfs_open_dirent {
54920 atomic_t refcnt;
54921 - atomic_t event;
54922 + atomic_unchecked_t event;
54923 wait_queue_head_t poll;
54924 struct list_head buffers; /* goes through sysfs_buffer.list */
54925 };
54926 @@ -53,7 +53,7 @@ struct sysfs_buffer {
54927 size_t count;
54928 loff_t pos;
54929 char * page;
54930 - struct sysfs_ops * ops;
54931 + const struct sysfs_ops * ops;
54932 struct mutex mutex;
54933 int needs_read_fill;
54934 int event;
54935 @@ -75,7 +75,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
54936 {
54937 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
54938 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
54939 - struct sysfs_ops * ops = buffer->ops;
54940 + const struct sysfs_ops * ops = buffer->ops;
54941 int ret = 0;
54942 ssize_t count;
54943
54944 @@ -88,7 +88,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
54945 if (!sysfs_get_active_two(attr_sd))
54946 return -ENODEV;
54947
54948 - buffer->event = atomic_read(&attr_sd->s_attr.open->event);
54949 + buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
54950 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
54951
54952 sysfs_put_active_two(attr_sd);
54953 @@ -199,7 +199,7 @@ flush_write_buffer(struct dentry * dentry, struct sysfs_buffer * buffer, size_t
54954 {
54955 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
54956 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
54957 - struct sysfs_ops * ops = buffer->ops;
54958 + const struct sysfs_ops * ops = buffer->ops;
54959 int rc;
54960
54961 /* need attr_sd for attr and ops, its parent for kobj */
54962 @@ -294,7 +294,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
54963 return -ENOMEM;
54964
54965 atomic_set(&new_od->refcnt, 0);
54966 - atomic_set(&new_od->event, 1);
54967 + atomic_set_unchecked(&new_od->event, 1);
54968 init_waitqueue_head(&new_od->poll);
54969 INIT_LIST_HEAD(&new_od->buffers);
54970 goto retry;
54971 @@ -335,7 +335,7 @@ static int sysfs_open_file(struct inode *inode, struct file *file)
54972 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
54973 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
54974 struct sysfs_buffer *buffer;
54975 - struct sysfs_ops *ops;
54976 + const struct sysfs_ops *ops;
54977 int error = -EACCES;
54978 char *p;
54979
54980 @@ -444,7 +444,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
54981
54982 sysfs_put_active_two(attr_sd);
54983
54984 - if (buffer->event != atomic_read(&od->event))
54985 + if (buffer->event != atomic_read_unchecked(&od->event))
54986 goto trigger;
54987
54988 return DEFAULT_POLLMASK;
54989 @@ -463,7 +463,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
54990
54991 od = sd->s_attr.open;
54992 if (od) {
54993 - atomic_inc(&od->event);
54994 + atomic_inc_unchecked(&od->event);
54995 wake_up_interruptible(&od->poll);
54996 }
54997
54998 diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
54999 index c5081ad..342ea86 100644
55000 --- a/fs/sysfs/symlink.c
55001 +++ b/fs/sysfs/symlink.c
55002 @@ -204,7 +204,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
55003
55004 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
55005 {
55006 - char *page = nd_get_link(nd);
55007 + const char *page = nd_get_link(nd);
55008 if (!IS_ERR(page))
55009 free_page((unsigned long)page);
55010 }
55011 diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
55012 index 1e06853..b06d325 100644
55013 --- a/fs/udf/balloc.c
55014 +++ b/fs/udf/balloc.c
55015 @@ -172,9 +172,7 @@ static void udf_bitmap_free_blocks(struct super_block *sb,
55016
55017 mutex_lock(&sbi->s_alloc_mutex);
55018 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
55019 - if (bloc->logicalBlockNum < 0 ||
55020 - (bloc->logicalBlockNum + count) >
55021 - partmap->s_partition_len) {
55022 + if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
55023 udf_debug("%d < %d || %d + %d > %d\n",
55024 bloc->logicalBlockNum, 0, bloc->logicalBlockNum,
55025 count, partmap->s_partition_len);
55026 @@ -436,9 +434,7 @@ static void udf_table_free_blocks(struct super_block *sb,
55027
55028 mutex_lock(&sbi->s_alloc_mutex);
55029 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
55030 - if (bloc->logicalBlockNum < 0 ||
55031 - (bloc->logicalBlockNum + count) >
55032 - partmap->s_partition_len) {
55033 + if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
55034 udf_debug("%d < %d || %d + %d > %d\n",
55035 bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
55036 partmap->s_partition_len);
55037 diff --git a/fs/udf/inode.c b/fs/udf/inode.c
55038 index 6d24c2c..fff470f 100644
55039 --- a/fs/udf/inode.c
55040 +++ b/fs/udf/inode.c
55041 @@ -484,6 +484,8 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
55042 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
55043 int lastblock = 0;
55044
55045 + pax_track_stack();
55046 +
55047 prev_epos.offset = udf_file_entry_alloc_offset(inode);
55048 prev_epos.block = iinfo->i_location;
55049 prev_epos.bh = NULL;
55050 diff --git a/fs/udf/misc.c b/fs/udf/misc.c
55051 index 9215700..bf1f68e 100644
55052 --- a/fs/udf/misc.c
55053 +++ b/fs/udf/misc.c
55054 @@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
55055
55056 u8 udf_tag_checksum(const struct tag *t)
55057 {
55058 - u8 *data = (u8 *)t;
55059 + const u8 *data = (const u8 *)t;
55060 u8 checksum = 0;
55061 int i;
55062 for (i = 0; i < sizeof(struct tag); ++i)
55063 diff --git a/fs/utimes.c b/fs/utimes.c
55064 index e4c75db..b4df0e0 100644
55065 --- a/fs/utimes.c
55066 +++ b/fs/utimes.c
55067 @@ -1,6 +1,7 @@
55068 #include <linux/compiler.h>
55069 #include <linux/file.h>
55070 #include <linux/fs.h>
55071 +#include <linux/security.h>
55072 #include <linux/linkage.h>
55073 #include <linux/mount.h>
55074 #include <linux/namei.h>
55075 @@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
55076 goto mnt_drop_write_and_out;
55077 }
55078 }
55079 +
55080 + if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
55081 + error = -EACCES;
55082 + goto mnt_drop_write_and_out;
55083 + }
55084 +
55085 mutex_lock(&inode->i_mutex);
55086 error = notify_change(path->dentry, &newattrs);
55087 mutex_unlock(&inode->i_mutex);
55088 diff --git a/fs/xattr.c b/fs/xattr.c
55089 index 6d4f6d3..cda3958 100644
55090 --- a/fs/xattr.c
55091 +++ b/fs/xattr.c
55092 @@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
55093 * Extended attribute SET operations
55094 */
55095 static long
55096 -setxattr(struct dentry *d, const char __user *name, const void __user *value,
55097 +setxattr(struct path *path, const char __user *name, const void __user *value,
55098 size_t size, int flags)
55099 {
55100 int error;
55101 @@ -271,7 +271,13 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
55102 return PTR_ERR(kvalue);
55103 }
55104
55105 - error = vfs_setxattr(d, kname, kvalue, size, flags);
55106 + if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
55107 + error = -EACCES;
55108 + goto out;
55109 + }
55110 +
55111 + error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
55112 +out:
55113 kfree(kvalue);
55114 return error;
55115 }
55116 @@ -288,7 +294,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
55117 return error;
55118 error = mnt_want_write(path.mnt);
55119 if (!error) {
55120 - error = setxattr(path.dentry, name, value, size, flags);
55121 + error = setxattr(&path, name, value, size, flags);
55122 mnt_drop_write(path.mnt);
55123 }
55124 path_put(&path);
55125 @@ -307,7 +313,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
55126 return error;
55127 error = mnt_want_write(path.mnt);
55128 if (!error) {
55129 - error = setxattr(path.dentry, name, value, size, flags);
55130 + error = setxattr(&path, name, value, size, flags);
55131 mnt_drop_write(path.mnt);
55132 }
55133 path_put(&path);
55134 @@ -318,17 +324,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
55135 const void __user *,value, size_t, size, int, flags)
55136 {
55137 struct file *f;
55138 - struct dentry *dentry;
55139 int error = -EBADF;
55140
55141 f = fget(fd);
55142 if (!f)
55143 return error;
55144 - dentry = f->f_path.dentry;
55145 - audit_inode(NULL, dentry);
55146 + audit_inode(NULL, f->f_path.dentry);
55147 error = mnt_want_write_file(f);
55148 if (!error) {
55149 - error = setxattr(dentry, name, value, size, flags);
55150 + error = setxattr(&f->f_path, name, value, size, flags);
55151 mnt_drop_write(f->f_path.mnt);
55152 }
55153 fput(f);
55154 diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
55155 index c6ad7c7..f2847a7 100644
55156 --- a/fs/xattr_acl.c
55157 +++ b/fs/xattr_acl.c
55158 @@ -17,8 +17,8 @@
55159 struct posix_acl *
55160 posix_acl_from_xattr(const void *value, size_t size)
55161 {
55162 - posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
55163 - posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
55164 + const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
55165 + const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
55166 int count;
55167 struct posix_acl *acl;
55168 struct posix_acl_entry *acl_e;
55169 diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
55170 index 942362f..88f96f5 100644
55171 --- a/fs/xfs/linux-2.6/xfs_ioctl.c
55172 +++ b/fs/xfs/linux-2.6/xfs_ioctl.c
55173 @@ -134,7 +134,7 @@ xfs_find_handle(
55174 }
55175
55176 error = -EFAULT;
55177 - if (copy_to_user(hreq->ohandle, &handle, hsize) ||
55178 + if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
55179 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
55180 goto out_put;
55181
55182 @@ -423,7 +423,7 @@ xfs_attrlist_by_handle(
55183 if (IS_ERR(dentry))
55184 return PTR_ERR(dentry);
55185
55186 - kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL);
55187 + kbuf = kzalloc(al_hreq.buflen, GFP_KERNEL);
55188 if (!kbuf)
55189 goto out_dput;
55190
55191 @@ -697,7 +697,7 @@ xfs_ioc_fsgeometry_v1(
55192 xfs_mount_t *mp,
55193 void __user *arg)
55194 {
55195 - xfs_fsop_geom_t fsgeo;
55196 + xfs_fsop_geom_t fsgeo;
55197 int error;
55198
55199 error = xfs_fs_geometry(mp, &fsgeo, 3);
55200 diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c
55201 index bad485a..479bd32 100644
55202 --- a/fs/xfs/linux-2.6/xfs_ioctl32.c
55203 +++ b/fs/xfs/linux-2.6/xfs_ioctl32.c
55204 @@ -75,6 +75,7 @@ xfs_compat_ioc_fsgeometry_v1(
55205 xfs_fsop_geom_t fsgeo;
55206 int error;
55207
55208 + memset(&fsgeo, 0, sizeof(fsgeo));
55209 error = xfs_fs_geometry(mp, &fsgeo, 3);
55210 if (error)
55211 return -error;
55212 diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
55213 index 1f3b4b8..6102f6d 100644
55214 --- a/fs/xfs/linux-2.6/xfs_iops.c
55215 +++ b/fs/xfs/linux-2.6/xfs_iops.c
55216 @@ -468,7 +468,7 @@ xfs_vn_put_link(
55217 struct nameidata *nd,
55218 void *p)
55219 {
55220 - char *s = nd_get_link(nd);
55221 + const char *s = nd_get_link(nd);
55222
55223 if (!IS_ERR(s))
55224 kfree(s);
55225 diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
55226 index 8971fb0..5fc1eb2 100644
55227 --- a/fs/xfs/xfs_bmap.c
55228 +++ b/fs/xfs/xfs_bmap.c
55229 @@ -360,7 +360,7 @@ xfs_bmap_validate_ret(
55230 int nmap,
55231 int ret_nmap);
55232 #else
55233 -#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
55234 +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
55235 #endif /* DEBUG */
55236
55237 #if defined(XFS_RW_TRACE)
55238 diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
55239 index e89734e..5e84d8d 100644
55240 --- a/fs/xfs/xfs_dir2_sf.c
55241 +++ b/fs/xfs/xfs_dir2_sf.c
55242 @@ -779,7 +779,15 @@ xfs_dir2_sf_getdents(
55243 }
55244
55245 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
55246 - if (filldir(dirent, sfep->name, sfep->namelen,
55247 + if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
55248 + char name[sfep->namelen];
55249 + memcpy(name, sfep->name, sfep->namelen);
55250 + if (filldir(dirent, name, sfep->namelen,
55251 + off & 0x7fffffff, ino, DT_UNKNOWN)) {
55252 + *offset = off & 0x7fffffff;
55253 + return 0;
55254 + }
55255 + } else if (filldir(dirent, sfep->name, sfep->namelen,
55256 off & 0x7fffffff, ino, DT_UNKNOWN)) {
55257 *offset = off & 0x7fffffff;
55258 return 0;
55259 diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
55260 index 8f32f50..b6a41e8 100644
55261 --- a/fs/xfs/xfs_vnodeops.c
55262 +++ b/fs/xfs/xfs_vnodeops.c
55263 @@ -564,13 +564,18 @@ xfs_readlink(
55264
55265 xfs_ilock(ip, XFS_ILOCK_SHARED);
55266
55267 - ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFLNK);
55268 - ASSERT(ip->i_d.di_size <= MAXPATHLEN);
55269 -
55270 pathlen = ip->i_d.di_size;
55271 if (!pathlen)
55272 goto out;
55273
55274 + if (pathlen > MAXPATHLEN) {
55275 + xfs_fs_cmn_err(CE_ALERT, mp, "%s: inode (%llu) symlink length (%d) too long",
55276 + __func__, (unsigned long long)ip->i_ino, pathlen);
55277 + ASSERT(0);
55278 + error = XFS_ERROR(EFSCORRUPTED);
55279 + goto out;
55280 + }
55281 +
55282 if (ip->i_df.if_flags & XFS_IFINLINE) {
55283 memcpy(link, ip->i_df.if_u1.if_data, pathlen);
55284 link[pathlen] = '\0';
55285 diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
55286 new file mode 100644
55287 index 0000000..7026cbd
55288 --- /dev/null
55289 +++ b/grsecurity/Kconfig
55290 @@ -0,0 +1,1074 @@
55291 +#
55292 +# grecurity configuration
55293 +#
55294 +
55295 +menu "Grsecurity"
55296 +
55297 +config GRKERNSEC
55298 + bool "Grsecurity"
55299 + select CRYPTO
55300 + select CRYPTO_SHA256
55301 + help
55302 + If you say Y here, you will be able to configure many features
55303 + that will enhance the security of your system. It is highly
55304 + recommended that you say Y here and read through the help
55305 + for each option so that you fully understand the features and
55306 + can evaluate their usefulness for your machine.
55307 +
55308 +choice
55309 + prompt "Security Level"
55310 + depends on GRKERNSEC
55311 + default GRKERNSEC_CUSTOM
55312 +
55313 +config GRKERNSEC_LOW
55314 + bool "Low"
55315 + select GRKERNSEC_LINK
55316 + select GRKERNSEC_FIFO
55317 + select GRKERNSEC_RANDNET
55318 + select GRKERNSEC_DMESG
55319 + select GRKERNSEC_CHROOT
55320 + select GRKERNSEC_CHROOT_CHDIR
55321 +
55322 + help
55323 + If you choose this option, several of the grsecurity options will
55324 + be enabled that will give you greater protection against a number
55325 + of attacks, while assuring that none of your software will have any
55326 + conflicts with the additional security measures. If you run a lot
55327 + of unusual software, or you are having problems with the higher
55328 + security levels, you should say Y here. With this option, the
55329 + following features are enabled:
55330 +
55331 + - Linking restrictions
55332 + - FIFO restrictions
55333 + - Restricted dmesg
55334 + - Enforced chdir("/") on chroot
55335 + - Runtime module disabling
55336 +
55337 +config GRKERNSEC_MEDIUM
55338 + bool "Medium"
55339 + select PAX
55340 + select PAX_EI_PAX
55341 + select PAX_PT_PAX_FLAGS
55342 + select PAX_HAVE_ACL_FLAGS
55343 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
55344 + select GRKERNSEC_CHROOT
55345 + select GRKERNSEC_CHROOT_SYSCTL
55346 + select GRKERNSEC_LINK
55347 + select GRKERNSEC_FIFO
55348 + select GRKERNSEC_DMESG
55349 + select GRKERNSEC_RANDNET
55350 + select GRKERNSEC_FORKFAIL
55351 + select GRKERNSEC_TIME
55352 + select GRKERNSEC_SIGNAL
55353 + select GRKERNSEC_CHROOT
55354 + select GRKERNSEC_CHROOT_UNIX
55355 + select GRKERNSEC_CHROOT_MOUNT
55356 + select GRKERNSEC_CHROOT_PIVOT
55357 + select GRKERNSEC_CHROOT_DOUBLE
55358 + select GRKERNSEC_CHROOT_CHDIR
55359 + select GRKERNSEC_CHROOT_MKNOD
55360 + select GRKERNSEC_PROC
55361 + select GRKERNSEC_PROC_USERGROUP
55362 + select PAX_RANDUSTACK
55363 + select PAX_ASLR
55364 + select PAX_RANDMMAP
55365 + select PAX_REFCOUNT if (X86 || SPARC64)
55366 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
55367 +
55368 + help
55369 + If you say Y here, several features in addition to those included
55370 + in the low additional security level will be enabled. These
55371 + features provide even more security to your system, though in rare
55372 + cases they may be incompatible with very old or poorly written
55373 + software. If you enable this option, make sure that your auth
55374 + service (identd) is running as gid 1001. With this option,
55375 + the following features (in addition to those provided in the
55376 + low additional security level) will be enabled:
55377 +
55378 + - Failed fork logging
55379 + - Time change logging
55380 + - Signal logging
55381 + - Deny mounts in chroot
55382 + - Deny double chrooting
55383 + - Deny sysctl writes in chroot
55384 + - Deny mknod in chroot
55385 + - Deny access to abstract AF_UNIX sockets out of chroot
55386 + - Deny pivot_root in chroot
55387 + - Denied reads/writes of /dev/kmem, /dev/mem, and /dev/port
55388 + - /proc restrictions with special GID set to 10 (usually wheel)
55389 + - Address Space Layout Randomization (ASLR)
55390 + - Prevent exploitation of most refcount overflows
55391 + - Bounds checking of copying between the kernel and userland
55392 +
55393 +config GRKERNSEC_HIGH
55394 + bool "High"
55395 + select GRKERNSEC_LINK
55396 + select GRKERNSEC_FIFO
55397 + select GRKERNSEC_DMESG
55398 + select GRKERNSEC_FORKFAIL
55399 + select GRKERNSEC_TIME
55400 + select GRKERNSEC_SIGNAL
55401 + select GRKERNSEC_CHROOT
55402 + select GRKERNSEC_CHROOT_SHMAT
55403 + select GRKERNSEC_CHROOT_UNIX
55404 + select GRKERNSEC_CHROOT_MOUNT
55405 + select GRKERNSEC_CHROOT_FCHDIR
55406 + select GRKERNSEC_CHROOT_PIVOT
55407 + select GRKERNSEC_CHROOT_DOUBLE
55408 + select GRKERNSEC_CHROOT_CHDIR
55409 + select GRKERNSEC_CHROOT_MKNOD
55410 + select GRKERNSEC_CHROOT_CAPS
55411 + select GRKERNSEC_CHROOT_SYSCTL
55412 + select GRKERNSEC_CHROOT_FINDTASK
55413 + select GRKERNSEC_SYSFS_RESTRICT
55414 + select GRKERNSEC_PROC
55415 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
55416 + select GRKERNSEC_HIDESYM
55417 + select GRKERNSEC_BRUTE
55418 + select GRKERNSEC_PROC_USERGROUP
55419 + select GRKERNSEC_KMEM
55420 + select GRKERNSEC_RESLOG
55421 + select GRKERNSEC_RANDNET
55422 + select GRKERNSEC_PROC_ADD
55423 + select GRKERNSEC_CHROOT_CHMOD
55424 + select GRKERNSEC_CHROOT_NICE
55425 + select GRKERNSEC_SETXID
55426 + select GRKERNSEC_AUDIT_MOUNT
55427 + select GRKERNSEC_MODHARDEN if (MODULES)
55428 + select GRKERNSEC_HARDEN_PTRACE
55429 + select GRKERNSEC_PTRACE_READEXEC
55430 + select GRKERNSEC_VM86 if (X86_32)
55431 + select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
55432 + select PAX
55433 + select PAX_RANDUSTACK
55434 + select PAX_ASLR
55435 + select PAX_RANDMMAP
55436 + select PAX_NOEXEC
55437 + select PAX_MPROTECT
55438 + select PAX_EI_PAX
55439 + select PAX_PT_PAX_FLAGS
55440 + select PAX_HAVE_ACL_FLAGS
55441 + select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
55442 + select PAX_MEMORY_UDEREF if (X86 && !XEN)
55443 + select PAX_RANDKSTACK if (X86_TSC && X86)
55444 + select PAX_SEGMEXEC if (X86_32)
55445 + select PAX_PAGEEXEC
55446 + select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
55447 + select PAX_EMUTRAMP if (PARISC)
55448 + select PAX_EMUSIGRT if (PARISC)
55449 + select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
55450 + select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
55451 + select PAX_REFCOUNT if (X86 || SPARC64)
55452 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
55453 + help
55454 + If you say Y here, many of the features of grsecurity will be
55455 + enabled, which will protect you against many kinds of attacks
55456 + against your system. The heightened security comes at a cost
55457 + of an increased chance of incompatibilities with rare software
55458 + on your machine. Since this security level enables PaX, you should
55459 + view <http://pax.grsecurity.net> and read about the PaX
55460 + project. While you are there, download chpax and run it on
55461 + binaries that cause problems with PaX. Also remember that
55462 + since the /proc restrictions are enabled, you must run your
55463 + identd as gid 1001. This security level enables the following
55464 + features in addition to those listed in the low and medium
55465 + security levels:
55466 +
55467 + - Additional /proc restrictions
55468 + - Chmod restrictions in chroot
55469 + - No signals, ptrace, or viewing of processes outside of chroot
55470 + - Capability restrictions in chroot
55471 + - Deny fchdir out of chroot
55472 + - Priority restrictions in chroot
55473 + - Segmentation-based implementation of PaX
55474 + - Mprotect restrictions
55475 + - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
55476 + - Kernel stack randomization
55477 + - Mount/unmount/remount logging
55478 + - Kernel symbol hiding
55479 + - Hardening of module auto-loading
55480 + - Ptrace restrictions
55481 + - Restricted vm86 mode
55482 + - Restricted sysfs/debugfs
55483 + - Active kernel exploit response
55484 +
55485 +config GRKERNSEC_CUSTOM
55486 + bool "Custom"
55487 + help
55488 + If you say Y here, you will be able to configure every grsecurity
55489 + option, which allows you to enable many more features that aren't
55490 + covered in the basic security levels. These additional features
55491 + include TPE, socket restrictions, and the sysctl system for
55492 + grsecurity. It is advised that you read through the help for
55493 + each option to determine its usefulness in your situation.
55494 +
55495 +endchoice
55496 +
55497 +menu "Memory Protections"
55498 +depends on GRKERNSEC
55499 +
55500 +config GRKERNSEC_KMEM
55501 + bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
55502 + select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
55503 + help
55504 + If you say Y here, /dev/kmem and /dev/mem won't be allowed to
55505 + be written to or read from to modify or leak the contents of the running
55506 + kernel. /dev/port will also not be allowed to be opened. If you have module
55507 + support disabled, enabling this will close up four ways that are
55508 + currently used to insert malicious code into the running kernel.
55509 + Even with all these features enabled, we still highly recommend that
55510 + you use the RBAC system, as it is still possible for an attacker to
55511 + modify the running kernel through privileged I/O granted by ioperm/iopl.
55512 + If you are not using XFree86, you may be able to stop this additional
55513 + case by enabling the 'Disable privileged I/O' option. Though nothing
55514 + legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
55515 + but only to video memory, which is the only writing we allow in this
55516 + case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
55517 + not be allowed to mprotect it with PROT_WRITE later.
55518 + It is highly recommended that you say Y here if you meet all the
55519 + conditions above.
55520 +
55521 +config GRKERNSEC_VM86
55522 + bool "Restrict VM86 mode"
55523 + depends on X86_32
55524 +
55525 + help
55526 + If you say Y here, only processes with CAP_SYS_RAWIO will be able to
55527 + make use of a special execution mode on 32bit x86 processors called
55528 + Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
55529 + video cards and will still work with this option enabled. The purpose
55530 + of the option is to prevent exploitation of emulation errors in
55531 + virtualization of vm86 mode like the one discovered in VMWare in 2009.
55532 + Nearly all users should be able to enable this option.
55533 +
55534 +config GRKERNSEC_IO
55535 + bool "Disable privileged I/O"
55536 + depends on X86
55537 + select RTC_CLASS
55538 + select RTC_INTF_DEV
55539 + select RTC_DRV_CMOS
55540 +
55541 + help
55542 + If you say Y here, all ioperm and iopl calls will return an error.
55543 + Ioperm and iopl can be used to modify the running kernel.
55544 + Unfortunately, some programs need this access to operate properly,
55545 + the most notable of which are XFree86 and hwclock. hwclock can be
55546 + remedied by having RTC support in the kernel, so real-time
55547 + clock support is enabled if this option is enabled, to ensure
55548 + that hwclock operates correctly. XFree86 still will not
55549 + operate correctly with this option enabled, so DO NOT CHOOSE Y
55550 + IF YOU USE XFree86. If you use XFree86 and you still want to
55551 + protect your kernel against modification, use the RBAC system.
55552 +
55553 +config GRKERNSEC_PROC_MEMMAP
55554 + bool "Harden ASLR against information leaks and entropy reduction"
55555 + default y if (PAX_NOEXEC || PAX_ASLR)
55556 + depends on PAX_NOEXEC || PAX_ASLR
55557 + help
55558 + If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
55559 + give no information about the addresses of its mappings if
55560 + PaX features that rely on random addresses are enabled on the task.
55561 + In addition to sanitizing this information and disabling other
55562 + dangerous sources of information, this option causes reads of sensitive
55563 + /proc/<pid> entries where the file descriptor was opened in a different
55564 + task than the one performing the read. Such attempts are logged.
55565 + Finally, this option limits argv/env strings for suid/sgid binaries
55566 + to 1MB to prevent a complete exhaustion of the stack entropy provided
55567 + by ASLR.
55568 + If you use PaX it is essential that you say Y here as it closes up
55569 + several holes that make full ASLR useless for suid/sgid binaries.
55570 +
55571 +config GRKERNSEC_BRUTE
55572 + bool "Deter exploit bruteforcing"
55573 + help
55574 + If you say Y here, attempts to bruteforce exploits against forking
55575 + daemons such as apache or sshd, as well as against suid/sgid binaries
55576 + will be deterred. When a child of a forking daemon is killed by PaX
55577 + or crashes due to an illegal instruction or other suspicious signal,
55578 + the parent process will be delayed 30 seconds upon every subsequent
55579 + fork until the administrator is able to assess the situation and
55580 + restart the daemon.
55581 + In the suid/sgid case, the attempt is logged, the user has all their
55582 + processes terminated, and they are prevented from executing any further
55583 + processes for 15 minutes.
55584 + It is recommended that you also enable signal logging in the auditing
55585 + section so that logs are generated when a process triggers a suspicious
55586 + signal.
55587 + If the sysctl option is enabled, a sysctl option with name
55588 + "deter_bruteforce" is created.
55589 +
55590 +config GRKERNSEC_MODHARDEN
55591 + bool "Harden module auto-loading"
55592 + depends on MODULES
55593 + help
55594 + If you say Y here, module auto-loading in response to use of some
55595 + feature implemented by an unloaded module will be restricted to
55596 + root users. Enabling this option helps defend against attacks
55597 + by unprivileged users who abuse the auto-loading behavior to
55598 + cause a vulnerable module to load that is then exploited.
55599 +
55600 + If this option prevents a legitimate use of auto-loading for a
55601 + non-root user, the administrator can execute modprobe manually
55602 + with the exact name of the module mentioned in the alert log.
55603 + Alternatively, the administrator can add the module to the list
55604 + of modules loaded at boot by modifying init scripts.
55605 +
55606 + Modification of init scripts will most likely be needed on
55607 + Ubuntu servers with encrypted home directory support enabled,
55608 + as the first non-root user logging in will cause the ecb(aes),
55609 + ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
55610 +
55611 +config GRKERNSEC_HIDESYM
55612 + bool "Hide kernel symbols"
55613 + help
55614 + If you say Y here, getting information on loaded modules, and
55615 + displaying all kernel symbols through a syscall will be restricted
55616 + to users with CAP_SYS_MODULE. For software compatibility reasons,
55617 + /proc/kallsyms will be restricted to the root user. The RBAC
55618 + system can hide that entry even from root.
55619 +
55620 + This option also prevents leaking of kernel addresses through
55621 + several /proc entries.
55622 +
55623 + Note that this option is only effective provided the following
55624 + conditions are met:
55625 + 1) The kernel using grsecurity is not precompiled by some distribution
55626 + 2) You have also enabled GRKERNSEC_DMESG
55627 + 3) You are using the RBAC system and hiding other files such as your
55628 + kernel image and System.map. Alternatively, enabling this option
55629 + causes the permissions on /boot, /lib/modules, and the kernel
55630 + source directory to change at compile time to prevent
55631 + reading by non-root users.
55632 + If the above conditions are met, this option will aid in providing a
55633 + useful protection against local kernel exploitation of overflows
55634 + and arbitrary read/write vulnerabilities.
55635 +
55636 +config GRKERNSEC_KERN_LOCKOUT
55637 + bool "Active kernel exploit response"
55638 + depends on X86 || ARM || PPC || SPARC
55639 + help
55640 + If you say Y here, when a PaX alert is triggered due to suspicious
55641 + activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
55642 + or an OOPs occurs due to bad memory accesses, instead of just
55643 + terminating the offending process (and potentially allowing
55644 + a subsequent exploit from the same user), we will take one of two
55645 + actions:
55646 + If the user was root, we will panic the system
55647 + If the user was non-root, we will log the attempt, terminate
55648 + all processes owned by the user, then prevent them from creating
55649 + any new processes until the system is restarted
55650 + This deters repeated kernel exploitation/bruteforcing attempts
55651 + and is useful for later forensics.
55652 +
55653 +endmenu
55654 +menu "Role Based Access Control Options"
55655 +depends on GRKERNSEC
55656 +
55657 +config GRKERNSEC_RBAC_DEBUG
55658 + bool
55659 +
55660 +config GRKERNSEC_NO_RBAC
55661 + bool "Disable RBAC system"
55662 + help
55663 + If you say Y here, the /dev/grsec device will be removed from the kernel,
55664 + preventing the RBAC system from being enabled. You should only say Y
55665 + here if you have no intention of using the RBAC system, so as to prevent
55666 + an attacker with root access from misusing the RBAC system to hide files
55667 + and processes when loadable module support and /dev/[k]mem have been
55668 + locked down.
55669 +
55670 +config GRKERNSEC_ACL_HIDEKERN
55671 + bool "Hide kernel processes"
55672 + help
55673 + If you say Y here, all kernel threads will be hidden to all
55674 + processes but those whose subject has the "view hidden processes"
55675 + flag.
55676 +
55677 +config GRKERNSEC_ACL_MAXTRIES
55678 + int "Maximum tries before password lockout"
55679 + default 3
55680 + help
55681 + This option enforces the maximum number of times a user can attempt
55682 + to authorize themselves with the grsecurity RBAC system before being
55683 + denied the ability to attempt authorization again for a specified time.
55684 + The lower the number, the harder it will be to brute-force a password.
55685 +
55686 +config GRKERNSEC_ACL_TIMEOUT
55687 + int "Time to wait after max password tries, in seconds"
55688 + default 30
55689 + help
55690 + This option specifies the time the user must wait after attempting to
55691 + authorize to the RBAC system with the maximum number of invalid
55692 + passwords. The higher the number, the harder it will be to brute-force
55693 + a password.
55694 +
55695 +endmenu
55696 +menu "Filesystem Protections"
55697 +depends on GRKERNSEC
55698 +
55699 +config GRKERNSEC_PROC
55700 + bool "Proc restrictions"
55701 + help
55702 + If you say Y here, the permissions of the /proc filesystem
55703 + will be altered to enhance system security and privacy. You MUST
55704 + choose either a user only restriction or a user and group restriction.
55705 + Depending upon the option you choose, you can either restrict users to
55706 + see only the processes they themselves run, or choose a group that can
55707 + view all processes and files normally restricted to root if you choose
55708 + the "restrict to user only" option. NOTE: If you're running identd as
55709 + a non-root user, you will have to run it as the group you specify here.
55710 +
55711 +config GRKERNSEC_PROC_USER
55712 + bool "Restrict /proc to user only"
55713 + depends on GRKERNSEC_PROC
55714 + help
55715 + If you say Y here, non-root users will only be able to view their own
55716 + processes, and restricts them from viewing network-related information,
55717 + and viewing kernel symbol and module information.
55718 +
55719 +config GRKERNSEC_PROC_USERGROUP
55720 + bool "Allow special group"
55721 + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
55722 + help
55723 + If you say Y here, you will be able to select a group that will be
55724 + able to view all processes and network-related information. If you've
55725 + enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
55726 + remain hidden. This option is useful if you want to run identd as
55727 + a non-root user.
55728 +
55729 +config GRKERNSEC_PROC_GID
55730 + int "GID for special group"
55731 + depends on GRKERNSEC_PROC_USERGROUP
55732 + default 1001
55733 +
55734 +config GRKERNSEC_PROC_ADD
55735 + bool "Additional restrictions"
55736 + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
55737 + help
55738 + If you say Y here, additional restrictions will be placed on
55739 + /proc that keep normal users from viewing device information and
55740 + slabinfo information that could be useful for exploits.
55741 +
55742 +config GRKERNSEC_LINK
55743 + bool "Linking restrictions"
55744 + help
55745 + If you say Y here, /tmp race exploits will be prevented, since users
55746 + will no longer be able to follow symlinks owned by other users in
55747 + world-writable +t directories (e.g. /tmp), unless the owner of the
55748 + symlink is the owner of the directory. Users will also not be
55749 + able to hardlink to files they do not own. If the sysctl option is
55750 + enabled, a sysctl option with name "linking_restrictions" is created.
55751 +
55752 +config GRKERNSEC_FIFO
55753 + bool "FIFO restrictions"
55754 + help
55755 + If you say Y here, users will not be able to write to FIFOs they don't
55756 + own in world-writable +t directories (e.g. /tmp), unless the owner of
55757 + the FIFO is the same owner of the directory it's held in. If the sysctl
55758 + option is enabled, a sysctl option with name "fifo_restrictions" is
55759 + created.
55760 +
55761 +config GRKERNSEC_SYSFS_RESTRICT
55762 + bool "Sysfs/debugfs restriction"
55763 + depends on SYSFS
55764 + help
55765 + If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
55766 + any filesystem normally mounted under it (e.g. debugfs) will be
55767 + mostly accessible only by root. These filesystems generally provide access
55768 + to hardware and debug information that isn't appropriate for unprivileged
55769 + users of the system. Sysfs and debugfs have also become a large source
55770 + of new vulnerabilities, ranging from infoleaks to local compromise.
55771 + There has been very little oversight with an eye toward security involved
55772 + in adding new exporters of information to these filesystems, so their
55773 + use is discouraged.
55774 + For reasons of compatibility, a few directories have been whitelisted
55775 + for access by non-root users:
55776 + /sys/fs/selinux
55777 + /sys/fs/fuse
55778 + /sys/devices/system/cpu
55779 +
55780 +config GRKERNSEC_ROFS
55781 + bool "Runtime read-only mount protection"
55782 + help
55783 + If you say Y here, a sysctl option with name "romount_protect" will
55784 + be created. By setting this option to 1 at runtime, filesystems
55785 + will be protected in the following ways:
55786 + * No new writable mounts will be allowed
55787 + * Existing read-only mounts won't be able to be remounted read/write
55788 + * Write operations will be denied on all block devices
55789 + This option acts independently of grsec_lock: once it is set to 1,
55790 + it cannot be turned off. Therefore, please be mindful of the resulting
55791 + behavior if this option is enabled in an init script on a read-only
55792 + filesystem. This feature is mainly intended for secure embedded systems.
55793 +
55794 +config GRKERNSEC_CHROOT
55795 + bool "Chroot jail restrictions"
55796 + help
55797 + If you say Y here, you will be able to choose several options that will
55798 + make breaking out of a chrooted jail much more difficult. If you
55799 + encounter no software incompatibilities with the following options, it
55800 + is recommended that you enable each one.
55801 +
55802 +config GRKERNSEC_CHROOT_MOUNT
55803 + bool "Deny mounts"
55804 + depends on GRKERNSEC_CHROOT
55805 + help
55806 + If you say Y here, processes inside a chroot will not be able to
55807 + mount or remount filesystems. If the sysctl option is enabled, a
55808 + sysctl option with name "chroot_deny_mount" is created.
55809 +
55810 +config GRKERNSEC_CHROOT_DOUBLE
55811 + bool "Deny double-chroots"
55812 + depends on GRKERNSEC_CHROOT
55813 + help
55814 + If you say Y here, processes inside a chroot will not be able to chroot
55815 + again outside the chroot. This is a widely used method of breaking
55816 + out of a chroot jail and should not be allowed. If the sysctl
55817 + option is enabled, a sysctl option with name
55818 + "chroot_deny_chroot" is created.
55819 +
55820 +config GRKERNSEC_CHROOT_PIVOT
55821 + bool "Deny pivot_root in chroot"
55822 + depends on GRKERNSEC_CHROOT
55823 + help
55824 + If you say Y here, processes inside a chroot will not be able to use
55825 + a function called pivot_root() that was introduced in Linux 2.3.41. It
55826 + works similarly to chroot in that it changes the root filesystem. This
55827 + function could be misused in a chrooted process to attempt to break out
55828 + of the chroot, and therefore should not be allowed. If the sysctl
55829 + option is enabled, a sysctl option with name "chroot_deny_pivot" is
55830 + created.
55831 +
55832 +config GRKERNSEC_CHROOT_CHDIR
55833 + bool "Enforce chdir(\"/\") on all chroots"
55834 + depends on GRKERNSEC_CHROOT
55835 + help
55836 + If you say Y here, the current working directory of all newly-chrooted
55837 + applications will be set to the root directory of the chroot.
55838 + The man page on chroot(2) states:
55839 + Note that this call does not change the current working
55840 + directory, so that `.' can be outside the tree rooted at
55841 + `/'. In particular, the super-user can escape from a
55842 + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
55843 +
55844 + It is recommended that you say Y here, since it's not known to break
55845 + any software. If the sysctl option is enabled, a sysctl option with
55846 + name "chroot_enforce_chdir" is created.
55847 +
55848 +config GRKERNSEC_CHROOT_CHMOD
55849 + bool "Deny (f)chmod +s"
55850 + depends on GRKERNSEC_CHROOT
55851 + help
55852 + If you say Y here, processes inside a chroot will not be able to chmod
55853 + or fchmod files to make them have suid or sgid bits. This protects
55854 + against another published method of breaking a chroot. If the sysctl
55855 + option is enabled, a sysctl option with name "chroot_deny_chmod" is
55856 + created.
55857 +
55858 +config GRKERNSEC_CHROOT_FCHDIR
55859 + bool "Deny fchdir out of chroot"
55860 + depends on GRKERNSEC_CHROOT
55861 + help
55862 + If you say Y here, a well-known method of breaking chroots by fchdir'ing
55863 + to a file descriptor of the chrooting process that points to a directory
55864 + outside the filesystem will be stopped. If the sysctl option
55865 + is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
55866 +
55867 +config GRKERNSEC_CHROOT_MKNOD
55868 + bool "Deny mknod"
55869 + depends on GRKERNSEC_CHROOT
55870 + help
55871 + If you say Y here, processes inside a chroot will not be allowed to
55872 + mknod. The problem with using mknod inside a chroot is that it
55873 + would allow an attacker to create a device entry that is the same
55874 + as one on the physical root of your system, which could range from
55875 + anything from the console device to a device for your harddrive (which
55876 + they could then use to wipe the drive or steal data). It is recommended
55877 + that you say Y here, unless you run into software incompatibilities.
55878 + If the sysctl option is enabled, a sysctl option with name
55879 + "chroot_deny_mknod" is created.
55880 +
55881 +config GRKERNSEC_CHROOT_SHMAT
55882 + bool "Deny shmat() out of chroot"
55883 + depends on GRKERNSEC_CHROOT
55884 + help
55885 + If you say Y here, processes inside a chroot will not be able to attach
55886 + to shared memory segments that were created outside of the chroot jail.
55887 + It is recommended that you say Y here. If the sysctl option is enabled,
55888 + a sysctl option with name "chroot_deny_shmat" is created.
55889 +
55890 +config GRKERNSEC_CHROOT_UNIX
55891 + bool "Deny access to abstract AF_UNIX sockets out of chroot"
55892 + depends on GRKERNSEC_CHROOT
55893 + help
55894 + If you say Y here, processes inside a chroot will not be able to
55895 + connect to abstract (meaning not belonging to a filesystem) Unix
55896 + domain sockets that were bound outside of a chroot. It is recommended
55897 + that you say Y here. If the sysctl option is enabled, a sysctl option
55898 + with name "chroot_deny_unix" is created.
55899 +
55900 +config GRKERNSEC_CHROOT_FINDTASK
55901 + bool "Protect outside processes"
55902 + depends on GRKERNSEC_CHROOT
55903 + help
55904 + If you say Y here, processes inside a chroot will not be able to
55905 + kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
55906 + getsid, or view any process outside of the chroot. If the sysctl
55907 + option is enabled, a sysctl option with name "chroot_findtask" is
55908 + created.
55909 +
55910 +config GRKERNSEC_CHROOT_NICE
55911 + bool "Restrict priority changes"
55912 + depends on GRKERNSEC_CHROOT
55913 + help
55914 + If you say Y here, processes inside a chroot will not be able to raise
55915 + the priority of processes in the chroot, or alter the priority of
55916 + processes outside the chroot. This provides more security than simply
55917 + removing CAP_SYS_NICE from the process' capability set. If the
55918 + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
55919 + is created.
55920 +
55921 +config GRKERNSEC_CHROOT_SYSCTL
55922 + bool "Deny sysctl writes"
55923 + depends on GRKERNSEC_CHROOT
55924 + help
55925 + If you say Y here, an attacker in a chroot will not be able to
55926 + write to sysctl entries, either by sysctl(2) or through a /proc
55927 + interface. It is strongly recommended that you say Y here. If the
55928 + sysctl option is enabled, a sysctl option with name
55929 + "chroot_deny_sysctl" is created.
55930 +
55931 +config GRKERNSEC_CHROOT_CAPS
55932 + bool "Capability restrictions"
55933 + depends on GRKERNSEC_CHROOT
55934 + help
55935 + If you say Y here, the capabilities on all processes within a
55936 + chroot jail will be lowered to stop module insertion, raw i/o,
55937 + system and net admin tasks, rebooting the system, modifying immutable
55938 + files, modifying IPC owned by another, and changing the system time.
55939 + This is left an option because it can break some apps. Disable this
55940 + if your chrooted apps are having problems performing those kinds of
55941 + tasks. If the sysctl option is enabled, a sysctl option with
55942 + name "chroot_caps" is created.
55943 +
55944 +endmenu
55945 +menu "Kernel Auditing"
55946 +depends on GRKERNSEC
55947 +
55948 +config GRKERNSEC_AUDIT_GROUP
55949 + bool "Single group for auditing"
55950 + help
55951 + If you say Y here, the exec, chdir, and (un)mount logging features
55952 + will only operate on a group you specify. This option is recommended
55953 + if you only want to watch certain users instead of having a large
55954 + amount of logs from the entire system. If the sysctl option is enabled,
55955 + a sysctl option with name "audit_group" is created.
55956 +
55957 +config GRKERNSEC_AUDIT_GID
55958 + int "GID for auditing"
55959 + depends on GRKERNSEC_AUDIT_GROUP
55960 + default 1007
55961 +
55962 +config GRKERNSEC_EXECLOG
55963 + bool "Exec logging"
55964 + help
55965 + If you say Y here, all execve() calls will be logged (since the
55966 + other exec*() calls are frontends to execve(), all execution
55967 + will be logged). Useful for shell-servers that like to keep track
55968 + of their users. If the sysctl option is enabled, a sysctl option with
55969 + name "exec_logging" is created.
55970 + WARNING: This option when enabled will produce a LOT of logs, especially
55971 + on an active system.
55972 +
55973 +config GRKERNSEC_RESLOG
55974 + bool "Resource logging"
55975 + help
55976 + If you say Y here, all attempts to overstep resource limits will
55977 + be logged with the resource name, the requested size, and the current
55978 + limit. It is highly recommended that you say Y here. If the sysctl
55979 + option is enabled, a sysctl option with name "resource_logging" is
55980 + created. If the RBAC system is enabled, the sysctl value is ignored.
55981 +
55982 +config GRKERNSEC_CHROOT_EXECLOG
55983 + bool "Log execs within chroot"
55984 + help
55985 + If you say Y here, all executions inside a chroot jail will be logged
55986 + to syslog. This can cause a large amount of logs if certain
55987 + applications (eg. djb's daemontools) are installed on the system, and
55988 + is therefore left as an option. If the sysctl option is enabled, a
55989 + sysctl option with name "chroot_execlog" is created.
55990 +
55991 +config GRKERNSEC_AUDIT_PTRACE
55992 + bool "Ptrace logging"
55993 + help
55994 + If you say Y here, all attempts to attach to a process via ptrace
55995 + will be logged. If the sysctl option is enabled, a sysctl option
55996 + with name "audit_ptrace" is created.
55997 +
55998 +config GRKERNSEC_AUDIT_CHDIR
55999 + bool "Chdir logging"
56000 + help
56001 + If you say Y here, all chdir() calls will be logged. If the sysctl
56002 + option is enabled, a sysctl option with name "audit_chdir" is created.
56003 +
56004 +config GRKERNSEC_AUDIT_MOUNT
56005 + bool "(Un)Mount logging"
56006 + help
56007 + If you say Y here, all mounts and unmounts will be logged. If the
56008 + sysctl option is enabled, a sysctl option with name "audit_mount" is
56009 + created.
56010 +
56011 +config GRKERNSEC_SIGNAL
56012 + bool "Signal logging"
56013 + help
56014 + If you say Y here, certain important signals will be logged, such as
56015 + SIGSEGV, which will as a result inform you of when an error in a program
56016 + occurred, which in some cases could mean a possible exploit attempt.
56017 + If the sysctl option is enabled, a sysctl option with name
56018 + "signal_logging" is created.
56019 +
56020 +config GRKERNSEC_FORKFAIL
56021 + bool "Fork failure logging"
56022 + help
56023 + If you say Y here, all failed fork() attempts will be logged.
56024 + This could suggest a fork bomb, or someone attempting to overstep
56025 + their process limit. If the sysctl option is enabled, a sysctl option
56026 + with name "forkfail_logging" is created.
56027 +
56028 +config GRKERNSEC_TIME
56029 + bool "Time change logging"
56030 + help
56031 + If you say Y here, any changes of the system clock will be logged.
56032 + If the sysctl option is enabled, a sysctl option with name
56033 + "timechange_logging" is created.
56034 +
56035 +config GRKERNSEC_PROC_IPADDR
56036 + bool "/proc/<pid>/ipaddr support"
56037 + help
56038 + If you say Y here, a new entry will be added to each /proc/<pid>
56039 + directory that contains the IP address of the person using the task.
56040 + The IP is carried across local TCP and AF_UNIX stream sockets.
56041 + This information can be useful for IDS/IPSes to perform remote response
56042 + to a local attack. The entry is readable by only the owner of the
56043 + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
56044 + the RBAC system), and thus does not create privacy concerns.
56045 +
56046 +config GRKERNSEC_RWXMAP_LOG
56047 + bool 'Denied RWX mmap/mprotect logging'
56048 + depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
56049 + help
56050 + If you say Y here, calls to mmap() and mprotect() with explicit
56051 + usage of PROT_WRITE and PROT_EXEC together will be logged when
56052 + denied by the PAX_MPROTECT feature. If the sysctl option is
56053 + enabled, a sysctl option with name "rwxmap_logging" is created.
56054 +
56055 +config GRKERNSEC_AUDIT_TEXTREL
56056 + bool 'ELF text relocations logging (READ HELP)'
56057 + depends on PAX_MPROTECT
56058 + help
56059 + If you say Y here, text relocations will be logged with the filename
56060 + of the offending library or binary. The purpose of the feature is
56061 + to help Linux distribution developers get rid of libraries and
56062 + binaries that need text relocations which hinder the future progress
56063 + of PaX. Only Linux distribution developers should say Y here, and
56064 + never on a production machine, as this option creates an information
56065 + leak that could aid an attacker in defeating the randomization of
56066 + a single memory region. If the sysctl option is enabled, a sysctl
56067 + option with name "audit_textrel" is created.
56068 +
56069 +endmenu
56070 +
56071 +menu "Executable Protections"
56072 +depends on GRKERNSEC
56073 +
56074 +config GRKERNSEC_DMESG
56075 + bool "Dmesg(8) restriction"
56076 + help
56077 + If you say Y here, non-root users will not be able to use dmesg(8)
56078 + to view up to the last 4kb of messages in the kernel's log buffer.
56079 + The kernel's log buffer often contains kernel addresses and other
56080 + identifying information useful to an attacker in fingerprinting a
56081 + system for a targeted exploit.
56082 + If the sysctl option is enabled, a sysctl option with name "dmesg" is
56083 + created.
56084 +
56085 +config GRKERNSEC_HARDEN_PTRACE
56086 + bool "Deter ptrace-based process snooping"
56087 + help
56088 + If you say Y here, TTY sniffers and other malicious monitoring
56089 + programs implemented through ptrace will be defeated. If you
56090 + have been using the RBAC system, this option has already been
56091 + enabled for several years for all users, with the ability to make
56092 + fine-grained exceptions.
56093 +
56094 + This option only affects the ability of non-root users to ptrace
56095 + processes that are not a descendent of the ptracing process.
56096 + This means that strace ./binary and gdb ./binary will still work,
56097 + but attaching to arbitrary processes will not. If the sysctl
56098 + option is enabled, a sysctl option with name "harden_ptrace" is
56099 + created.
56100 +
56101 +config GRKERNSEC_PTRACE_READEXEC
56102 + bool "Require read access to ptrace sensitive binaries"
56103 + help
56104 + If you say Y here, unprivileged users will not be able to ptrace unreadable
56105 + binaries. This option is useful in environments that
56106 + remove the read bits (e.g. file mode 4711) from suid binaries to
56107 + prevent infoleaking of their contents. This option adds
56108 + consistency to the use of that file mode, as the binary could normally
56109 + be read out when run without privileges while ptracing.
56110 +
56111 + If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
56112 + is created.
56113 +
56114 +config GRKERNSEC_SETXID
56115 + bool "Enforce consistent multithreaded privileges"
56116 + help
56117 + If you say Y here, a change from a root uid to a non-root uid
56118 + in a multithreaded application will cause the resulting uids,
56119 + gids, supplementary groups, and capabilities in that thread
56120 + to be propagated to the other threads of the process. In most
56121 + cases this is unnecessary, as glibc will emulate this behavior
56122 + on behalf of the application. Other libcs do not act in the
56123 + same way, allowing the other threads of the process to continue
56124 + running with root privileges. If the sysctl option is enabled,
56125 + a sysctl option with name "consistent_setxid" is created.
56126 +
56127 +config GRKERNSEC_TPE
56128 + bool "Trusted Path Execution (TPE)"
56129 + help
56130 + If you say Y here, you will be able to choose a gid to add to the
56131 + supplementary groups of users you want to mark as "untrusted."
56132 + These users will not be able to execute any files that are not in
56133 + root-owned directories writable only by root. If the sysctl option
56134 + is enabled, a sysctl option with name "tpe" is created.
56135 +
56136 +config GRKERNSEC_TPE_ALL
56137 + bool "Partially restrict all non-root users"
56138 + depends on GRKERNSEC_TPE
56139 + help
56140 + If you say Y here, all non-root users will be covered under
56141 + a weaker TPE restriction. This is separate from, and in addition to,
56142 + the main TPE options that you have selected elsewhere. Thus, if a
56143 + "trusted" GID is chosen, this restriction applies to even that GID.
56144 + Under this restriction, all non-root users will only be allowed to
56145 + execute files in directories they own that are not group or
56146 + world-writable, or in directories owned by root and writable only by
56147 + root. If the sysctl option is enabled, a sysctl option with name
56148 + "tpe_restrict_all" is created.
56149 +
56150 +config GRKERNSEC_TPE_INVERT
56151 + bool "Invert GID option"
56152 + depends on GRKERNSEC_TPE
56153 + help
56154 + If you say Y here, the group you specify in the TPE configuration will
56155 + decide what group TPE restrictions will be *disabled* for. This
56156 + option is useful if you want TPE restrictions to be applied to most
56157 + users on the system. If the sysctl option is enabled, a sysctl option
56158 + with name "tpe_invert" is created. Unlike other sysctl options, this
56159 + entry will default to on for backward-compatibility.
56160 +
56161 +config GRKERNSEC_TPE_GID
56162 + int "GID for untrusted users"
56163 + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
56164 + default 1005
56165 + help
56166 + Setting this GID determines what group TPE restrictions will be
56167 + *enabled* for. If the sysctl option is enabled, a sysctl option
56168 + with name "tpe_gid" is created.
56169 +
56170 +config GRKERNSEC_TPE_GID
56171 + int "GID for trusted users"
56172 + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
56173 + default 1005
56174 + help
56175 + Setting this GID determines what group TPE restrictions will be
56176 + *disabled* for. If the sysctl option is enabled, a sysctl option
56177 + with name "tpe_gid" is created.
56178 +
56179 +endmenu
56180 +menu "Network Protections"
56181 +depends on GRKERNSEC
56182 +
56183 +config GRKERNSEC_RANDNET
56184 + bool "Larger entropy pools"
56185 + help
56186 + If you say Y here, the entropy pools used for many features of Linux
56187 + and grsecurity will be doubled in size. Since several grsecurity
56188 + features use additional randomness, it is recommended that you say Y
56189 + here. Saying Y here has a similar effect as modifying
56190 + /proc/sys/kernel/random/poolsize.
56191 +
56192 +config GRKERNSEC_BLACKHOLE
56193 + bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
56194 + depends on NET
56195 + help
56196 + If you say Y here, neither TCP resets nor ICMP
56197 + destination-unreachable packets will be sent in response to packets
56198 + sent to ports for which no associated listening process exists.
56199 + This feature supports both IPV4 and IPV6 and exempts the
56200 + loopback interface from blackholing. Enabling this feature
56201 + makes a host more resilient to DoS attacks and reduces network
56202 + visibility against scanners.
56203 +
56204 + The blackhole feature as-implemented is equivalent to the FreeBSD
56205 + blackhole feature, as it prevents RST responses to all packets, not
56206 + just SYNs. Under most application behavior this causes no
56207 + problems, but applications (like haproxy) may not close certain
56208 + connections in a way that cleanly terminates them on the remote
56209 + end, leaving the remote host in LAST_ACK state. Because of this
56210 + side-effect and to prevent intentional LAST_ACK DoSes, this
56211 + feature also adds automatic mitigation against such attacks.
56212 + The mitigation drastically reduces the amount of time a socket
56213 + can spend in LAST_ACK state. If you're using haproxy and not
56214 + all servers it connects to have this option enabled, consider
56215 + disabling this feature on the haproxy host.
56216 +
56217 + If the sysctl option is enabled, two sysctl options with names
56218 + "ip_blackhole" and "lastack_retries" will be created.
56219 + While "ip_blackhole" takes the standard zero/non-zero on/off
56220 + toggle, "lastack_retries" uses the same kinds of values as
56221 + "tcp_retries1" and "tcp_retries2". The default value of 4
56222 + prevents a socket from lasting more than 45 seconds in LAST_ACK
56223 + state.
56224 +
56225 +config GRKERNSEC_SOCKET
56226 + bool "Socket restrictions"
56227 + depends on NET
56228 + help
56229 + If you say Y here, you will be able to choose from several options.
56230 + If you assign a GID on your system and add it to the supplementary
56231 + groups of users you want to restrict socket access to, this patch
56232 + will perform up to three things, based on the option(s) you choose.
56233 +
56234 +config GRKERNSEC_SOCKET_ALL
56235 + bool "Deny any sockets to group"
56236 + depends on GRKERNSEC_SOCKET
56237 + help
56238 + If you say Y here, you will be able to choose a GID whose users will
56239 + be unable to connect to other hosts from your machine or run server
56240 + applications from your machine. If the sysctl option is enabled, a
56241 + sysctl option with name "socket_all" is created.
56242 +
56243 +config GRKERNSEC_SOCKET_ALL_GID
56244 + int "GID to deny all sockets for"
56245 + depends on GRKERNSEC_SOCKET_ALL
56246 + default 1004
56247 + help
56248 + Here you can choose the GID to disable socket access for. Remember to
56249 + add the users you want socket access disabled for to the GID
56250 + specified here. If the sysctl option is enabled, a sysctl option
56251 + with name "socket_all_gid" is created.
56252 +
56253 +config GRKERNSEC_SOCKET_CLIENT
56254 + bool "Deny client sockets to group"
56255 + depends on GRKERNSEC_SOCKET
56256 + help
56257 + If you say Y here, you will be able to choose a GID whose users will
56258 + be unable to connect to other hosts from your machine, but will be
56259 + able to run servers. If this option is enabled, all users in the group
56260 + you specify will have to use passive mode when initiating ftp transfers
56261 + from the shell on your machine. If the sysctl option is enabled, a
56262 + sysctl option with name "socket_client" is created.
56263 +
56264 +config GRKERNSEC_SOCKET_CLIENT_GID
56265 + int "GID to deny client sockets for"
56266 + depends on GRKERNSEC_SOCKET_CLIENT
56267 + default 1003
56268 + help
56269 + Here you can choose the GID to disable client socket access for.
56270 + Remember to add the users you want client socket access disabled for to
56271 + the GID specified here. If the sysctl option is enabled, a sysctl
56272 + option with name "socket_client_gid" is created.
56273 +
56274 +config GRKERNSEC_SOCKET_SERVER
56275 + bool "Deny server sockets to group"
56276 + depends on GRKERNSEC_SOCKET
56277 + help
56278 + If you say Y here, you will be able to choose a GID of whose users will
56279 + be unable to run server applications from your machine. If the sysctl
56280 + option is enabled, a sysctl option with name "socket_server" is created.
56281 +
56282 +config GRKERNSEC_SOCKET_SERVER_GID
56283 + int "GID to deny server sockets for"
56284 + depends on GRKERNSEC_SOCKET_SERVER
56285 + default 1002
56286 + help
56287 + Here you can choose the GID to disable server socket access for.
56288 + Remember to add the users you want server socket access disabled for to
56289 + the GID specified here. If the sysctl option is enabled, a sysctl
56290 + option with name "socket_server_gid" is created.
56291 +
56292 +endmenu
56293 +menu "Sysctl support"
56294 +depends on GRKERNSEC && SYSCTL
56295 +
56296 +config GRKERNSEC_SYSCTL
56297 + bool "Sysctl support"
56298 + help
56299 + If you say Y here, you will be able to change the options that
56300 + grsecurity runs with at bootup, without having to recompile your
56301 + kernel. You can echo values to files in /proc/sys/kernel/grsecurity
56302 + to enable (1) or disable (0) various features. All the sysctl entries
56303 + are mutable until the "grsec_lock" entry is set to a non-zero value.
56304 + All features enabled in the kernel configuration are disabled at boot
56305 + if you do not say Y to the "Turn on features by default" option.
56306 + All options should be set at startup, and the grsec_lock entry should
56307 + be set to a non-zero value after all the options are set.
56308 + *THIS IS EXTREMELY IMPORTANT*
56309 +
56310 +config GRKERNSEC_SYSCTL_DISTRO
56311 + bool "Extra sysctl support for distro makers (READ HELP)"
56312 + depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
56313 + help
56314 + If you say Y here, additional sysctl options will be created
56315 + for features that affect processes running as root. Therefore,
56316 + it is critical when using this option that the grsec_lock entry be
56317 + enabled after boot. Only distros with prebuilt kernel packages
56318 + with this option enabled that can ensure grsec_lock is enabled
56319 + after boot should use this option.
56320 + *Failure to set grsec_lock after boot makes all grsec features
56321 + this option covers useless*
56322 +
56323 + Currently this option creates the following sysctl entries:
56324 + "Disable Privileged I/O": "disable_priv_io"
56325 +
56326 +config GRKERNSEC_SYSCTL_ON
56327 + bool "Turn on features by default"
56328 + depends on GRKERNSEC_SYSCTL
56329 + help
56330 + If you say Y here, instead of having all features enabled in the
56331 + kernel configuration disabled at boot time, the features will be
56332 + enabled at boot time. It is recommended you say Y here unless
56333 + there is some reason you would want all sysctl-tunable features to
56334 + be disabled by default. As mentioned elsewhere, it is important
56335 + to enable the grsec_lock entry once you have finished modifying
56336 + the sysctl entries.
56337 +
56338 +endmenu
56339 +menu "Logging Options"
56340 +depends on GRKERNSEC
56341 +
56342 +config GRKERNSEC_FLOODTIME
56343 + int "Seconds in between log messages (minimum)"
56344 + default 10
56345 + help
56346 + This option allows you to enforce the number of seconds between
56347 + grsecurity log messages. The default should be suitable for most
56348 + people, however, if you choose to change it, choose a value small enough
56349 + to allow informative logs to be produced, but large enough to
56350 + prevent flooding.
56351 +
56352 +config GRKERNSEC_FLOODBURST
56353 + int "Number of messages in a burst (maximum)"
56354 + default 6
56355 + help
56356 + This option allows you to choose the maximum number of messages allowed
56357 + within the flood time interval you chose in a separate option. The
56358 + default should be suitable for most people, however if you find that
56359 + many of your logs are being interpreted as flooding, you may want to
56360 + raise this value.
56361 +
56362 +endmenu
56363 +
56364 +endmenu
56365 diff --git a/grsecurity/Makefile b/grsecurity/Makefile
56366 new file mode 100644
56367 index 0000000..1b9afa9
56368 --- /dev/null
56369 +++ b/grsecurity/Makefile
56370 @@ -0,0 +1,38 @@
56371 +# grsecurity's ACL system was originally written in 2001 by Michael Dalton
56372 +# during 2001-2009 it has been completely redesigned by Brad Spengler
56373 +# into an RBAC system
56374 +#
56375 +# All code in this directory and various hooks inserted throughout the kernel
56376 +# are copyright Brad Spengler - Open Source Security, Inc., and released
56377 +# under the GPL v2 or higher
56378 +
56379 +KBUILD_CFLAGS += -Werror
56380 +
56381 +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
56382 + grsec_mount.o grsec_sig.o grsec_sysctl.o \
56383 + grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
56384 +
56385 +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
56386 + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
56387 + gracl_learn.o grsec_log.o
56388 +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
56389 +
56390 +ifdef CONFIG_NET
56391 +obj-y += grsec_sock.o
56392 +obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
56393 +endif
56394 +
56395 +ifndef CONFIG_GRKERNSEC
56396 +obj-y += grsec_disabled.o
56397 +endif
56398 +
56399 +ifdef CONFIG_GRKERNSEC_HIDESYM
56400 +extra-y := grsec_hidesym.o
56401 +$(obj)/grsec_hidesym.o:
56402 + @-chmod -f 500 /boot
56403 + @-chmod -f 500 /lib/modules
56404 + @-chmod -f 500 /lib64/modules
56405 + @-chmod -f 500 /lib32/modules
56406 + @-chmod -f 700 .
56407 + @echo ' grsec: protected kernel image paths'
56408 +endif
56409 diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
56410 new file mode 100644
56411 index 0000000..b1c4f4d
56412 --- /dev/null
56413 +++ b/grsecurity/gracl.c
56414 @@ -0,0 +1,4149 @@
56415 +#include <linux/kernel.h>
56416 +#include <linux/module.h>
56417 +#include <linux/sched.h>
56418 +#include <linux/mm.h>
56419 +#include <linux/file.h>
56420 +#include <linux/fs.h>
56421 +#include <linux/namei.h>
56422 +#include <linux/mount.h>
56423 +#include <linux/tty.h>
56424 +#include <linux/proc_fs.h>
56425 +#include <linux/smp_lock.h>
56426 +#include <linux/slab.h>
56427 +#include <linux/vmalloc.h>
56428 +#include <linux/types.h>
56429 +#include <linux/sysctl.h>
56430 +#include <linux/netdevice.h>
56431 +#include <linux/ptrace.h>
56432 +#include <linux/gracl.h>
56433 +#include <linux/gralloc.h>
56434 +#include <linux/security.h>
56435 +#include <linux/grinternal.h>
56436 +#include <linux/pid_namespace.h>
56437 +#include <linux/fdtable.h>
56438 +#include <linux/percpu.h>
56439 +
56440 +#include <asm/uaccess.h>
56441 +#include <asm/errno.h>
56442 +#include <asm/mman.h>
56443 +
56444 +static struct acl_role_db acl_role_set;
56445 +static struct name_db name_set;
56446 +static struct inodev_db inodev_set;
56447 +
56448 +/* for keeping track of userspace pointers used for subjects, so we
56449 + can share references in the kernel as well
56450 +*/
56451 +
56452 +static struct dentry *real_root;
56453 +static struct vfsmount *real_root_mnt;
56454 +
56455 +static struct acl_subj_map_db subj_map_set;
56456 +
56457 +static struct acl_role_label *default_role;
56458 +
56459 +static struct acl_role_label *role_list;
56460 +
56461 +static u16 acl_sp_role_value;
56462 +
56463 +extern char *gr_shared_page[4];
56464 +static DEFINE_MUTEX(gr_dev_mutex);
56465 +DEFINE_RWLOCK(gr_inode_lock);
56466 +
56467 +struct gr_arg *gr_usermode;
56468 +
56469 +static unsigned int gr_status __read_only = GR_STATUS_INIT;
56470 +
56471 +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
56472 +extern void gr_clear_learn_entries(void);
56473 +
56474 +#ifdef CONFIG_GRKERNSEC_RESLOG
56475 +extern void gr_log_resource(const struct task_struct *task,
56476 + const int res, const unsigned long wanted, const int gt);
56477 +#endif
56478 +
56479 +unsigned char *gr_system_salt;
56480 +unsigned char *gr_system_sum;
56481 +
56482 +static struct sprole_pw **acl_special_roles = NULL;
56483 +static __u16 num_sprole_pws = 0;
56484 +
56485 +static struct acl_role_label *kernel_role = NULL;
56486 +
56487 +static unsigned int gr_auth_attempts = 0;
56488 +static unsigned long gr_auth_expires = 0UL;
56489 +
56490 +#ifdef CONFIG_NET
56491 +extern struct vfsmount *sock_mnt;
56492 +#endif
56493 +extern struct vfsmount *pipe_mnt;
56494 +extern struct vfsmount *shm_mnt;
56495 +#ifdef CONFIG_HUGETLBFS
56496 +extern struct vfsmount *hugetlbfs_vfsmount;
56497 +#endif
56498 +
56499 +static struct acl_object_label *fakefs_obj_rw;
56500 +static struct acl_object_label *fakefs_obj_rwx;
56501 +
56502 +extern int gr_init_uidset(void);
56503 +extern void gr_free_uidset(void);
56504 +extern void gr_remove_uid(uid_t uid);
56505 +extern int gr_find_uid(uid_t uid);
56506 +
56507 +__inline__ int
56508 +gr_acl_is_enabled(void)
56509 +{
56510 + return (gr_status & GR_READY);
56511 +}
56512 +
56513 +#ifdef CONFIG_BTRFS_FS
56514 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
56515 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
56516 +#endif
56517 +
56518 +static inline dev_t __get_dev(const struct dentry *dentry)
56519 +{
56520 +#ifdef CONFIG_BTRFS_FS
56521 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
56522 + return get_btrfs_dev_from_inode(dentry->d_inode);
56523 + else
56524 +#endif
56525 + return dentry->d_inode->i_sb->s_dev;
56526 +}
56527 +
56528 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
56529 +{
56530 + return __get_dev(dentry);
56531 +}
56532 +
56533 +static char gr_task_roletype_to_char(struct task_struct *task)
56534 +{
56535 + switch (task->role->roletype &
56536 + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
56537 + GR_ROLE_SPECIAL)) {
56538 + case GR_ROLE_DEFAULT:
56539 + return 'D';
56540 + case GR_ROLE_USER:
56541 + return 'U';
56542 + case GR_ROLE_GROUP:
56543 + return 'G';
56544 + case GR_ROLE_SPECIAL:
56545 + return 'S';
56546 + }
56547 +
56548 + return 'X';
56549 +}
56550 +
56551 +char gr_roletype_to_char(void)
56552 +{
56553 + return gr_task_roletype_to_char(current);
56554 +}
56555 +
56556 +__inline__ int
56557 +gr_acl_tpe_check(void)
56558 +{
56559 + if (unlikely(!(gr_status & GR_READY)))
56560 + return 0;
56561 + if (current->role->roletype & GR_ROLE_TPE)
56562 + return 1;
56563 + else
56564 + return 0;
56565 +}
56566 +
56567 +int
56568 +gr_handle_rawio(const struct inode *inode)
56569 +{
56570 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56571 + if (inode && S_ISBLK(inode->i_mode) &&
56572 + grsec_enable_chroot_caps && proc_is_chrooted(current) &&
56573 + !capable(CAP_SYS_RAWIO))
56574 + return 1;
56575 +#endif
56576 + return 0;
56577 +}
56578 +
56579 +static int
56580 +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
56581 +{
56582 + if (likely(lena != lenb))
56583 + return 0;
56584 +
56585 + return !memcmp(a, b, lena);
56586 +}
56587 +
56588 +static int prepend(char **buffer, int *buflen, const char *str, int namelen)
56589 +{
56590 + *buflen -= namelen;
56591 + if (*buflen < 0)
56592 + return -ENAMETOOLONG;
56593 + *buffer -= namelen;
56594 + memcpy(*buffer, str, namelen);
56595 + return 0;
56596 +}
56597 +
56598 +/* this must be called with vfsmount_lock and dcache_lock held */
56599 +
56600 +static char * __our_d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
56601 + struct dentry *root, struct vfsmount *rootmnt,
56602 + char *buffer, int buflen)
56603 +{
56604 + char * end = buffer+buflen;
56605 + char * retval;
56606 + int namelen;
56607 +
56608 + *--end = '\0';
56609 + buflen--;
56610 +
56611 + if (buflen < 1)
56612 + goto Elong;
56613 + /* Get '/' right */
56614 + retval = end-1;
56615 + *retval = '/';
56616 +
56617 + for (;;) {
56618 + struct dentry * parent;
56619 +
56620 + if (dentry == root && vfsmnt == rootmnt)
56621 + break;
56622 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
56623 + /* Global root? */
56624 + if (vfsmnt->mnt_parent == vfsmnt)
56625 + goto global_root;
56626 + dentry = vfsmnt->mnt_mountpoint;
56627 + vfsmnt = vfsmnt->mnt_parent;
56628 + continue;
56629 + }
56630 + parent = dentry->d_parent;
56631 + prefetch(parent);
56632 + namelen = dentry->d_name.len;
56633 + buflen -= namelen + 1;
56634 + if (buflen < 0)
56635 + goto Elong;
56636 + end -= namelen;
56637 + memcpy(end, dentry->d_name.name, namelen);
56638 + *--end = '/';
56639 + retval = end;
56640 + dentry = parent;
56641 + }
56642 +
56643 +out:
56644 + return retval;
56645 +
56646 +global_root:
56647 + namelen = dentry->d_name.len;
56648 + buflen -= namelen;
56649 + if (buflen < 0)
56650 + goto Elong;
56651 + retval -= namelen-1; /* hit the slash */
56652 + memcpy(retval, dentry->d_name.name, namelen);
56653 + goto out;
56654 +Elong:
56655 + retval = ERR_PTR(-ENAMETOOLONG);
56656 + goto out;
56657 +}
56658 +
56659 +static char *
56660 +gen_full_path(struct dentry *dentry, struct vfsmount *vfsmnt,
56661 + struct dentry *root, struct vfsmount *rootmnt, char *buf, int buflen)
56662 +{
56663 + char *retval;
56664 +
56665 + retval = __our_d_path(dentry, vfsmnt, root, rootmnt, buf, buflen);
56666 + if (unlikely(IS_ERR(retval)))
56667 + retval = strcpy(buf, "<path too long>");
56668 + else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
56669 + retval[1] = '\0';
56670 +
56671 + return retval;
56672 +}
56673 +
56674 +static char *
56675 +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
56676 + char *buf, int buflen)
56677 +{
56678 + char *res;
56679 +
56680 + /* we can use real_root, real_root_mnt, because this is only called
56681 + by the RBAC system */
56682 + res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, real_root, real_root_mnt, buf, buflen);
56683 +
56684 + return res;
56685 +}
56686 +
56687 +static char *
56688 +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
56689 + char *buf, int buflen)
56690 +{
56691 + char *res;
56692 + struct dentry *root;
56693 + struct vfsmount *rootmnt;
56694 + struct task_struct *reaper = &init_task;
56695 +
56696 + /* we can't use real_root, real_root_mnt, because they belong only to the RBAC system */
56697 + read_lock(&reaper->fs->lock);
56698 + root = dget(reaper->fs->root.dentry);
56699 + rootmnt = mntget(reaper->fs->root.mnt);
56700 + read_unlock(&reaper->fs->lock);
56701 +
56702 + spin_lock(&dcache_lock);
56703 + spin_lock(&vfsmount_lock);
56704 + res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, root, rootmnt, buf, buflen);
56705 + spin_unlock(&vfsmount_lock);
56706 + spin_unlock(&dcache_lock);
56707 +
56708 + dput(root);
56709 + mntput(rootmnt);
56710 + return res;
56711 +}
56712 +
56713 +static char *
56714 +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
56715 +{
56716 + char *ret;
56717 + spin_lock(&dcache_lock);
56718 + spin_lock(&vfsmount_lock);
56719 + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
56720 + PAGE_SIZE);
56721 + spin_unlock(&vfsmount_lock);
56722 + spin_unlock(&dcache_lock);
56723 + return ret;
56724 +}
56725 +
56726 +static char *
56727 +gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
56728 +{
56729 + char *ret;
56730 + char *buf;
56731 + int buflen;
56732 +
56733 + spin_lock(&dcache_lock);
56734 + spin_lock(&vfsmount_lock);
56735 + buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
56736 + ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
56737 + buflen = (int)(ret - buf);
56738 + if (buflen >= 5)
56739 + prepend(&ret, &buflen, "/proc", 5);
56740 + else
56741 + ret = strcpy(buf, "<path too long>");
56742 + spin_unlock(&vfsmount_lock);
56743 + spin_unlock(&dcache_lock);
56744 + return ret;
56745 +}
56746 +
56747 +char *
56748 +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
56749 +{
56750 + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
56751 + PAGE_SIZE);
56752 +}
56753 +
56754 +char *
56755 +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
56756 +{
56757 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
56758 + PAGE_SIZE);
56759 +}
56760 +
56761 +char *
56762 +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
56763 +{
56764 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
56765 + PAGE_SIZE);
56766 +}
56767 +
56768 +char *
56769 +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
56770 +{
56771 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
56772 + PAGE_SIZE);
56773 +}
56774 +
56775 +char *
56776 +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
56777 +{
56778 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
56779 + PAGE_SIZE);
56780 +}
56781 +
56782 +__inline__ __u32
56783 +to_gr_audit(const __u32 reqmode)
56784 +{
56785 + /* masks off auditable permission flags, then shifts them to create
56786 + auditing flags, and adds the special case of append auditing if
56787 + we're requesting write */
56788 + return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
56789 +}
56790 +
56791 +struct acl_subject_label *
56792 +lookup_subject_map(const struct acl_subject_label *userp)
56793 +{
56794 + unsigned int index = shash(userp, subj_map_set.s_size);
56795 + struct subject_map *match;
56796 +
56797 + match = subj_map_set.s_hash[index];
56798 +
56799 + while (match && match->user != userp)
56800 + match = match->next;
56801 +
56802 + if (match != NULL)
56803 + return match->kernel;
56804 + else
56805 + return NULL;
56806 +}
56807 +
56808 +static void
56809 +insert_subj_map_entry(struct subject_map *subjmap)
56810 +{
56811 + unsigned int index = shash(subjmap->user, subj_map_set.s_size);
56812 + struct subject_map **curr;
56813 +
56814 + subjmap->prev = NULL;
56815 +
56816 + curr = &subj_map_set.s_hash[index];
56817 + if (*curr != NULL)
56818 + (*curr)->prev = subjmap;
56819 +
56820 + subjmap->next = *curr;
56821 + *curr = subjmap;
56822 +
56823 + return;
56824 +}
56825 +
56826 +static struct acl_role_label *
56827 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
56828 + const gid_t gid)
56829 +{
56830 + unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
56831 + struct acl_role_label *match;
56832 + struct role_allowed_ip *ipp;
56833 + unsigned int x;
56834 + u32 curr_ip = task->signal->curr_ip;
56835 +
56836 + task->signal->saved_ip = curr_ip;
56837 +
56838 + match = acl_role_set.r_hash[index];
56839 +
56840 + while (match) {
56841 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
56842 + for (x = 0; x < match->domain_child_num; x++) {
56843 + if (match->domain_children[x] == uid)
56844 + goto found;
56845 + }
56846 + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
56847 + break;
56848 + match = match->next;
56849 + }
56850 +found:
56851 + if (match == NULL) {
56852 + try_group:
56853 + index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
56854 + match = acl_role_set.r_hash[index];
56855 +
56856 + while (match) {
56857 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
56858 + for (x = 0; x < match->domain_child_num; x++) {
56859 + if (match->domain_children[x] == gid)
56860 + goto found2;
56861 + }
56862 + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
56863 + break;
56864 + match = match->next;
56865 + }
56866 +found2:
56867 + if (match == NULL)
56868 + match = default_role;
56869 + if (match->allowed_ips == NULL)
56870 + return match;
56871 + else {
56872 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
56873 + if (likely
56874 + ((ntohl(curr_ip) & ipp->netmask) ==
56875 + (ntohl(ipp->addr) & ipp->netmask)))
56876 + return match;
56877 + }
56878 + match = default_role;
56879 + }
56880 + } else if (match->allowed_ips == NULL) {
56881 + return match;
56882 + } else {
56883 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
56884 + if (likely
56885 + ((ntohl(curr_ip) & ipp->netmask) ==
56886 + (ntohl(ipp->addr) & ipp->netmask)))
56887 + return match;
56888 + }
56889 + goto try_group;
56890 + }
56891 +
56892 + return match;
56893 +}
56894 +
56895 +struct acl_subject_label *
56896 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
56897 + const struct acl_role_label *role)
56898 +{
56899 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
56900 + struct acl_subject_label *match;
56901 +
56902 + match = role->subj_hash[index];
56903 +
56904 + while (match && (match->inode != ino || match->device != dev ||
56905 + (match->mode & GR_DELETED))) {
56906 + match = match->next;
56907 + }
56908 +
56909 + if (match && !(match->mode & GR_DELETED))
56910 + return match;
56911 + else
56912 + return NULL;
56913 +}
56914 +
56915 +struct acl_subject_label *
56916 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
56917 + const struct acl_role_label *role)
56918 +{
56919 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
56920 + struct acl_subject_label *match;
56921 +
56922 + match = role->subj_hash[index];
56923 +
56924 + while (match && (match->inode != ino || match->device != dev ||
56925 + !(match->mode & GR_DELETED))) {
56926 + match = match->next;
56927 + }
56928 +
56929 + if (match && (match->mode & GR_DELETED))
56930 + return match;
56931 + else
56932 + return NULL;
56933 +}
56934 +
56935 +static struct acl_object_label *
56936 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
56937 + const struct acl_subject_label *subj)
56938 +{
56939 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
56940 + struct acl_object_label *match;
56941 +
56942 + match = subj->obj_hash[index];
56943 +
56944 + while (match && (match->inode != ino || match->device != dev ||
56945 + (match->mode & GR_DELETED))) {
56946 + match = match->next;
56947 + }
56948 +
56949 + if (match && !(match->mode & GR_DELETED))
56950 + return match;
56951 + else
56952 + return NULL;
56953 +}
56954 +
56955 +static struct acl_object_label *
56956 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
56957 + const struct acl_subject_label *subj)
56958 +{
56959 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
56960 + struct acl_object_label *match;
56961 +
56962 + match = subj->obj_hash[index];
56963 +
56964 + while (match && (match->inode != ino || match->device != dev ||
56965 + !(match->mode & GR_DELETED))) {
56966 + match = match->next;
56967 + }
56968 +
56969 + if (match && (match->mode & GR_DELETED))
56970 + return match;
56971 +
56972 + match = subj->obj_hash[index];
56973 +
56974 + while (match && (match->inode != ino || match->device != dev ||
56975 + (match->mode & GR_DELETED))) {
56976 + match = match->next;
56977 + }
56978 +
56979 + if (match && !(match->mode & GR_DELETED))
56980 + return match;
56981 + else
56982 + return NULL;
56983 +}
56984 +
56985 +static struct name_entry *
56986 +lookup_name_entry(const char *name)
56987 +{
56988 + unsigned int len = strlen(name);
56989 + unsigned int key = full_name_hash(name, len);
56990 + unsigned int index = key % name_set.n_size;
56991 + struct name_entry *match;
56992 +
56993 + match = name_set.n_hash[index];
56994 +
56995 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
56996 + match = match->next;
56997 +
56998 + return match;
56999 +}
57000 +
57001 +static struct name_entry *
57002 +lookup_name_entry_create(const char *name)
57003 +{
57004 + unsigned int len = strlen(name);
57005 + unsigned int key = full_name_hash(name, len);
57006 + unsigned int index = key % name_set.n_size;
57007 + struct name_entry *match;
57008 +
57009 + match = name_set.n_hash[index];
57010 +
57011 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
57012 + !match->deleted))
57013 + match = match->next;
57014 +
57015 + if (match && match->deleted)
57016 + return match;
57017 +
57018 + match = name_set.n_hash[index];
57019 +
57020 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
57021 + match->deleted))
57022 + match = match->next;
57023 +
57024 + if (match && !match->deleted)
57025 + return match;
57026 + else
57027 + return NULL;
57028 +}
57029 +
57030 +static struct inodev_entry *
57031 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
57032 +{
57033 + unsigned int index = fhash(ino, dev, inodev_set.i_size);
57034 + struct inodev_entry *match;
57035 +
57036 + match = inodev_set.i_hash[index];
57037 +
57038 + while (match && (match->nentry->inode != ino || match->nentry->device != dev))
57039 + match = match->next;
57040 +
57041 + return match;
57042 +}
57043 +
57044 +static void
57045 +insert_inodev_entry(struct inodev_entry *entry)
57046 +{
57047 + unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
57048 + inodev_set.i_size);
57049 + struct inodev_entry **curr;
57050 +
57051 + entry->prev = NULL;
57052 +
57053 + curr = &inodev_set.i_hash[index];
57054 + if (*curr != NULL)
57055 + (*curr)->prev = entry;
57056 +
57057 + entry->next = *curr;
57058 + *curr = entry;
57059 +
57060 + return;
57061 +}
57062 +
57063 +static void
57064 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
57065 +{
57066 + unsigned int index =
57067 + rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
57068 + struct acl_role_label **curr;
57069 + struct acl_role_label *tmp;
57070 +
57071 + curr = &acl_role_set.r_hash[index];
57072 +
57073 + /* if role was already inserted due to domains and already has
57074 + a role in the same bucket as it attached, then we need to
57075 + combine these two buckets
57076 + */
57077 + if (role->next) {
57078 + tmp = role->next;
57079 + while (tmp->next)
57080 + tmp = tmp->next;
57081 + tmp->next = *curr;
57082 + } else
57083 + role->next = *curr;
57084 + *curr = role;
57085 +
57086 + return;
57087 +}
57088 +
57089 +static void
57090 +insert_acl_role_label(struct acl_role_label *role)
57091 +{
57092 + int i;
57093 +
57094 + if (role_list == NULL) {
57095 + role_list = role;
57096 + role->prev = NULL;
57097 + } else {
57098 + role->prev = role_list;
57099 + role_list = role;
57100 + }
57101 +
57102 + /* used for hash chains */
57103 + role->next = NULL;
57104 +
57105 + if (role->roletype & GR_ROLE_DOMAIN) {
57106 + for (i = 0; i < role->domain_child_num; i++)
57107 + __insert_acl_role_label(role, role->domain_children[i]);
57108 + } else
57109 + __insert_acl_role_label(role, role->uidgid);
57110 +}
57111 +
57112 +static int
57113 +insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
57114 +{
57115 + struct name_entry **curr, *nentry;
57116 + struct inodev_entry *ientry;
57117 + unsigned int len = strlen(name);
57118 + unsigned int key = full_name_hash(name, len);
57119 + unsigned int index = key % name_set.n_size;
57120 +
57121 + curr = &name_set.n_hash[index];
57122 +
57123 + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
57124 + curr = &((*curr)->next);
57125 +
57126 + if (*curr != NULL)
57127 + return 1;
57128 +
57129 + nentry = acl_alloc(sizeof (struct name_entry));
57130 + if (nentry == NULL)
57131 + return 0;
57132 + ientry = acl_alloc(sizeof (struct inodev_entry));
57133 + if (ientry == NULL)
57134 + return 0;
57135 + ientry->nentry = nentry;
57136 +
57137 + nentry->key = key;
57138 + nentry->name = name;
57139 + nentry->inode = inode;
57140 + nentry->device = device;
57141 + nentry->len = len;
57142 + nentry->deleted = deleted;
57143 +
57144 + nentry->prev = NULL;
57145 + curr = &name_set.n_hash[index];
57146 + if (*curr != NULL)
57147 + (*curr)->prev = nentry;
57148 + nentry->next = *curr;
57149 + *curr = nentry;
57150 +
57151 + /* insert us into the table searchable by inode/dev */
57152 + insert_inodev_entry(ientry);
57153 +
57154 + return 1;
57155 +}
57156 +
57157 +static void
57158 +insert_acl_obj_label(struct acl_object_label *obj,
57159 + struct acl_subject_label *subj)
57160 +{
57161 + unsigned int index =
57162 + fhash(obj->inode, obj->device, subj->obj_hash_size);
57163 + struct acl_object_label **curr;
57164 +
57165 +
57166 + obj->prev = NULL;
57167 +
57168 + curr = &subj->obj_hash[index];
57169 + if (*curr != NULL)
57170 + (*curr)->prev = obj;
57171 +
57172 + obj->next = *curr;
57173 + *curr = obj;
57174 +
57175 + return;
57176 +}
57177 +
57178 +static void
57179 +insert_acl_subj_label(struct acl_subject_label *obj,
57180 + struct acl_role_label *role)
57181 +{
57182 + unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
57183 + struct acl_subject_label **curr;
57184 +
57185 + obj->prev = NULL;
57186 +
57187 + curr = &role->subj_hash[index];
57188 + if (*curr != NULL)
57189 + (*curr)->prev = obj;
57190 +
57191 + obj->next = *curr;
57192 + *curr = obj;
57193 +
57194 + return;
57195 +}
57196 +
57197 +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
57198 +
57199 +static void *
57200 +create_table(__u32 * len, int elementsize)
57201 +{
57202 + unsigned int table_sizes[] = {
57203 + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
57204 + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
57205 + 4194301, 8388593, 16777213, 33554393, 67108859
57206 + };
57207 + void *newtable = NULL;
57208 + unsigned int pwr = 0;
57209 +
57210 + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
57211 + table_sizes[pwr] <= *len)
57212 + pwr++;
57213 +
57214 + if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
57215 + return newtable;
57216 +
57217 + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
57218 + newtable =
57219 + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
57220 + else
57221 + newtable = vmalloc(table_sizes[pwr] * elementsize);
57222 +
57223 + *len = table_sizes[pwr];
57224 +
57225 + return newtable;
57226 +}
57227 +
57228 +static int
57229 +init_variables(const struct gr_arg *arg)
57230 +{
57231 + struct task_struct *reaper = &init_task;
57232 + unsigned int stacksize;
57233 +
57234 + subj_map_set.s_size = arg->role_db.num_subjects;
57235 + acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
57236 + name_set.n_size = arg->role_db.num_objects;
57237 + inodev_set.i_size = arg->role_db.num_objects;
57238 +
57239 + if (!subj_map_set.s_size || !acl_role_set.r_size ||
57240 + !name_set.n_size || !inodev_set.i_size)
57241 + return 1;
57242 +
57243 + if (!gr_init_uidset())
57244 + return 1;
57245 +
57246 + /* set up the stack that holds allocation info */
57247 +
57248 + stacksize = arg->role_db.num_pointers + 5;
57249 +
57250 + if (!acl_alloc_stack_init(stacksize))
57251 + return 1;
57252 +
57253 + /* grab reference for the real root dentry and vfsmount */
57254 + read_lock(&reaper->fs->lock);
57255 + real_root = dget(reaper->fs->root.dentry);
57256 + real_root_mnt = mntget(reaper->fs->root.mnt);
57257 + read_unlock(&reaper->fs->lock);
57258 +
57259 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
57260 + printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root), real_root->d_inode->i_ino);
57261 +#endif
57262 +
57263 + fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
57264 + if (fakefs_obj_rw == NULL)
57265 + return 1;
57266 + fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
57267 +
57268 + fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
57269 + if (fakefs_obj_rwx == NULL)
57270 + return 1;
57271 + fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
57272 +
57273 + subj_map_set.s_hash =
57274 + (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
57275 + acl_role_set.r_hash =
57276 + (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
57277 + name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
57278 + inodev_set.i_hash =
57279 + (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
57280 +
57281 + if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
57282 + !name_set.n_hash || !inodev_set.i_hash)
57283 + return 1;
57284 +
57285 + memset(subj_map_set.s_hash, 0,
57286 + sizeof(struct subject_map *) * subj_map_set.s_size);
57287 + memset(acl_role_set.r_hash, 0,
57288 + sizeof (struct acl_role_label *) * acl_role_set.r_size);
57289 + memset(name_set.n_hash, 0,
57290 + sizeof (struct name_entry *) * name_set.n_size);
57291 + memset(inodev_set.i_hash, 0,
57292 + sizeof (struct inodev_entry *) * inodev_set.i_size);
57293 +
57294 + return 0;
57295 +}
57296 +
57297 +/* free information not needed after startup
57298 + currently contains user->kernel pointer mappings for subjects
57299 +*/
57300 +
57301 +static void
57302 +free_init_variables(void)
57303 +{
57304 + __u32 i;
57305 +
57306 + if (subj_map_set.s_hash) {
57307 + for (i = 0; i < subj_map_set.s_size; i++) {
57308 + if (subj_map_set.s_hash[i]) {
57309 + kfree(subj_map_set.s_hash[i]);
57310 + subj_map_set.s_hash[i] = NULL;
57311 + }
57312 + }
57313 +
57314 + if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
57315 + PAGE_SIZE)
57316 + kfree(subj_map_set.s_hash);
57317 + else
57318 + vfree(subj_map_set.s_hash);
57319 + }
57320 +
57321 + return;
57322 +}
57323 +
57324 +static void
57325 +free_variables(void)
57326 +{
57327 + struct acl_subject_label *s;
57328 + struct acl_role_label *r;
57329 + struct task_struct *task, *task2;
57330 + unsigned int x;
57331 +
57332 + gr_clear_learn_entries();
57333 +
57334 + read_lock(&tasklist_lock);
57335 + do_each_thread(task2, task) {
57336 + task->acl_sp_role = 0;
57337 + task->acl_role_id = 0;
57338 + task->acl = NULL;
57339 + task->role = NULL;
57340 + } while_each_thread(task2, task);
57341 + read_unlock(&tasklist_lock);
57342 +
57343 + /* release the reference to the real root dentry and vfsmount */
57344 + if (real_root)
57345 + dput(real_root);
57346 + real_root = NULL;
57347 + if (real_root_mnt)
57348 + mntput(real_root_mnt);
57349 + real_root_mnt = NULL;
57350 +
57351 + /* free all object hash tables */
57352 +
57353 + FOR_EACH_ROLE_START(r)
57354 + if (r->subj_hash == NULL)
57355 + goto next_role;
57356 + FOR_EACH_SUBJECT_START(r, s, x)
57357 + if (s->obj_hash == NULL)
57358 + break;
57359 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
57360 + kfree(s->obj_hash);
57361 + else
57362 + vfree(s->obj_hash);
57363 + FOR_EACH_SUBJECT_END(s, x)
57364 + FOR_EACH_NESTED_SUBJECT_START(r, s)
57365 + if (s->obj_hash == NULL)
57366 + break;
57367 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
57368 + kfree(s->obj_hash);
57369 + else
57370 + vfree(s->obj_hash);
57371 + FOR_EACH_NESTED_SUBJECT_END(s)
57372 + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
57373 + kfree(r->subj_hash);
57374 + else
57375 + vfree(r->subj_hash);
57376 + r->subj_hash = NULL;
57377 +next_role:
57378 + FOR_EACH_ROLE_END(r)
57379 +
57380 + acl_free_all();
57381 +
57382 + if (acl_role_set.r_hash) {
57383 + if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
57384 + PAGE_SIZE)
57385 + kfree(acl_role_set.r_hash);
57386 + else
57387 + vfree(acl_role_set.r_hash);
57388 + }
57389 + if (name_set.n_hash) {
57390 + if ((name_set.n_size * sizeof (struct name_entry *)) <=
57391 + PAGE_SIZE)
57392 + kfree(name_set.n_hash);
57393 + else
57394 + vfree(name_set.n_hash);
57395 + }
57396 +
57397 + if (inodev_set.i_hash) {
57398 + if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
57399 + PAGE_SIZE)
57400 + kfree(inodev_set.i_hash);
57401 + else
57402 + vfree(inodev_set.i_hash);
57403 + }
57404 +
57405 + gr_free_uidset();
57406 +
57407 + memset(&name_set, 0, sizeof (struct name_db));
57408 + memset(&inodev_set, 0, sizeof (struct inodev_db));
57409 + memset(&acl_role_set, 0, sizeof (struct acl_role_db));
57410 + memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
57411 +
57412 + default_role = NULL;
57413 + role_list = NULL;
57414 +
57415 + return;
57416 +}
57417 +
57418 +static __u32
57419 +count_user_objs(struct acl_object_label *userp)
57420 +{
57421 + struct acl_object_label o_tmp;
57422 + __u32 num = 0;
57423 +
57424 + while (userp) {
57425 + if (copy_from_user(&o_tmp, userp,
57426 + sizeof (struct acl_object_label)))
57427 + break;
57428 +
57429 + userp = o_tmp.prev;
57430 + num++;
57431 + }
57432 +
57433 + return num;
57434 +}
57435 +
57436 +static struct acl_subject_label *
57437 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
57438 +
57439 +static int
57440 +copy_user_glob(struct acl_object_label *obj)
57441 +{
57442 + struct acl_object_label *g_tmp, **guser;
57443 + unsigned int len;
57444 + char *tmp;
57445 +
57446 + if (obj->globbed == NULL)
57447 + return 0;
57448 +
57449 + guser = &obj->globbed;
57450 + while (*guser) {
57451 + g_tmp = (struct acl_object_label *)
57452 + acl_alloc(sizeof (struct acl_object_label));
57453 + if (g_tmp == NULL)
57454 + return -ENOMEM;
57455 +
57456 + if (copy_from_user(g_tmp, *guser,
57457 + sizeof (struct acl_object_label)))
57458 + return -EFAULT;
57459 +
57460 + len = strnlen_user(g_tmp->filename, PATH_MAX);
57461 +
57462 + if (!len || len >= PATH_MAX)
57463 + return -EINVAL;
57464 +
57465 + if ((tmp = (char *) acl_alloc(len)) == NULL)
57466 + return -ENOMEM;
57467 +
57468 + if (copy_from_user(tmp, g_tmp->filename, len))
57469 + return -EFAULT;
57470 + tmp[len-1] = '\0';
57471 + g_tmp->filename = tmp;
57472 +
57473 + *guser = g_tmp;
57474 + guser = &(g_tmp->next);
57475 + }
57476 +
57477 + return 0;
57478 +}
57479 +
57480 +static int
57481 +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
57482 + struct acl_role_label *role)
57483 +{
57484 + struct acl_object_label *o_tmp;
57485 + unsigned int len;
57486 + int ret;
57487 + char *tmp;
57488 +
57489 + while (userp) {
57490 + if ((o_tmp = (struct acl_object_label *)
57491 + acl_alloc(sizeof (struct acl_object_label))) == NULL)
57492 + return -ENOMEM;
57493 +
57494 + if (copy_from_user(o_tmp, userp,
57495 + sizeof (struct acl_object_label)))
57496 + return -EFAULT;
57497 +
57498 + userp = o_tmp->prev;
57499 +
57500 + len = strnlen_user(o_tmp->filename, PATH_MAX);
57501 +
57502 + if (!len || len >= PATH_MAX)
57503 + return -EINVAL;
57504 +
57505 + if ((tmp = (char *) acl_alloc(len)) == NULL)
57506 + return -ENOMEM;
57507 +
57508 + if (copy_from_user(tmp, o_tmp->filename, len))
57509 + return -EFAULT;
57510 + tmp[len-1] = '\0';
57511 + o_tmp->filename = tmp;
57512 +
57513 + insert_acl_obj_label(o_tmp, subj);
57514 + if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
57515 + o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
57516 + return -ENOMEM;
57517 +
57518 + ret = copy_user_glob(o_tmp);
57519 + if (ret)
57520 + return ret;
57521 +
57522 + if (o_tmp->nested) {
57523 + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
57524 + if (IS_ERR(o_tmp->nested))
57525 + return PTR_ERR(o_tmp->nested);
57526 +
57527 + /* insert into nested subject list */
57528 + o_tmp->nested->next = role->hash->first;
57529 + role->hash->first = o_tmp->nested;
57530 + }
57531 + }
57532 +
57533 + return 0;
57534 +}
57535 +
57536 +static __u32
57537 +count_user_subjs(struct acl_subject_label *userp)
57538 +{
57539 + struct acl_subject_label s_tmp;
57540 + __u32 num = 0;
57541 +
57542 + while (userp) {
57543 + if (copy_from_user(&s_tmp, userp,
57544 + sizeof (struct acl_subject_label)))
57545 + break;
57546 +
57547 + userp = s_tmp.prev;
57548 + /* do not count nested subjects against this count, since
57549 + they are not included in the hash table, but are
57550 + attached to objects. We have already counted
57551 + the subjects in userspace for the allocation
57552 + stack
57553 + */
57554 + if (!(s_tmp.mode & GR_NESTED))
57555 + num++;
57556 + }
57557 +
57558 + return num;
57559 +}
57560 +
57561 +static int
57562 +copy_user_allowedips(struct acl_role_label *rolep)
57563 +{
57564 + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
57565 +
57566 + ruserip = rolep->allowed_ips;
57567 +
57568 + while (ruserip) {
57569 + rlast = rtmp;
57570 +
57571 + if ((rtmp = (struct role_allowed_ip *)
57572 + acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
57573 + return -ENOMEM;
57574 +
57575 + if (copy_from_user(rtmp, ruserip,
57576 + sizeof (struct role_allowed_ip)))
57577 + return -EFAULT;
57578 +
57579 + ruserip = rtmp->prev;
57580 +
57581 + if (!rlast) {
57582 + rtmp->prev = NULL;
57583 + rolep->allowed_ips = rtmp;
57584 + } else {
57585 + rlast->next = rtmp;
57586 + rtmp->prev = rlast;
57587 + }
57588 +
57589 + if (!ruserip)
57590 + rtmp->next = NULL;
57591 + }
57592 +
57593 + return 0;
57594 +}
57595 +
57596 +static int
57597 +copy_user_transitions(struct acl_role_label *rolep)
57598 +{
57599 + struct role_transition *rusertp, *rtmp = NULL, *rlast;
57600 +
57601 + unsigned int len;
57602 + char *tmp;
57603 +
57604 + rusertp = rolep->transitions;
57605 +
57606 + while (rusertp) {
57607 + rlast = rtmp;
57608 +
57609 + if ((rtmp = (struct role_transition *)
57610 + acl_alloc(sizeof (struct role_transition))) == NULL)
57611 + return -ENOMEM;
57612 +
57613 + if (copy_from_user(rtmp, rusertp,
57614 + sizeof (struct role_transition)))
57615 + return -EFAULT;
57616 +
57617 + rusertp = rtmp->prev;
57618 +
57619 + len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
57620 +
57621 + if (!len || len >= GR_SPROLE_LEN)
57622 + return -EINVAL;
57623 +
57624 + if ((tmp = (char *) acl_alloc(len)) == NULL)
57625 + return -ENOMEM;
57626 +
57627 + if (copy_from_user(tmp, rtmp->rolename, len))
57628 + return -EFAULT;
57629 + tmp[len-1] = '\0';
57630 + rtmp->rolename = tmp;
57631 +
57632 + if (!rlast) {
57633 + rtmp->prev = NULL;
57634 + rolep->transitions = rtmp;
57635 + } else {
57636 + rlast->next = rtmp;
57637 + rtmp->prev = rlast;
57638 + }
57639 +
57640 + if (!rusertp)
57641 + rtmp->next = NULL;
57642 + }
57643 +
57644 + return 0;
57645 +}
57646 +
57647 +static struct acl_subject_label *
57648 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
57649 +{
57650 + struct acl_subject_label *s_tmp = NULL, *s_tmp2;
57651 + unsigned int len;
57652 + char *tmp;
57653 + __u32 num_objs;
57654 + struct acl_ip_label **i_tmp, *i_utmp2;
57655 + struct gr_hash_struct ghash;
57656 + struct subject_map *subjmap;
57657 + unsigned int i_num;
57658 + int err;
57659 +
57660 + s_tmp = lookup_subject_map(userp);
57661 +
57662 + /* we've already copied this subject into the kernel, just return
57663 + the reference to it, and don't copy it over again
57664 + */
57665 + if (s_tmp)
57666 + return(s_tmp);
57667 +
57668 + if ((s_tmp = (struct acl_subject_label *)
57669 + acl_alloc(sizeof (struct acl_subject_label))) == NULL)
57670 + return ERR_PTR(-ENOMEM);
57671 +
57672 + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
57673 + if (subjmap == NULL)
57674 + return ERR_PTR(-ENOMEM);
57675 +
57676 + subjmap->user = userp;
57677 + subjmap->kernel = s_tmp;
57678 + insert_subj_map_entry(subjmap);
57679 +
57680 + if (copy_from_user(s_tmp, userp,
57681 + sizeof (struct acl_subject_label)))
57682 + return ERR_PTR(-EFAULT);
57683 +
57684 + len = strnlen_user(s_tmp->filename, PATH_MAX);
57685 +
57686 + if (!len || len >= PATH_MAX)
57687 + return ERR_PTR(-EINVAL);
57688 +
57689 + if ((tmp = (char *) acl_alloc(len)) == NULL)
57690 + return ERR_PTR(-ENOMEM);
57691 +
57692 + if (copy_from_user(tmp, s_tmp->filename, len))
57693 + return ERR_PTR(-EFAULT);
57694 + tmp[len-1] = '\0';
57695 + s_tmp->filename = tmp;
57696 +
57697 + if (!strcmp(s_tmp->filename, "/"))
57698 + role->root_label = s_tmp;
57699 +
57700 + if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
57701 + return ERR_PTR(-EFAULT);
57702 +
57703 + /* copy user and group transition tables */
57704 +
57705 + if (s_tmp->user_trans_num) {
57706 + uid_t *uidlist;
57707 +
57708 + uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
57709 + if (uidlist == NULL)
57710 + return ERR_PTR(-ENOMEM);
57711 + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
57712 + return ERR_PTR(-EFAULT);
57713 +
57714 + s_tmp->user_transitions = uidlist;
57715 + }
57716 +
57717 + if (s_tmp->group_trans_num) {
57718 + gid_t *gidlist;
57719 +
57720 + gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
57721 + if (gidlist == NULL)
57722 + return ERR_PTR(-ENOMEM);
57723 + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
57724 + return ERR_PTR(-EFAULT);
57725 +
57726 + s_tmp->group_transitions = gidlist;
57727 + }
57728 +
57729 + /* set up object hash table */
57730 + num_objs = count_user_objs(ghash.first);
57731 +
57732 + s_tmp->obj_hash_size = num_objs;
57733 + s_tmp->obj_hash =
57734 + (struct acl_object_label **)
57735 + create_table(&(s_tmp->obj_hash_size), sizeof(void *));
57736 +
57737 + if (!s_tmp->obj_hash)
57738 + return ERR_PTR(-ENOMEM);
57739 +
57740 + memset(s_tmp->obj_hash, 0,
57741 + s_tmp->obj_hash_size *
57742 + sizeof (struct acl_object_label *));
57743 +
57744 + /* add in objects */
57745 + err = copy_user_objs(ghash.first, s_tmp, role);
57746 +
57747 + if (err)
57748 + return ERR_PTR(err);
57749 +
57750 + /* set pointer for parent subject */
57751 + if (s_tmp->parent_subject) {
57752 + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
57753 +
57754 + if (IS_ERR(s_tmp2))
57755 + return s_tmp2;
57756 +
57757 + s_tmp->parent_subject = s_tmp2;
57758 + }
57759 +
57760 + /* add in ip acls */
57761 +
57762 + if (!s_tmp->ip_num) {
57763 + s_tmp->ips = NULL;
57764 + goto insert;
57765 + }
57766 +
57767 + i_tmp =
57768 + (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
57769 + sizeof (struct acl_ip_label *));
57770 +
57771 + if (!i_tmp)
57772 + return ERR_PTR(-ENOMEM);
57773 +
57774 + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
57775 + *(i_tmp + i_num) =
57776 + (struct acl_ip_label *)
57777 + acl_alloc(sizeof (struct acl_ip_label));
57778 + if (!*(i_tmp + i_num))
57779 + return ERR_PTR(-ENOMEM);
57780 +
57781 + if (copy_from_user
57782 + (&i_utmp2, s_tmp->ips + i_num,
57783 + sizeof (struct acl_ip_label *)))
57784 + return ERR_PTR(-EFAULT);
57785 +
57786 + if (copy_from_user
57787 + (*(i_tmp + i_num), i_utmp2,
57788 + sizeof (struct acl_ip_label)))
57789 + return ERR_PTR(-EFAULT);
57790 +
57791 + if ((*(i_tmp + i_num))->iface == NULL)
57792 + continue;
57793 +
57794 + len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
57795 + if (!len || len >= IFNAMSIZ)
57796 + return ERR_PTR(-EINVAL);
57797 + tmp = acl_alloc(len);
57798 + if (tmp == NULL)
57799 + return ERR_PTR(-ENOMEM);
57800 + if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
57801 + return ERR_PTR(-EFAULT);
57802 + (*(i_tmp + i_num))->iface = tmp;
57803 + }
57804 +
57805 + s_tmp->ips = i_tmp;
57806 +
57807 +insert:
57808 + if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
57809 + s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
57810 + return ERR_PTR(-ENOMEM);
57811 +
57812 + return s_tmp;
57813 +}
57814 +
57815 +static int
57816 +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
57817 +{
57818 + struct acl_subject_label s_pre;
57819 + struct acl_subject_label * ret;
57820 + int err;
57821 +
57822 + while (userp) {
57823 + if (copy_from_user(&s_pre, userp,
57824 + sizeof (struct acl_subject_label)))
57825 + return -EFAULT;
57826 +
57827 + /* do not add nested subjects here, add
57828 + while parsing objects
57829 + */
57830 +
57831 + if (s_pre.mode & GR_NESTED) {
57832 + userp = s_pre.prev;
57833 + continue;
57834 + }
57835 +
57836 + ret = do_copy_user_subj(userp, role);
57837 +
57838 + err = PTR_ERR(ret);
57839 + if (IS_ERR(ret))
57840 + return err;
57841 +
57842 + insert_acl_subj_label(ret, role);
57843 +
57844 + userp = s_pre.prev;
57845 + }
57846 +
57847 + return 0;
57848 +}
57849 +
57850 +static int
57851 +copy_user_acl(struct gr_arg *arg)
57852 +{
57853 + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
57854 + struct sprole_pw *sptmp;
57855 + struct gr_hash_struct *ghash;
57856 + uid_t *domainlist;
57857 + unsigned int r_num;
57858 + unsigned int len;
57859 + char *tmp;
57860 + int err = 0;
57861 + __u16 i;
57862 + __u32 num_subjs;
57863 +
57864 + /* we need a default and kernel role */
57865 + if (arg->role_db.num_roles < 2)
57866 + return -EINVAL;
57867 +
57868 + /* copy special role authentication info from userspace */
57869 +
57870 + num_sprole_pws = arg->num_sprole_pws;
57871 + acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
57872 +
57873 + if (!acl_special_roles) {
57874 + err = -ENOMEM;
57875 + goto cleanup;
57876 + }
57877 +
57878 + for (i = 0; i < num_sprole_pws; i++) {
57879 + sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
57880 + if (!sptmp) {
57881 + err = -ENOMEM;
57882 + goto cleanup;
57883 + }
57884 + if (copy_from_user(sptmp, arg->sprole_pws + i,
57885 + sizeof (struct sprole_pw))) {
57886 + err = -EFAULT;
57887 + goto cleanup;
57888 + }
57889 +
57890 + len =
57891 + strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
57892 +
57893 + if (!len || len >= GR_SPROLE_LEN) {
57894 + err = -EINVAL;
57895 + goto cleanup;
57896 + }
57897 +
57898 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
57899 + err = -ENOMEM;
57900 + goto cleanup;
57901 + }
57902 +
57903 + if (copy_from_user(tmp, sptmp->rolename, len)) {
57904 + err = -EFAULT;
57905 + goto cleanup;
57906 + }
57907 + tmp[len-1] = '\0';
57908 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
57909 + printk(KERN_ALERT "Copying special role %s\n", tmp);
57910 +#endif
57911 + sptmp->rolename = tmp;
57912 + acl_special_roles[i] = sptmp;
57913 + }
57914 +
57915 + r_utmp = (struct acl_role_label **) arg->role_db.r_table;
57916 +
57917 + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
57918 + r_tmp = acl_alloc(sizeof (struct acl_role_label));
57919 +
57920 + if (!r_tmp) {
57921 + err = -ENOMEM;
57922 + goto cleanup;
57923 + }
57924 +
57925 + if (copy_from_user(&r_utmp2, r_utmp + r_num,
57926 + sizeof (struct acl_role_label *))) {
57927 + err = -EFAULT;
57928 + goto cleanup;
57929 + }
57930 +
57931 + if (copy_from_user(r_tmp, r_utmp2,
57932 + sizeof (struct acl_role_label))) {
57933 + err = -EFAULT;
57934 + goto cleanup;
57935 + }
57936 +
57937 + len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
57938 +
57939 + if (!len || len >= PATH_MAX) {
57940 + err = -EINVAL;
57941 + goto cleanup;
57942 + }
57943 +
57944 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
57945 + err = -ENOMEM;
57946 + goto cleanup;
57947 + }
57948 + if (copy_from_user(tmp, r_tmp->rolename, len)) {
57949 + err = -EFAULT;
57950 + goto cleanup;
57951 + }
57952 + tmp[len-1] = '\0';
57953 + r_tmp->rolename = tmp;
57954 +
57955 + if (!strcmp(r_tmp->rolename, "default")
57956 + && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
57957 + default_role = r_tmp;
57958 + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
57959 + kernel_role = r_tmp;
57960 + }
57961 +
57962 + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
57963 + err = -ENOMEM;
57964 + goto cleanup;
57965 + }
57966 + if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
57967 + err = -EFAULT;
57968 + goto cleanup;
57969 + }
57970 +
57971 + r_tmp->hash = ghash;
57972 +
57973 + num_subjs = count_user_subjs(r_tmp->hash->first);
57974 +
57975 + r_tmp->subj_hash_size = num_subjs;
57976 + r_tmp->subj_hash =
57977 + (struct acl_subject_label **)
57978 + create_table(&(r_tmp->subj_hash_size), sizeof(void *));
57979 +
57980 + if (!r_tmp->subj_hash) {
57981 + err = -ENOMEM;
57982 + goto cleanup;
57983 + }
57984 +
57985 + err = copy_user_allowedips(r_tmp);
57986 + if (err)
57987 + goto cleanup;
57988 +
57989 + /* copy domain info */
57990 + if (r_tmp->domain_children != NULL) {
57991 + domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
57992 + if (domainlist == NULL) {
57993 + err = -ENOMEM;
57994 + goto cleanup;
57995 + }
57996 + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
57997 + err = -EFAULT;
57998 + goto cleanup;
57999 + }
58000 + r_tmp->domain_children = domainlist;
58001 + }
58002 +
58003 + err = copy_user_transitions(r_tmp);
58004 + if (err)
58005 + goto cleanup;
58006 +
58007 + memset(r_tmp->subj_hash, 0,
58008 + r_tmp->subj_hash_size *
58009 + sizeof (struct acl_subject_label *));
58010 +
58011 + err = copy_user_subjs(r_tmp->hash->first, r_tmp);
58012 +
58013 + if (err)
58014 + goto cleanup;
58015 +
58016 + /* set nested subject list to null */
58017 + r_tmp->hash->first = NULL;
58018 +
58019 + insert_acl_role_label(r_tmp);
58020 + }
58021 +
58022 + goto return_err;
58023 + cleanup:
58024 + free_variables();
58025 + return_err:
58026 + return err;
58027 +
58028 +}
58029 +
58030 +static int
58031 +gracl_init(struct gr_arg *args)
58032 +{
58033 + int error = 0;
58034 +
58035 + memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
58036 + memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
58037 +
58038 + if (init_variables(args)) {
58039 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
58040 + error = -ENOMEM;
58041 + free_variables();
58042 + goto out;
58043 + }
58044 +
58045 + error = copy_user_acl(args);
58046 + free_init_variables();
58047 + if (error) {
58048 + free_variables();
58049 + goto out;
58050 + }
58051 +
58052 + if ((error = gr_set_acls(0))) {
58053 + free_variables();
58054 + goto out;
58055 + }
58056 +
58057 + pax_open_kernel();
58058 + gr_status |= GR_READY;
58059 + pax_close_kernel();
58060 +
58061 + out:
58062 + return error;
58063 +}
58064 +
58065 +/* derived from glibc fnmatch() 0: match, 1: no match*/
58066 +
58067 +static int
58068 +glob_match(const char *p, const char *n)
58069 +{
58070 + char c;
58071 +
58072 + while ((c = *p++) != '\0') {
58073 + switch (c) {
58074 + case '?':
58075 + if (*n == '\0')
58076 + return 1;
58077 + else if (*n == '/')
58078 + return 1;
58079 + break;
58080 + case '\\':
58081 + if (*n != c)
58082 + return 1;
58083 + break;
58084 + case '*':
58085 + for (c = *p++; c == '?' || c == '*'; c = *p++) {
58086 + if (*n == '/')
58087 + return 1;
58088 + else if (c == '?') {
58089 + if (*n == '\0')
58090 + return 1;
58091 + else
58092 + ++n;
58093 + }
58094 + }
58095 + if (c == '\0') {
58096 + return 0;
58097 + } else {
58098 + const char *endp;
58099 +
58100 + if ((endp = strchr(n, '/')) == NULL)
58101 + endp = n + strlen(n);
58102 +
58103 + if (c == '[') {
58104 + for (--p; n < endp; ++n)
58105 + if (!glob_match(p, n))
58106 + return 0;
58107 + } else if (c == '/') {
58108 + while (*n != '\0' && *n != '/')
58109 + ++n;
58110 + if (*n == '/' && !glob_match(p, n + 1))
58111 + return 0;
58112 + } else {
58113 + for (--p; n < endp; ++n)
58114 + if (*n == c && !glob_match(p, n))
58115 + return 0;
58116 + }
58117 +
58118 + return 1;
58119 + }
58120 + case '[':
58121 + {
58122 + int not;
58123 + char cold;
58124 +
58125 + if (*n == '\0' || *n == '/')
58126 + return 1;
58127 +
58128 + not = (*p == '!' || *p == '^');
58129 + if (not)
58130 + ++p;
58131 +
58132 + c = *p++;
58133 + for (;;) {
58134 + unsigned char fn = (unsigned char)*n;
58135 +
58136 + if (c == '\0')
58137 + return 1;
58138 + else {
58139 + if (c == fn)
58140 + goto matched;
58141 + cold = c;
58142 + c = *p++;
58143 +
58144 + if (c == '-' && *p != ']') {
58145 + unsigned char cend = *p++;
58146 +
58147 + if (cend == '\0')
58148 + return 1;
58149 +
58150 + if (cold <= fn && fn <= cend)
58151 + goto matched;
58152 +
58153 + c = *p++;
58154 + }
58155 + }
58156 +
58157 + if (c == ']')
58158 + break;
58159 + }
58160 + if (!not)
58161 + return 1;
58162 + break;
58163 + matched:
58164 + while (c != ']') {
58165 + if (c == '\0')
58166 + return 1;
58167 +
58168 + c = *p++;
58169 + }
58170 + if (not)
58171 + return 1;
58172 + }
58173 + break;
58174 + default:
58175 + if (c != *n)
58176 + return 1;
58177 + }
58178 +
58179 + ++n;
58180 + }
58181 +
58182 + if (*n == '\0')
58183 + return 0;
58184 +
58185 + if (*n == '/')
58186 + return 0;
58187 +
58188 + return 1;
58189 +}
58190 +
58191 +static struct acl_object_label *
58192 +chk_glob_label(struct acl_object_label *globbed,
58193 + struct dentry *dentry, struct vfsmount *mnt, char **path)
58194 +{
58195 + struct acl_object_label *tmp;
58196 +
58197 + if (*path == NULL)
58198 + *path = gr_to_filename_nolock(dentry, mnt);
58199 +
58200 + tmp = globbed;
58201 +
58202 + while (tmp) {
58203 + if (!glob_match(tmp->filename, *path))
58204 + return tmp;
58205 + tmp = tmp->next;
58206 + }
58207 +
58208 + return NULL;
58209 +}
58210 +
58211 +static struct acl_object_label *
58212 +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
58213 + const ino_t curr_ino, const dev_t curr_dev,
58214 + const struct acl_subject_label *subj, char **path, const int checkglob)
58215 +{
58216 + struct acl_subject_label *tmpsubj;
58217 + struct acl_object_label *retval;
58218 + struct acl_object_label *retval2;
58219 +
58220 + tmpsubj = (struct acl_subject_label *) subj;
58221 + read_lock(&gr_inode_lock);
58222 + do {
58223 + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
58224 + if (retval) {
58225 + if (checkglob && retval->globbed) {
58226 + retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
58227 + (struct vfsmount *)orig_mnt, path);
58228 + if (retval2)
58229 + retval = retval2;
58230 + }
58231 + break;
58232 + }
58233 + } while ((tmpsubj = tmpsubj->parent_subject));
58234 + read_unlock(&gr_inode_lock);
58235 +
58236 + return retval;
58237 +}
58238 +
58239 +static __inline__ struct acl_object_label *
58240 +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
58241 + const struct dentry *curr_dentry,
58242 + const struct acl_subject_label *subj, char **path, const int checkglob)
58243 +{
58244 + int newglob = checkglob;
58245 +
58246 + /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
58247 + as we don't want a / * rule to match instead of the / object
58248 + don't do this for create lookups that call this function though, since they're looking up
58249 + on the parent and thus need globbing checks on all paths
58250 + */
58251 + if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
58252 + newglob = GR_NO_GLOB;
58253 +
58254 + return __full_lookup(orig_dentry, orig_mnt,
58255 + curr_dentry->d_inode->i_ino,
58256 + __get_dev(curr_dentry), subj, path, newglob);
58257 +}
58258 +
58259 +static struct acl_object_label *
58260 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
58261 + const struct acl_subject_label *subj, char *path, const int checkglob)
58262 +{
58263 + struct dentry *dentry = (struct dentry *) l_dentry;
58264 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
58265 + struct acl_object_label *retval;
58266 +
58267 + spin_lock(&dcache_lock);
58268 + spin_lock(&vfsmount_lock);
58269 +
58270 + if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
58271 +#ifdef CONFIG_NET
58272 + mnt == sock_mnt ||
58273 +#endif
58274 +#ifdef CONFIG_HUGETLBFS
58275 + (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
58276 +#endif
58277 + /* ignore Eric Biederman */
58278 + IS_PRIVATE(l_dentry->d_inode))) {
58279 + retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
58280 + goto out;
58281 + }
58282 +
58283 + for (;;) {
58284 + if (dentry == real_root && mnt == real_root_mnt)
58285 + break;
58286 +
58287 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
58288 + if (mnt->mnt_parent == mnt)
58289 + break;
58290 +
58291 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
58292 + if (retval != NULL)
58293 + goto out;
58294 +
58295 + dentry = mnt->mnt_mountpoint;
58296 + mnt = mnt->mnt_parent;
58297 + continue;
58298 + }
58299 +
58300 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
58301 + if (retval != NULL)
58302 + goto out;
58303 +
58304 + dentry = dentry->d_parent;
58305 + }
58306 +
58307 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
58308 +
58309 + if (retval == NULL)
58310 + retval = full_lookup(l_dentry, l_mnt, real_root, subj, &path, checkglob);
58311 +out:
58312 + spin_unlock(&vfsmount_lock);
58313 + spin_unlock(&dcache_lock);
58314 +
58315 + BUG_ON(retval == NULL);
58316 +
58317 + return retval;
58318 +}
58319 +
58320 +static __inline__ struct acl_object_label *
58321 +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
58322 + const struct acl_subject_label *subj)
58323 +{
58324 + char *path = NULL;
58325 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
58326 +}
58327 +
58328 +static __inline__ struct acl_object_label *
58329 +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
58330 + const struct acl_subject_label *subj)
58331 +{
58332 + char *path = NULL;
58333 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
58334 +}
58335 +
58336 +static __inline__ struct acl_object_label *
58337 +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
58338 + const struct acl_subject_label *subj, char *path)
58339 +{
58340 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
58341 +}
58342 +
58343 +static struct acl_subject_label *
58344 +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
58345 + const struct acl_role_label *role)
58346 +{
58347 + struct dentry *dentry = (struct dentry *) l_dentry;
58348 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
58349 + struct acl_subject_label *retval;
58350 +
58351 + spin_lock(&dcache_lock);
58352 + spin_lock(&vfsmount_lock);
58353 +
58354 + for (;;) {
58355 + if (dentry == real_root && mnt == real_root_mnt)
58356 + break;
58357 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
58358 + if (mnt->mnt_parent == mnt)
58359 + break;
58360 +
58361 + read_lock(&gr_inode_lock);
58362 + retval =
58363 + lookup_acl_subj_label(dentry->d_inode->i_ino,
58364 + __get_dev(dentry), role);
58365 + read_unlock(&gr_inode_lock);
58366 + if (retval != NULL)
58367 + goto out;
58368 +
58369 + dentry = mnt->mnt_mountpoint;
58370 + mnt = mnt->mnt_parent;
58371 + continue;
58372 + }
58373 +
58374 + read_lock(&gr_inode_lock);
58375 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
58376 + __get_dev(dentry), role);
58377 + read_unlock(&gr_inode_lock);
58378 + if (retval != NULL)
58379 + goto out;
58380 +
58381 + dentry = dentry->d_parent;
58382 + }
58383 +
58384 + read_lock(&gr_inode_lock);
58385 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
58386 + __get_dev(dentry), role);
58387 + read_unlock(&gr_inode_lock);
58388 +
58389 + if (unlikely(retval == NULL)) {
58390 + read_lock(&gr_inode_lock);
58391 + retval = lookup_acl_subj_label(real_root->d_inode->i_ino,
58392 + __get_dev(real_root), role);
58393 + read_unlock(&gr_inode_lock);
58394 + }
58395 +out:
58396 + spin_unlock(&vfsmount_lock);
58397 + spin_unlock(&dcache_lock);
58398 +
58399 + BUG_ON(retval == NULL);
58400 +
58401 + return retval;
58402 +}
58403 +
58404 +static void
58405 +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
58406 +{
58407 + struct task_struct *task = current;
58408 + const struct cred *cred = current_cred();
58409 +
58410 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
58411 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
58412 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
58413 + 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
58414 +
58415 + return;
58416 +}
58417 +
58418 +static void
58419 +gr_log_learn_sysctl(const char *path, const __u32 mode)
58420 +{
58421 + struct task_struct *task = current;
58422 + const struct cred *cred = current_cred();
58423 +
58424 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
58425 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
58426 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
58427 + 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
58428 +
58429 + return;
58430 +}
58431 +
58432 +static void
58433 +gr_log_learn_id_change(const char type, const unsigned int real,
58434 + const unsigned int effective, const unsigned int fs)
58435 +{
58436 + struct task_struct *task = current;
58437 + const struct cred *cred = current_cred();
58438 +
58439 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
58440 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
58441 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
58442 + type, real, effective, fs, &task->signal->saved_ip);
58443 +
58444 + return;
58445 +}
58446 +
58447 +__u32
58448 +gr_search_file(const struct dentry * dentry, const __u32 mode,
58449 + const struct vfsmount * mnt)
58450 +{
58451 + __u32 retval = mode;
58452 + struct acl_subject_label *curracl;
58453 + struct acl_object_label *currobj;
58454 +
58455 + if (unlikely(!(gr_status & GR_READY)))
58456 + return (mode & ~GR_AUDITS);
58457 +
58458 + curracl = current->acl;
58459 +
58460 + currobj = chk_obj_label(dentry, mnt, curracl);
58461 + retval = currobj->mode & mode;
58462 +
58463 + /* if we're opening a specified transfer file for writing
58464 + (e.g. /dev/initctl), then transfer our role to init
58465 + */
58466 + if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
58467 + current->role->roletype & GR_ROLE_PERSIST)) {
58468 + struct task_struct *task = init_pid_ns.child_reaper;
58469 +
58470 + if (task->role != current->role) {
58471 + task->acl_sp_role = 0;
58472 + task->acl_role_id = current->acl_role_id;
58473 + task->role = current->role;
58474 + rcu_read_lock();
58475 + read_lock(&grsec_exec_file_lock);
58476 + gr_apply_subject_to_task(task);
58477 + read_unlock(&grsec_exec_file_lock);
58478 + rcu_read_unlock();
58479 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
58480 + }
58481 + }
58482 +
58483 + if (unlikely
58484 + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
58485 + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
58486 + __u32 new_mode = mode;
58487 +
58488 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
58489 +
58490 + retval = new_mode;
58491 +
58492 + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
58493 + new_mode |= GR_INHERIT;
58494 +
58495 + if (!(mode & GR_NOLEARN))
58496 + gr_log_learn(dentry, mnt, new_mode);
58497 + }
58498 +
58499 + return retval;
58500 +}
58501 +
58502 +struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
58503 + const struct dentry *parent,
58504 + const struct vfsmount *mnt)
58505 +{
58506 + struct name_entry *match;
58507 + struct acl_object_label *matchpo;
58508 + struct acl_subject_label *curracl;
58509 + char *path;
58510 +
58511 + if (unlikely(!(gr_status & GR_READY)))
58512 + return NULL;
58513 +
58514 + preempt_disable();
58515 + path = gr_to_filename_rbac(new_dentry, mnt);
58516 + match = lookup_name_entry_create(path);
58517 +
58518 + curracl = current->acl;
58519 +
58520 + if (match) {
58521 + read_lock(&gr_inode_lock);
58522 + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
58523 + read_unlock(&gr_inode_lock);
58524 +
58525 + if (matchpo) {
58526 + preempt_enable();
58527 + return matchpo;
58528 + }
58529 + }
58530 +
58531 + // lookup parent
58532 +
58533 + matchpo = chk_obj_create_label(parent, mnt, curracl, path);
58534 +
58535 + preempt_enable();
58536 + return matchpo;
58537 +}
58538 +
58539 +__u32
58540 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
58541 + const struct vfsmount * mnt, const __u32 mode)
58542 +{
58543 + struct acl_object_label *matchpo;
58544 + __u32 retval;
58545 +
58546 + if (unlikely(!(gr_status & GR_READY)))
58547 + return (mode & ~GR_AUDITS);
58548 +
58549 + matchpo = gr_get_create_object(new_dentry, parent, mnt);
58550 +
58551 + retval = matchpo->mode & mode;
58552 +
58553 + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
58554 + && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
58555 + __u32 new_mode = mode;
58556 +
58557 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
58558 +
58559 + gr_log_learn(new_dentry, mnt, new_mode);
58560 + return new_mode;
58561 + }
58562 +
58563 + return retval;
58564 +}
58565 +
58566 +__u32
58567 +gr_check_link(const struct dentry * new_dentry,
58568 + const struct dentry * parent_dentry,
58569 + const struct vfsmount * parent_mnt,
58570 + const struct dentry * old_dentry, const struct vfsmount * old_mnt)
58571 +{
58572 + struct acl_object_label *obj;
58573 + __u32 oldmode, newmode;
58574 + __u32 needmode;
58575 + __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
58576 + GR_DELETE | GR_INHERIT;
58577 +
58578 + if (unlikely(!(gr_status & GR_READY)))
58579 + return (GR_CREATE | GR_LINK);
58580 +
58581 + obj = chk_obj_label(old_dentry, old_mnt, current->acl);
58582 + oldmode = obj->mode;
58583 +
58584 + obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
58585 + newmode = obj->mode;
58586 +
58587 + needmode = newmode & checkmodes;
58588 +
58589 + // old name for hardlink must have at least the permissions of the new name
58590 + if ((oldmode & needmode) != needmode)
58591 + goto bad;
58592 +
58593 + // if old name had restrictions/auditing, make sure the new name does as well
58594 + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
58595 +
58596 + // don't allow hardlinking of suid/sgid files without permission
58597 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
58598 + needmode |= GR_SETID;
58599 +
58600 + if ((newmode & needmode) != needmode)
58601 + goto bad;
58602 +
58603 + // enforce minimum permissions
58604 + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
58605 + return newmode;
58606 +bad:
58607 + needmode = oldmode;
58608 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
58609 + needmode |= GR_SETID;
58610 +
58611 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
58612 + gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
58613 + return (GR_CREATE | GR_LINK);
58614 + } else if (newmode & GR_SUPPRESS)
58615 + return GR_SUPPRESS;
58616 + else
58617 + return 0;
58618 +}
58619 +
58620 +int
58621 +gr_check_hidden_task(const struct task_struct *task)
58622 +{
58623 + if (unlikely(!(gr_status & GR_READY)))
58624 + return 0;
58625 +
58626 + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
58627 + return 1;
58628 +
58629 + return 0;
58630 +}
58631 +
58632 +int
58633 +gr_check_protected_task(const struct task_struct *task)
58634 +{
58635 + if (unlikely(!(gr_status & GR_READY) || !task))
58636 + return 0;
58637 +
58638 + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
58639 + task->acl != current->acl)
58640 + return 1;
58641 +
58642 + return 0;
58643 +}
58644 +
58645 +int
58646 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
58647 +{
58648 + struct task_struct *p;
58649 + int ret = 0;
58650 +
58651 + if (unlikely(!(gr_status & GR_READY) || !pid))
58652 + return ret;
58653 +
58654 + read_lock(&tasklist_lock);
58655 + do_each_pid_task(pid, type, p) {
58656 + if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
58657 + p->acl != current->acl) {
58658 + ret = 1;
58659 + goto out;
58660 + }
58661 + } while_each_pid_task(pid, type, p);
58662 +out:
58663 + read_unlock(&tasklist_lock);
58664 +
58665 + return ret;
58666 +}
58667 +
58668 +void
58669 +gr_copy_label(struct task_struct *tsk)
58670 +{
58671 + /* plain copying of fields is already done by dup_task_struct */
58672 + tsk->signal->used_accept = 0;
58673 + tsk->acl_sp_role = 0;
58674 + //tsk->acl_role_id = current->acl_role_id;
58675 + //tsk->acl = current->acl;
58676 + //tsk->role = current->role;
58677 + tsk->signal->curr_ip = current->signal->curr_ip;
58678 + tsk->signal->saved_ip = current->signal->saved_ip;
58679 + if (current->exec_file)
58680 + get_file(current->exec_file);
58681 + //tsk->exec_file = current->exec_file;
58682 + //tsk->is_writable = current->is_writable;
58683 + if (unlikely(current->signal->used_accept)) {
58684 + current->signal->curr_ip = 0;
58685 + current->signal->saved_ip = 0;
58686 + }
58687 +
58688 + return;
58689 +}
58690 +
58691 +static void
58692 +gr_set_proc_res(struct task_struct *task)
58693 +{
58694 + struct acl_subject_label *proc;
58695 + unsigned short i;
58696 +
58697 + proc = task->acl;
58698 +
58699 + if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
58700 + return;
58701 +
58702 + for (i = 0; i < RLIM_NLIMITS; i++) {
58703 + if (!(proc->resmask & (1 << i)))
58704 + continue;
58705 +
58706 + task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
58707 + task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
58708 + }
58709 +
58710 + return;
58711 +}
58712 +
58713 +extern int __gr_process_user_ban(struct user_struct *user);
58714 +
58715 +int
58716 +gr_check_user_change(int real, int effective, int fs)
58717 +{
58718 + unsigned int i;
58719 + __u16 num;
58720 + uid_t *uidlist;
58721 + int curuid;
58722 + int realok = 0;
58723 + int effectiveok = 0;
58724 + int fsok = 0;
58725 +
58726 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
58727 + struct user_struct *user;
58728 +
58729 + if (real == -1)
58730 + goto skipit;
58731 +
58732 + user = find_user(real);
58733 + if (user == NULL)
58734 + goto skipit;
58735 +
58736 + if (__gr_process_user_ban(user)) {
58737 + /* for find_user */
58738 + free_uid(user);
58739 + return 1;
58740 + }
58741 +
58742 + /* for find_user */
58743 + free_uid(user);
58744 +
58745 +skipit:
58746 +#endif
58747 +
58748 + if (unlikely(!(gr_status & GR_READY)))
58749 + return 0;
58750 +
58751 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
58752 + gr_log_learn_id_change('u', real, effective, fs);
58753 +
58754 + num = current->acl->user_trans_num;
58755 + uidlist = current->acl->user_transitions;
58756 +
58757 + if (uidlist == NULL)
58758 + return 0;
58759 +
58760 + if (real == -1)
58761 + realok = 1;
58762 + if (effective == -1)
58763 + effectiveok = 1;
58764 + if (fs == -1)
58765 + fsok = 1;
58766 +
58767 + if (current->acl->user_trans_type & GR_ID_ALLOW) {
58768 + for (i = 0; i < num; i++) {
58769 + curuid = (int)uidlist[i];
58770 + if (real == curuid)
58771 + realok = 1;
58772 + if (effective == curuid)
58773 + effectiveok = 1;
58774 + if (fs == curuid)
58775 + fsok = 1;
58776 + }
58777 + } else if (current->acl->user_trans_type & GR_ID_DENY) {
58778 + for (i = 0; i < num; i++) {
58779 + curuid = (int)uidlist[i];
58780 + if (real == curuid)
58781 + break;
58782 + if (effective == curuid)
58783 + break;
58784 + if (fs == curuid)
58785 + break;
58786 + }
58787 + /* not in deny list */
58788 + if (i == num) {
58789 + realok = 1;
58790 + effectiveok = 1;
58791 + fsok = 1;
58792 + }
58793 + }
58794 +
58795 + if (realok && effectiveok && fsok)
58796 + return 0;
58797 + else {
58798 + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
58799 + return 1;
58800 + }
58801 +}
58802 +
58803 +int
58804 +gr_check_group_change(int real, int effective, int fs)
58805 +{
58806 + unsigned int i;
58807 + __u16 num;
58808 + gid_t *gidlist;
58809 + int curgid;
58810 + int realok = 0;
58811 + int effectiveok = 0;
58812 + int fsok = 0;
58813 +
58814 + if (unlikely(!(gr_status & GR_READY)))
58815 + return 0;
58816 +
58817 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
58818 + gr_log_learn_id_change('g', real, effective, fs);
58819 +
58820 + num = current->acl->group_trans_num;
58821 + gidlist = current->acl->group_transitions;
58822 +
58823 + if (gidlist == NULL)
58824 + return 0;
58825 +
58826 + if (real == -1)
58827 + realok = 1;
58828 + if (effective == -1)
58829 + effectiveok = 1;
58830 + if (fs == -1)
58831 + fsok = 1;
58832 +
58833 + if (current->acl->group_trans_type & GR_ID_ALLOW) {
58834 + for (i = 0; i < num; i++) {
58835 + curgid = (int)gidlist[i];
58836 + if (real == curgid)
58837 + realok = 1;
58838 + if (effective == curgid)
58839 + effectiveok = 1;
58840 + if (fs == curgid)
58841 + fsok = 1;
58842 + }
58843 + } else if (current->acl->group_trans_type & GR_ID_DENY) {
58844 + for (i = 0; i < num; i++) {
58845 + curgid = (int)gidlist[i];
58846 + if (real == curgid)
58847 + break;
58848 + if (effective == curgid)
58849 + break;
58850 + if (fs == curgid)
58851 + break;
58852 + }
58853 + /* not in deny list */
58854 + if (i == num) {
58855 + realok = 1;
58856 + effectiveok = 1;
58857 + fsok = 1;
58858 + }
58859 + }
58860 +
58861 + if (realok && effectiveok && fsok)
58862 + return 0;
58863 + else {
58864 + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
58865 + return 1;
58866 + }
58867 +}
58868 +
58869 +extern int gr_acl_is_capable(const int cap);
58870 +
58871 +void
58872 +gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
58873 +{
58874 + struct acl_role_label *role = task->role;
58875 + struct acl_subject_label *subj = NULL;
58876 + struct acl_object_label *obj;
58877 + struct file *filp;
58878 +
58879 + if (unlikely(!(gr_status & GR_READY)))
58880 + return;
58881 +
58882 + filp = task->exec_file;
58883 +
58884 + /* kernel process, we'll give them the kernel role */
58885 + if (unlikely(!filp)) {
58886 + task->role = kernel_role;
58887 + task->acl = kernel_role->root_label;
58888 + return;
58889 + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
58890 + role = lookup_acl_role_label(task, uid, gid);
58891 +
58892 + /* don't change the role if we're not a privileged process */
58893 + if (role && task->role != role &&
58894 + (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
58895 + ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
58896 + return;
58897 +
58898 + /* perform subject lookup in possibly new role
58899 + we can use this result below in the case where role == task->role
58900 + */
58901 + subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
58902 +
58903 + /* if we changed uid/gid, but result in the same role
58904 + and are using inheritance, don't lose the inherited subject
58905 + if current subject is other than what normal lookup
58906 + would result in, we arrived via inheritance, don't
58907 + lose subject
58908 + */
58909 + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
58910 + (subj == task->acl)))
58911 + task->acl = subj;
58912 +
58913 + task->role = role;
58914 +
58915 + task->is_writable = 0;
58916 +
58917 + /* ignore additional mmap checks for processes that are writable
58918 + by the default ACL */
58919 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
58920 + if (unlikely(obj->mode & GR_WRITE))
58921 + task->is_writable = 1;
58922 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
58923 + if (unlikely(obj->mode & GR_WRITE))
58924 + task->is_writable = 1;
58925 +
58926 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
58927 + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
58928 +#endif
58929 +
58930 + gr_set_proc_res(task);
58931 +
58932 + return;
58933 +}
58934 +
58935 +int
58936 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
58937 + const int unsafe_flags)
58938 +{
58939 + struct task_struct *task = current;
58940 + struct acl_subject_label *newacl;
58941 + struct acl_object_label *obj;
58942 + __u32 retmode;
58943 +
58944 + if (unlikely(!(gr_status & GR_READY)))
58945 + return 0;
58946 +
58947 + newacl = chk_subj_label(dentry, mnt, task->role);
58948 +
58949 + task_lock(task);
58950 + if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
58951 + !(task->role->roletype & GR_ROLE_GOD) &&
58952 + !gr_search_file(dentry, GR_PTRACERD, mnt) &&
58953 + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
58954 + task_unlock(task);
58955 + if (unsafe_flags & LSM_UNSAFE_SHARE)
58956 + gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
58957 + else
58958 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
58959 + return -EACCES;
58960 + }
58961 + task_unlock(task);
58962 +
58963 + obj = chk_obj_label(dentry, mnt, task->acl);
58964 + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
58965 +
58966 + if (!(task->acl->mode & GR_INHERITLEARN) &&
58967 + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
58968 + if (obj->nested)
58969 + task->acl = obj->nested;
58970 + else
58971 + task->acl = newacl;
58972 + } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
58973 + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
58974 +
58975 + task->is_writable = 0;
58976 +
58977 + /* ignore additional mmap checks for processes that are writable
58978 + by the default ACL */
58979 + obj = chk_obj_label(dentry, mnt, default_role->root_label);
58980 + if (unlikely(obj->mode & GR_WRITE))
58981 + task->is_writable = 1;
58982 + obj = chk_obj_label(dentry, mnt, task->role->root_label);
58983 + if (unlikely(obj->mode & GR_WRITE))
58984 + task->is_writable = 1;
58985 +
58986 + gr_set_proc_res(task);
58987 +
58988 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
58989 + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
58990 +#endif
58991 + return 0;
58992 +}
58993 +
58994 +/* always called with valid inodev ptr */
58995 +static void
58996 +do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
58997 +{
58998 + struct acl_object_label *matchpo;
58999 + struct acl_subject_label *matchps;
59000 + struct acl_subject_label *subj;
59001 + struct acl_role_label *role;
59002 + unsigned int x;
59003 +
59004 + FOR_EACH_ROLE_START(role)
59005 + FOR_EACH_SUBJECT_START(role, subj, x)
59006 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
59007 + matchpo->mode |= GR_DELETED;
59008 + FOR_EACH_SUBJECT_END(subj,x)
59009 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
59010 + if (subj->inode == ino && subj->device == dev)
59011 + subj->mode |= GR_DELETED;
59012 + FOR_EACH_NESTED_SUBJECT_END(subj)
59013 + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
59014 + matchps->mode |= GR_DELETED;
59015 + FOR_EACH_ROLE_END(role)
59016 +
59017 + inodev->nentry->deleted = 1;
59018 +
59019 + return;
59020 +}
59021 +
59022 +void
59023 +gr_handle_delete(const ino_t ino, const dev_t dev)
59024 +{
59025 + struct inodev_entry *inodev;
59026 +
59027 + if (unlikely(!(gr_status & GR_READY)))
59028 + return;
59029 +
59030 + write_lock(&gr_inode_lock);
59031 + inodev = lookup_inodev_entry(ino, dev);
59032 + if (inodev != NULL)
59033 + do_handle_delete(inodev, ino, dev);
59034 + write_unlock(&gr_inode_lock);
59035 +
59036 + return;
59037 +}
59038 +
59039 +static void
59040 +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
59041 + const ino_t newinode, const dev_t newdevice,
59042 + struct acl_subject_label *subj)
59043 +{
59044 + unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
59045 + struct acl_object_label *match;
59046 +
59047 + match = subj->obj_hash[index];
59048 +
59049 + while (match && (match->inode != oldinode ||
59050 + match->device != olddevice ||
59051 + !(match->mode & GR_DELETED)))
59052 + match = match->next;
59053 +
59054 + if (match && (match->inode == oldinode)
59055 + && (match->device == olddevice)
59056 + && (match->mode & GR_DELETED)) {
59057 + if (match->prev == NULL) {
59058 + subj->obj_hash[index] = match->next;
59059 + if (match->next != NULL)
59060 + match->next->prev = NULL;
59061 + } else {
59062 + match->prev->next = match->next;
59063 + if (match->next != NULL)
59064 + match->next->prev = match->prev;
59065 + }
59066 + match->prev = NULL;
59067 + match->next = NULL;
59068 + match->inode = newinode;
59069 + match->device = newdevice;
59070 + match->mode &= ~GR_DELETED;
59071 +
59072 + insert_acl_obj_label(match, subj);
59073 + }
59074 +
59075 + return;
59076 +}
59077 +
59078 +static void
59079 +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
59080 + const ino_t newinode, const dev_t newdevice,
59081 + struct acl_role_label *role)
59082 +{
59083 + unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
59084 + struct acl_subject_label *match;
59085 +
59086 + match = role->subj_hash[index];
59087 +
59088 + while (match && (match->inode != oldinode ||
59089 + match->device != olddevice ||
59090 + !(match->mode & GR_DELETED)))
59091 + match = match->next;
59092 +
59093 + if (match && (match->inode == oldinode)
59094 + && (match->device == olddevice)
59095 + && (match->mode & GR_DELETED)) {
59096 + if (match->prev == NULL) {
59097 + role->subj_hash[index] = match->next;
59098 + if (match->next != NULL)
59099 + match->next->prev = NULL;
59100 + } else {
59101 + match->prev->next = match->next;
59102 + if (match->next != NULL)
59103 + match->next->prev = match->prev;
59104 + }
59105 + match->prev = NULL;
59106 + match->next = NULL;
59107 + match->inode = newinode;
59108 + match->device = newdevice;
59109 + match->mode &= ~GR_DELETED;
59110 +
59111 + insert_acl_subj_label(match, role);
59112 + }
59113 +
59114 + return;
59115 +}
59116 +
59117 +static void
59118 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
59119 + const ino_t newinode, const dev_t newdevice)
59120 +{
59121 + unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
59122 + struct inodev_entry *match;
59123 +
59124 + match = inodev_set.i_hash[index];
59125 +
59126 + while (match && (match->nentry->inode != oldinode ||
59127 + match->nentry->device != olddevice || !match->nentry->deleted))
59128 + match = match->next;
59129 +
59130 + if (match && (match->nentry->inode == oldinode)
59131 + && (match->nentry->device == olddevice) &&
59132 + match->nentry->deleted) {
59133 + if (match->prev == NULL) {
59134 + inodev_set.i_hash[index] = match->next;
59135 + if (match->next != NULL)
59136 + match->next->prev = NULL;
59137 + } else {
59138 + match->prev->next = match->next;
59139 + if (match->next != NULL)
59140 + match->next->prev = match->prev;
59141 + }
59142 + match->prev = NULL;
59143 + match->next = NULL;
59144 + match->nentry->inode = newinode;
59145 + match->nentry->device = newdevice;
59146 + match->nentry->deleted = 0;
59147 +
59148 + insert_inodev_entry(match);
59149 + }
59150 +
59151 + return;
59152 +}
59153 +
59154 +static void
59155 +__do_handle_create(const struct name_entry *matchn, ino_t inode, dev_t dev)
59156 +{
59157 + struct acl_subject_label *subj;
59158 + struct acl_role_label *role;
59159 + unsigned int x;
59160 +
59161 + FOR_EACH_ROLE_START(role)
59162 + update_acl_subj_label(matchn->inode, matchn->device,
59163 + inode, dev, role);
59164 +
59165 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
59166 + if ((subj->inode == inode) && (subj->device == dev)) {
59167 + subj->inode = inode;
59168 + subj->device = dev;
59169 + }
59170 + FOR_EACH_NESTED_SUBJECT_END(subj)
59171 + FOR_EACH_SUBJECT_START(role, subj, x)
59172 + update_acl_obj_label(matchn->inode, matchn->device,
59173 + inode, dev, subj);
59174 + FOR_EACH_SUBJECT_END(subj,x)
59175 + FOR_EACH_ROLE_END(role)
59176 +
59177 + update_inodev_entry(matchn->inode, matchn->device, inode, dev);
59178 +
59179 + return;
59180 +}
59181 +
59182 +static void
59183 +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
59184 + const struct vfsmount *mnt)
59185 +{
59186 + ino_t ino = dentry->d_inode->i_ino;
59187 + dev_t dev = __get_dev(dentry);
59188 +
59189 + __do_handle_create(matchn, ino, dev);
59190 +
59191 + return;
59192 +}
59193 +
59194 +void
59195 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
59196 +{
59197 + struct name_entry *matchn;
59198 +
59199 + if (unlikely(!(gr_status & GR_READY)))
59200 + return;
59201 +
59202 + preempt_disable();
59203 + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
59204 +
59205 + if (unlikely((unsigned long)matchn)) {
59206 + write_lock(&gr_inode_lock);
59207 + do_handle_create(matchn, dentry, mnt);
59208 + write_unlock(&gr_inode_lock);
59209 + }
59210 + preempt_enable();
59211 +
59212 + return;
59213 +}
59214 +
59215 +void
59216 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
59217 +{
59218 + struct name_entry *matchn;
59219 +
59220 + if (unlikely(!(gr_status & GR_READY)))
59221 + return;
59222 +
59223 + preempt_disable();
59224 + matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
59225 +
59226 + if (unlikely((unsigned long)matchn)) {
59227 + write_lock(&gr_inode_lock);
59228 + __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
59229 + write_unlock(&gr_inode_lock);
59230 + }
59231 + preempt_enable();
59232 +
59233 + return;
59234 +}
59235 +
59236 +void
59237 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
59238 + struct dentry *old_dentry,
59239 + struct dentry *new_dentry,
59240 + struct vfsmount *mnt, const __u8 replace)
59241 +{
59242 + struct name_entry *matchn;
59243 + struct inodev_entry *inodev;
59244 + struct inode *inode = new_dentry->d_inode;
59245 + ino_t oldinode = old_dentry->d_inode->i_ino;
59246 + dev_t olddev = __get_dev(old_dentry);
59247 +
59248 + /* vfs_rename swaps the name and parent link for old_dentry and
59249 + new_dentry
59250 + at this point, old_dentry has the new name, parent link, and inode
59251 + for the renamed file
59252 + if a file is being replaced by a rename, new_dentry has the inode
59253 + and name for the replaced file
59254 + */
59255 +
59256 + if (unlikely(!(gr_status & GR_READY)))
59257 + return;
59258 +
59259 + preempt_disable();
59260 + matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
59261 +
59262 + /* we wouldn't have to check d_inode if it weren't for
59263 + NFS silly-renaming
59264 + */
59265 +
59266 + write_lock(&gr_inode_lock);
59267 + if (unlikely(replace && inode)) {
59268 + ino_t newinode = inode->i_ino;
59269 + dev_t newdev = __get_dev(new_dentry);
59270 + inodev = lookup_inodev_entry(newinode, newdev);
59271 + if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
59272 + do_handle_delete(inodev, newinode, newdev);
59273 + }
59274 +
59275 + inodev = lookup_inodev_entry(oldinode, olddev);
59276 + if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
59277 + do_handle_delete(inodev, oldinode, olddev);
59278 +
59279 + if (unlikely((unsigned long)matchn))
59280 + do_handle_create(matchn, old_dentry, mnt);
59281 +
59282 + write_unlock(&gr_inode_lock);
59283 + preempt_enable();
59284 +
59285 + return;
59286 +}
59287 +
59288 +static int
59289 +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
59290 + unsigned char **sum)
59291 +{
59292 + struct acl_role_label *r;
59293 + struct role_allowed_ip *ipp;
59294 + struct role_transition *trans;
59295 + unsigned int i;
59296 + int found = 0;
59297 + u32 curr_ip = current->signal->curr_ip;
59298 +
59299 + current->signal->saved_ip = curr_ip;
59300 +
59301 + /* check transition table */
59302 +
59303 + for (trans = current->role->transitions; trans; trans = trans->next) {
59304 + if (!strcmp(rolename, trans->rolename)) {
59305 + found = 1;
59306 + break;
59307 + }
59308 + }
59309 +
59310 + if (!found)
59311 + return 0;
59312 +
59313 + /* handle special roles that do not require authentication
59314 + and check ip */
59315 +
59316 + FOR_EACH_ROLE_START(r)
59317 + if (!strcmp(rolename, r->rolename) &&
59318 + (r->roletype & GR_ROLE_SPECIAL)) {
59319 + found = 0;
59320 + if (r->allowed_ips != NULL) {
59321 + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
59322 + if ((ntohl(curr_ip) & ipp->netmask) ==
59323 + (ntohl(ipp->addr) & ipp->netmask))
59324 + found = 1;
59325 + }
59326 + } else
59327 + found = 2;
59328 + if (!found)
59329 + return 0;
59330 +
59331 + if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
59332 + ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
59333 + *salt = NULL;
59334 + *sum = NULL;
59335 + return 1;
59336 + }
59337 + }
59338 + FOR_EACH_ROLE_END(r)
59339 +
59340 + for (i = 0; i < num_sprole_pws; i++) {
59341 + if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
59342 + *salt = acl_special_roles[i]->salt;
59343 + *sum = acl_special_roles[i]->sum;
59344 + return 1;
59345 + }
59346 + }
59347 +
59348 + return 0;
59349 +}
59350 +
59351 +static void
59352 +assign_special_role(char *rolename)
59353 +{
59354 + struct acl_object_label *obj;
59355 + struct acl_role_label *r;
59356 + struct acl_role_label *assigned = NULL;
59357 + struct task_struct *tsk;
59358 + struct file *filp;
59359 +
59360 + FOR_EACH_ROLE_START(r)
59361 + if (!strcmp(rolename, r->rolename) &&
59362 + (r->roletype & GR_ROLE_SPECIAL)) {
59363 + assigned = r;
59364 + break;
59365 + }
59366 + FOR_EACH_ROLE_END(r)
59367 +
59368 + if (!assigned)
59369 + return;
59370 +
59371 + read_lock(&tasklist_lock);
59372 + read_lock(&grsec_exec_file_lock);
59373 +
59374 + tsk = current->real_parent;
59375 + if (tsk == NULL)
59376 + goto out_unlock;
59377 +
59378 + filp = tsk->exec_file;
59379 + if (filp == NULL)
59380 + goto out_unlock;
59381 +
59382 + tsk->is_writable = 0;
59383 +
59384 + tsk->acl_sp_role = 1;
59385 + tsk->acl_role_id = ++acl_sp_role_value;
59386 + tsk->role = assigned;
59387 + tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
59388 +
59389 + /* ignore additional mmap checks for processes that are writable
59390 + by the default ACL */
59391 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
59392 + if (unlikely(obj->mode & GR_WRITE))
59393 + tsk->is_writable = 1;
59394 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
59395 + if (unlikely(obj->mode & GR_WRITE))
59396 + tsk->is_writable = 1;
59397 +
59398 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
59399 + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
59400 +#endif
59401 +
59402 +out_unlock:
59403 + read_unlock(&grsec_exec_file_lock);
59404 + read_unlock(&tasklist_lock);
59405 + return;
59406 +}
59407 +
59408 +int gr_check_secure_terminal(struct task_struct *task)
59409 +{
59410 + struct task_struct *p, *p2, *p3;
59411 + struct files_struct *files;
59412 + struct fdtable *fdt;
59413 + struct file *our_file = NULL, *file;
59414 + int i;
59415 +
59416 + if (task->signal->tty == NULL)
59417 + return 1;
59418 +
59419 + files = get_files_struct(task);
59420 + if (files != NULL) {
59421 + rcu_read_lock();
59422 + fdt = files_fdtable(files);
59423 + for (i=0; i < fdt->max_fds; i++) {
59424 + file = fcheck_files(files, i);
59425 + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
59426 + get_file(file);
59427 + our_file = file;
59428 + }
59429 + }
59430 + rcu_read_unlock();
59431 + put_files_struct(files);
59432 + }
59433 +
59434 + if (our_file == NULL)
59435 + return 1;
59436 +
59437 + read_lock(&tasklist_lock);
59438 + do_each_thread(p2, p) {
59439 + files = get_files_struct(p);
59440 + if (files == NULL ||
59441 + (p->signal && p->signal->tty == task->signal->tty)) {
59442 + if (files != NULL)
59443 + put_files_struct(files);
59444 + continue;
59445 + }
59446 + rcu_read_lock();
59447 + fdt = files_fdtable(files);
59448 + for (i=0; i < fdt->max_fds; i++) {
59449 + file = fcheck_files(files, i);
59450 + if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
59451 + file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
59452 + p3 = task;
59453 + while (p3->pid > 0) {
59454 + if (p3 == p)
59455 + break;
59456 + p3 = p3->real_parent;
59457 + }
59458 + if (p3 == p)
59459 + break;
59460 + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
59461 + gr_handle_alertkill(p);
59462 + rcu_read_unlock();
59463 + put_files_struct(files);
59464 + read_unlock(&tasklist_lock);
59465 + fput(our_file);
59466 + return 0;
59467 + }
59468 + }
59469 + rcu_read_unlock();
59470 + put_files_struct(files);
59471 + } while_each_thread(p2, p);
59472 + read_unlock(&tasklist_lock);
59473 +
59474 + fput(our_file);
59475 + return 1;
59476 +}
59477 +
59478 +ssize_t
59479 +write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
59480 +{
59481 + struct gr_arg_wrapper uwrap;
59482 + unsigned char *sprole_salt = NULL;
59483 + unsigned char *sprole_sum = NULL;
59484 + int error = sizeof (struct gr_arg_wrapper);
59485 + int error2 = 0;
59486 +
59487 + mutex_lock(&gr_dev_mutex);
59488 +
59489 + if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
59490 + error = -EPERM;
59491 + goto out;
59492 + }
59493 +
59494 + if (count != sizeof (struct gr_arg_wrapper)) {
59495 + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
59496 + error = -EINVAL;
59497 + goto out;
59498 + }
59499 +
59500 +
59501 + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
59502 + gr_auth_expires = 0;
59503 + gr_auth_attempts = 0;
59504 + }
59505 +
59506 + if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
59507 + error = -EFAULT;
59508 + goto out;
59509 + }
59510 +
59511 + if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
59512 + error = -EINVAL;
59513 + goto out;
59514 + }
59515 +
59516 + if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
59517 + error = -EFAULT;
59518 + goto out;
59519 + }
59520 +
59521 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
59522 + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
59523 + time_after(gr_auth_expires, get_seconds())) {
59524 + error = -EBUSY;
59525 + goto out;
59526 + }
59527 +
59528 + /* if non-root trying to do anything other than use a special role,
59529 + do not attempt authentication, do not count towards authentication
59530 + locking
59531 + */
59532 +
59533 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
59534 + gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
59535 + current_uid()) {
59536 + error = -EPERM;
59537 + goto out;
59538 + }
59539 +
59540 + /* ensure pw and special role name are null terminated */
59541 +
59542 + gr_usermode->pw[GR_PW_LEN - 1] = '\0';
59543 + gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
59544 +
59545 + /* Okay.
59546 + * We have our enough of the argument structure..(we have yet
59547 + * to copy_from_user the tables themselves) . Copy the tables
59548 + * only if we need them, i.e. for loading operations. */
59549 +
59550 + switch (gr_usermode->mode) {
59551 + case GR_STATUS:
59552 + if (gr_status & GR_READY) {
59553 + error = 1;
59554 + if (!gr_check_secure_terminal(current))
59555 + error = 3;
59556 + } else
59557 + error = 2;
59558 + goto out;
59559 + case GR_SHUTDOWN:
59560 + if ((gr_status & GR_READY)
59561 + && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
59562 + pax_open_kernel();
59563 + gr_status &= ~GR_READY;
59564 + pax_close_kernel();
59565 +
59566 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
59567 + free_variables();
59568 + memset(gr_usermode, 0, sizeof (struct gr_arg));
59569 + memset(gr_system_salt, 0, GR_SALT_LEN);
59570 + memset(gr_system_sum, 0, GR_SHA_LEN);
59571 + } else if (gr_status & GR_READY) {
59572 + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
59573 + error = -EPERM;
59574 + } else {
59575 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
59576 + error = -EAGAIN;
59577 + }
59578 + break;
59579 + case GR_ENABLE:
59580 + if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
59581 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
59582 + else {
59583 + if (gr_status & GR_READY)
59584 + error = -EAGAIN;
59585 + else
59586 + error = error2;
59587 + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
59588 + }
59589 + break;
59590 + case GR_RELOAD:
59591 + if (!(gr_status & GR_READY)) {
59592 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
59593 + error = -EAGAIN;
59594 + } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
59595 + lock_kernel();
59596 +
59597 + pax_open_kernel();
59598 + gr_status &= ~GR_READY;
59599 + pax_close_kernel();
59600 +
59601 + free_variables();
59602 + if (!(error2 = gracl_init(gr_usermode))) {
59603 + unlock_kernel();
59604 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
59605 + } else {
59606 + unlock_kernel();
59607 + error = error2;
59608 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
59609 + }
59610 + } else {
59611 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
59612 + error = -EPERM;
59613 + }
59614 + break;
59615 + case GR_SEGVMOD:
59616 + if (unlikely(!(gr_status & GR_READY))) {
59617 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
59618 + error = -EAGAIN;
59619 + break;
59620 + }
59621 +
59622 + if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
59623 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
59624 + if (gr_usermode->segv_device && gr_usermode->segv_inode) {
59625 + struct acl_subject_label *segvacl;
59626 + segvacl =
59627 + lookup_acl_subj_label(gr_usermode->segv_inode,
59628 + gr_usermode->segv_device,
59629 + current->role);
59630 + if (segvacl) {
59631 + segvacl->crashes = 0;
59632 + segvacl->expires = 0;
59633 + }
59634 + } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
59635 + gr_remove_uid(gr_usermode->segv_uid);
59636 + }
59637 + } else {
59638 + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
59639 + error = -EPERM;
59640 + }
59641 + break;
59642 + case GR_SPROLE:
59643 + case GR_SPROLEPAM:
59644 + if (unlikely(!(gr_status & GR_READY))) {
59645 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
59646 + error = -EAGAIN;
59647 + break;
59648 + }
59649 +
59650 + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
59651 + current->role->expires = 0;
59652 + current->role->auth_attempts = 0;
59653 + }
59654 +
59655 + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
59656 + time_after(current->role->expires, get_seconds())) {
59657 + error = -EBUSY;
59658 + goto out;
59659 + }
59660 +
59661 + if (lookup_special_role_auth
59662 + (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
59663 + && ((!sprole_salt && !sprole_sum)
59664 + || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
59665 + char *p = "";
59666 + assign_special_role(gr_usermode->sp_role);
59667 + read_lock(&tasklist_lock);
59668 + if (current->real_parent)
59669 + p = current->real_parent->role->rolename;
59670 + read_unlock(&tasklist_lock);
59671 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
59672 + p, acl_sp_role_value);
59673 + } else {
59674 + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
59675 + error = -EPERM;
59676 + if(!(current->role->auth_attempts++))
59677 + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
59678 +
59679 + goto out;
59680 + }
59681 + break;
59682 + case GR_UNSPROLE:
59683 + if (unlikely(!(gr_status & GR_READY))) {
59684 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
59685 + error = -EAGAIN;
59686 + break;
59687 + }
59688 +
59689 + if (current->role->roletype & GR_ROLE_SPECIAL) {
59690 + char *p = "";
59691 + int i = 0;
59692 +
59693 + read_lock(&tasklist_lock);
59694 + if (current->real_parent) {
59695 + p = current->real_parent->role->rolename;
59696 + i = current->real_parent->acl_role_id;
59697 + }
59698 + read_unlock(&tasklist_lock);
59699 +
59700 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
59701 + gr_set_acls(1);
59702 + } else {
59703 + error = -EPERM;
59704 + goto out;
59705 + }
59706 + break;
59707 + default:
59708 + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
59709 + error = -EINVAL;
59710 + break;
59711 + }
59712 +
59713 + if (error != -EPERM)
59714 + goto out;
59715 +
59716 + if(!(gr_auth_attempts++))
59717 + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
59718 +
59719 + out:
59720 + mutex_unlock(&gr_dev_mutex);
59721 + return error;
59722 +}
59723 +
59724 +/* must be called with
59725 + rcu_read_lock();
59726 + read_lock(&tasklist_lock);
59727 + read_lock(&grsec_exec_file_lock);
59728 +*/
59729 +int gr_apply_subject_to_task(struct task_struct *task)
59730 +{
59731 + struct acl_object_label *obj;
59732 + char *tmpname;
59733 + struct acl_subject_label *tmpsubj;
59734 + struct file *filp;
59735 + struct name_entry *nmatch;
59736 +
59737 + filp = task->exec_file;
59738 + if (filp == NULL)
59739 + return 0;
59740 +
59741 + /* the following is to apply the correct subject
59742 + on binaries running when the RBAC system
59743 + is enabled, when the binaries have been
59744 + replaced or deleted since their execution
59745 + -----
59746 + when the RBAC system starts, the inode/dev
59747 + from exec_file will be one the RBAC system
59748 + is unaware of. It only knows the inode/dev
59749 + of the present file on disk, or the absence
59750 + of it.
59751 + */
59752 + preempt_disable();
59753 + tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
59754 +
59755 + nmatch = lookup_name_entry(tmpname);
59756 + preempt_enable();
59757 + tmpsubj = NULL;
59758 + if (nmatch) {
59759 + if (nmatch->deleted)
59760 + tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
59761 + else
59762 + tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
59763 + if (tmpsubj != NULL)
59764 + task->acl = tmpsubj;
59765 + }
59766 + if (tmpsubj == NULL)
59767 + task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
59768 + task->role);
59769 + if (task->acl) {
59770 + task->is_writable = 0;
59771 + /* ignore additional mmap checks for processes that are writable
59772 + by the default ACL */
59773 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
59774 + if (unlikely(obj->mode & GR_WRITE))
59775 + task->is_writable = 1;
59776 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
59777 + if (unlikely(obj->mode & GR_WRITE))
59778 + task->is_writable = 1;
59779 +
59780 + gr_set_proc_res(task);
59781 +
59782 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
59783 + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
59784 +#endif
59785 + } else {
59786 + return 1;
59787 + }
59788 +
59789 + return 0;
59790 +}
59791 +
59792 +int
59793 +gr_set_acls(const int type)
59794 +{
59795 + struct task_struct *task, *task2;
59796 + struct acl_role_label *role = current->role;
59797 + __u16 acl_role_id = current->acl_role_id;
59798 + const struct cred *cred;
59799 + int ret;
59800 +
59801 + rcu_read_lock();
59802 + read_lock(&tasklist_lock);
59803 + read_lock(&grsec_exec_file_lock);
59804 + do_each_thread(task2, task) {
59805 + /* check to see if we're called from the exit handler,
59806 + if so, only replace ACLs that have inherited the admin
59807 + ACL */
59808 +
59809 + if (type && (task->role != role ||
59810 + task->acl_role_id != acl_role_id))
59811 + continue;
59812 +
59813 + task->acl_role_id = 0;
59814 + task->acl_sp_role = 0;
59815 +
59816 + if (task->exec_file) {
59817 + cred = __task_cred(task);
59818 + task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
59819 +
59820 + ret = gr_apply_subject_to_task(task);
59821 + if (ret) {
59822 + read_unlock(&grsec_exec_file_lock);
59823 + read_unlock(&tasklist_lock);
59824 + rcu_read_unlock();
59825 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
59826 + return ret;
59827 + }
59828 + } else {
59829 + // it's a kernel process
59830 + task->role = kernel_role;
59831 + task->acl = kernel_role->root_label;
59832 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
59833 + task->acl->mode &= ~GR_PROCFIND;
59834 +#endif
59835 + }
59836 + } while_each_thread(task2, task);
59837 + read_unlock(&grsec_exec_file_lock);
59838 + read_unlock(&tasklist_lock);
59839 + rcu_read_unlock();
59840 +
59841 + return 0;
59842 +}
59843 +
59844 +void
59845 +gr_learn_resource(const struct task_struct *task,
59846 + const int res, const unsigned long wanted, const int gt)
59847 +{
59848 + struct acl_subject_label *acl;
59849 + const struct cred *cred;
59850 +
59851 + if (unlikely((gr_status & GR_READY) &&
59852 + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
59853 + goto skip_reslog;
59854 +
59855 +#ifdef CONFIG_GRKERNSEC_RESLOG
59856 + gr_log_resource(task, res, wanted, gt);
59857 +#endif
59858 + skip_reslog:
59859 +
59860 + if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
59861 + return;
59862 +
59863 + acl = task->acl;
59864 +
59865 + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
59866 + !(acl->resmask & (1 << (unsigned short) res))))
59867 + return;
59868 +
59869 + if (wanted >= acl->res[res].rlim_cur) {
59870 + unsigned long res_add;
59871 +
59872 + res_add = wanted;
59873 + switch (res) {
59874 + case RLIMIT_CPU:
59875 + res_add += GR_RLIM_CPU_BUMP;
59876 + break;
59877 + case RLIMIT_FSIZE:
59878 + res_add += GR_RLIM_FSIZE_BUMP;
59879 + break;
59880 + case RLIMIT_DATA:
59881 + res_add += GR_RLIM_DATA_BUMP;
59882 + break;
59883 + case RLIMIT_STACK:
59884 + res_add += GR_RLIM_STACK_BUMP;
59885 + break;
59886 + case RLIMIT_CORE:
59887 + res_add += GR_RLIM_CORE_BUMP;
59888 + break;
59889 + case RLIMIT_RSS:
59890 + res_add += GR_RLIM_RSS_BUMP;
59891 + break;
59892 + case RLIMIT_NPROC:
59893 + res_add += GR_RLIM_NPROC_BUMP;
59894 + break;
59895 + case RLIMIT_NOFILE:
59896 + res_add += GR_RLIM_NOFILE_BUMP;
59897 + break;
59898 + case RLIMIT_MEMLOCK:
59899 + res_add += GR_RLIM_MEMLOCK_BUMP;
59900 + break;
59901 + case RLIMIT_AS:
59902 + res_add += GR_RLIM_AS_BUMP;
59903 + break;
59904 + case RLIMIT_LOCKS:
59905 + res_add += GR_RLIM_LOCKS_BUMP;
59906 + break;
59907 + case RLIMIT_SIGPENDING:
59908 + res_add += GR_RLIM_SIGPENDING_BUMP;
59909 + break;
59910 + case RLIMIT_MSGQUEUE:
59911 + res_add += GR_RLIM_MSGQUEUE_BUMP;
59912 + break;
59913 + case RLIMIT_NICE:
59914 + res_add += GR_RLIM_NICE_BUMP;
59915 + break;
59916 + case RLIMIT_RTPRIO:
59917 + res_add += GR_RLIM_RTPRIO_BUMP;
59918 + break;
59919 + case RLIMIT_RTTIME:
59920 + res_add += GR_RLIM_RTTIME_BUMP;
59921 + break;
59922 + }
59923 +
59924 + acl->res[res].rlim_cur = res_add;
59925 +
59926 + if (wanted > acl->res[res].rlim_max)
59927 + acl->res[res].rlim_max = res_add;
59928 +
59929 + /* only log the subject filename, since resource logging is supported for
59930 + single-subject learning only */
59931 + rcu_read_lock();
59932 + cred = __task_cred(task);
59933 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
59934 + task->role->roletype, cred->uid, cred->gid, acl->filename,
59935 + acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
59936 + "", (unsigned long) res, &task->signal->saved_ip);
59937 + rcu_read_unlock();
59938 + }
59939 +
59940 + return;
59941 +}
59942 +
59943 +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
59944 +void
59945 +pax_set_initial_flags(struct linux_binprm *bprm)
59946 +{
59947 + struct task_struct *task = current;
59948 + struct acl_subject_label *proc;
59949 + unsigned long flags;
59950 +
59951 + if (unlikely(!(gr_status & GR_READY)))
59952 + return;
59953 +
59954 + flags = pax_get_flags(task);
59955 +
59956 + proc = task->acl;
59957 +
59958 + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
59959 + flags &= ~MF_PAX_PAGEEXEC;
59960 + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
59961 + flags &= ~MF_PAX_SEGMEXEC;
59962 + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
59963 + flags &= ~MF_PAX_RANDMMAP;
59964 + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
59965 + flags &= ~MF_PAX_EMUTRAMP;
59966 + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
59967 + flags &= ~MF_PAX_MPROTECT;
59968 +
59969 + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
59970 + flags |= MF_PAX_PAGEEXEC;
59971 + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
59972 + flags |= MF_PAX_SEGMEXEC;
59973 + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
59974 + flags |= MF_PAX_RANDMMAP;
59975 + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
59976 + flags |= MF_PAX_EMUTRAMP;
59977 + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
59978 + flags |= MF_PAX_MPROTECT;
59979 +
59980 + pax_set_flags(task, flags);
59981 +
59982 + return;
59983 +}
59984 +#endif
59985 +
59986 +#ifdef CONFIG_SYSCTL
59987 +/* Eric Biederman likes breaking userland ABI and every inode-based security
59988 + system to save 35kb of memory */
59989 +
59990 +/* we modify the passed in filename, but adjust it back before returning */
59991 +static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
59992 +{
59993 + struct name_entry *nmatch;
59994 + char *p, *lastp = NULL;
59995 + struct acl_object_label *obj = NULL, *tmp;
59996 + struct acl_subject_label *tmpsubj;
59997 + char c = '\0';
59998 +
59999 + read_lock(&gr_inode_lock);
60000 +
60001 + p = name + len - 1;
60002 + do {
60003 + nmatch = lookup_name_entry(name);
60004 + if (lastp != NULL)
60005 + *lastp = c;
60006 +
60007 + if (nmatch == NULL)
60008 + goto next_component;
60009 + tmpsubj = current->acl;
60010 + do {
60011 + obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
60012 + if (obj != NULL) {
60013 + tmp = obj->globbed;
60014 + while (tmp) {
60015 + if (!glob_match(tmp->filename, name)) {
60016 + obj = tmp;
60017 + goto found_obj;
60018 + }
60019 + tmp = tmp->next;
60020 + }
60021 + goto found_obj;
60022 + }
60023 + } while ((tmpsubj = tmpsubj->parent_subject));
60024 +next_component:
60025 + /* end case */
60026 + if (p == name)
60027 + break;
60028 +
60029 + while (*p != '/')
60030 + p--;
60031 + if (p == name)
60032 + lastp = p + 1;
60033 + else {
60034 + lastp = p;
60035 + p--;
60036 + }
60037 + c = *lastp;
60038 + *lastp = '\0';
60039 + } while (1);
60040 +found_obj:
60041 + read_unlock(&gr_inode_lock);
60042 + /* obj returned will always be non-null */
60043 + return obj;
60044 +}
60045 +
60046 +/* returns 0 when allowing, non-zero on error
60047 + op of 0 is used for readdir, so we don't log the names of hidden files
60048 +*/
60049 +__u32
60050 +gr_handle_sysctl(const struct ctl_table *table, const int op)
60051 +{
60052 + ctl_table *tmp;
60053 + const char *proc_sys = "/proc/sys";
60054 + char *path;
60055 + struct acl_object_label *obj;
60056 + unsigned short len = 0, pos = 0, depth = 0, i;
60057 + __u32 err = 0;
60058 + __u32 mode = 0;
60059 +
60060 + if (unlikely(!(gr_status & GR_READY)))
60061 + return 0;
60062 +
60063 + /* for now, ignore operations on non-sysctl entries if it's not a
60064 + readdir*/
60065 + if (table->child != NULL && op != 0)
60066 + return 0;
60067 +
60068 + mode |= GR_FIND;
60069 + /* it's only a read if it's an entry, read on dirs is for readdir */
60070 + if (op & MAY_READ)
60071 + mode |= GR_READ;
60072 + if (op & MAY_WRITE)
60073 + mode |= GR_WRITE;
60074 +
60075 + preempt_disable();
60076 +
60077 + path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
60078 +
60079 + /* it's only a read/write if it's an actual entry, not a dir
60080 + (which are opened for readdir)
60081 + */
60082 +
60083 + /* convert the requested sysctl entry into a pathname */
60084 +
60085 + for (tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
60086 + len += strlen(tmp->procname);
60087 + len++;
60088 + depth++;
60089 + }
60090 +
60091 + if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
60092 + /* deny */
60093 + goto out;
60094 + }
60095 +
60096 + memset(path, 0, PAGE_SIZE);
60097 +
60098 + memcpy(path, proc_sys, strlen(proc_sys));
60099 +
60100 + pos += strlen(proc_sys);
60101 +
60102 + for (; depth > 0; depth--) {
60103 + path[pos] = '/';
60104 + pos++;
60105 + for (i = 1, tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
60106 + if (depth == i) {
60107 + memcpy(path + pos, tmp->procname,
60108 + strlen(tmp->procname));
60109 + pos += strlen(tmp->procname);
60110 + }
60111 + i++;
60112 + }
60113 + }
60114 +
60115 + obj = gr_lookup_by_name(path, pos);
60116 + err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
60117 +
60118 + if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
60119 + ((err & mode) != mode))) {
60120 + __u32 new_mode = mode;
60121 +
60122 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
60123 +
60124 + err = 0;
60125 + gr_log_learn_sysctl(path, new_mode);
60126 + } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
60127 + gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
60128 + err = -ENOENT;
60129 + } else if (!(err & GR_FIND)) {
60130 + err = -ENOENT;
60131 + } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
60132 + gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
60133 + path, (mode & GR_READ) ? " reading" : "",
60134 + (mode & GR_WRITE) ? " writing" : "");
60135 + err = -EACCES;
60136 + } else if ((err & mode) != mode) {
60137 + err = -EACCES;
60138 + } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
60139 + gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
60140 + path, (mode & GR_READ) ? " reading" : "",
60141 + (mode & GR_WRITE) ? " writing" : "");
60142 + err = 0;
60143 + } else
60144 + err = 0;
60145 +
60146 + out:
60147 + preempt_enable();
60148 +
60149 + return err;
60150 +}
60151 +#endif
60152 +
60153 +int
60154 +gr_handle_proc_ptrace(struct task_struct *task)
60155 +{
60156 + struct file *filp;
60157 + struct task_struct *tmp = task;
60158 + struct task_struct *curtemp = current;
60159 + __u32 retmode;
60160 +
60161 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
60162 + if (unlikely(!(gr_status & GR_READY)))
60163 + return 0;
60164 +#endif
60165 +
60166 + read_lock(&tasklist_lock);
60167 + read_lock(&grsec_exec_file_lock);
60168 + filp = task->exec_file;
60169 +
60170 + while (tmp->pid > 0) {
60171 + if (tmp == curtemp)
60172 + break;
60173 + tmp = tmp->real_parent;
60174 + }
60175 +
60176 + if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
60177 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
60178 + read_unlock(&grsec_exec_file_lock);
60179 + read_unlock(&tasklist_lock);
60180 + return 1;
60181 + }
60182 +
60183 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
60184 + if (!(gr_status & GR_READY)) {
60185 + read_unlock(&grsec_exec_file_lock);
60186 + read_unlock(&tasklist_lock);
60187 + return 0;
60188 + }
60189 +#endif
60190 +
60191 + retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
60192 + read_unlock(&grsec_exec_file_lock);
60193 + read_unlock(&tasklist_lock);
60194 +
60195 + if (retmode & GR_NOPTRACE)
60196 + return 1;
60197 +
60198 + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
60199 + && (current->acl != task->acl || (current->acl != current->role->root_label
60200 + && current->pid != task->pid)))
60201 + return 1;
60202 +
60203 + return 0;
60204 +}
60205 +
60206 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
60207 +{
60208 + if (unlikely(!(gr_status & GR_READY)))
60209 + return;
60210 +
60211 + if (!(current->role->roletype & GR_ROLE_GOD))
60212 + return;
60213 +
60214 + seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
60215 + p->role->rolename, gr_task_roletype_to_char(p),
60216 + p->acl->filename);
60217 +}
60218 +
60219 +int
60220 +gr_handle_ptrace(struct task_struct *task, const long request)
60221 +{
60222 + struct task_struct *tmp = task;
60223 + struct task_struct *curtemp = current;
60224 + __u32 retmode;
60225 +
60226 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
60227 + if (unlikely(!(gr_status & GR_READY)))
60228 + return 0;
60229 +#endif
60230 +
60231 + read_lock(&tasklist_lock);
60232 + while (tmp->pid > 0) {
60233 + if (tmp == curtemp)
60234 + break;
60235 + tmp = tmp->real_parent;
60236 + }
60237 +
60238 + if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
60239 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
60240 + read_unlock(&tasklist_lock);
60241 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
60242 + return 1;
60243 + }
60244 + read_unlock(&tasklist_lock);
60245 +
60246 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
60247 + if (!(gr_status & GR_READY))
60248 + return 0;
60249 +#endif
60250 +
60251 + read_lock(&grsec_exec_file_lock);
60252 + if (unlikely(!task->exec_file)) {
60253 + read_unlock(&grsec_exec_file_lock);
60254 + return 0;
60255 + }
60256 +
60257 + retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
60258 + read_unlock(&grsec_exec_file_lock);
60259 +
60260 + if (retmode & GR_NOPTRACE) {
60261 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
60262 + return 1;
60263 + }
60264 +
60265 + if (retmode & GR_PTRACERD) {
60266 + switch (request) {
60267 + case PTRACE_POKETEXT:
60268 + case PTRACE_POKEDATA:
60269 + case PTRACE_POKEUSR:
60270 +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
60271 + case PTRACE_SETREGS:
60272 + case PTRACE_SETFPREGS:
60273 +#endif
60274 +#ifdef CONFIG_X86
60275 + case PTRACE_SETFPXREGS:
60276 +#endif
60277 +#ifdef CONFIG_ALTIVEC
60278 + case PTRACE_SETVRREGS:
60279 +#endif
60280 + return 1;
60281 + default:
60282 + return 0;
60283 + }
60284 + } else if (!(current->acl->mode & GR_POVERRIDE) &&
60285 + !(current->role->roletype & GR_ROLE_GOD) &&
60286 + (current->acl != task->acl)) {
60287 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
60288 + return 1;
60289 + }
60290 +
60291 + return 0;
60292 +}
60293 +
60294 +static int is_writable_mmap(const struct file *filp)
60295 +{
60296 + struct task_struct *task = current;
60297 + struct acl_object_label *obj, *obj2;
60298 +
60299 + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
60300 + !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
60301 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
60302 + obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
60303 + task->role->root_label);
60304 + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
60305 + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
60306 + return 1;
60307 + }
60308 + }
60309 + return 0;
60310 +}
60311 +
60312 +int
60313 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
60314 +{
60315 + __u32 mode;
60316 +
60317 + if (unlikely(!file || !(prot & PROT_EXEC)))
60318 + return 1;
60319 +
60320 + if (is_writable_mmap(file))
60321 + return 0;
60322 +
60323 + mode =
60324 + gr_search_file(file->f_path.dentry,
60325 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
60326 + file->f_path.mnt);
60327 +
60328 + if (!gr_tpe_allow(file))
60329 + return 0;
60330 +
60331 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
60332 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
60333 + return 0;
60334 + } else if (unlikely(!(mode & GR_EXEC))) {
60335 + return 0;
60336 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
60337 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
60338 + return 1;
60339 + }
60340 +
60341 + return 1;
60342 +}
60343 +
60344 +int
60345 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
60346 +{
60347 + __u32 mode;
60348 +
60349 + if (unlikely(!file || !(prot & PROT_EXEC)))
60350 + return 1;
60351 +
60352 + if (is_writable_mmap(file))
60353 + return 0;
60354 +
60355 + mode =
60356 + gr_search_file(file->f_path.dentry,
60357 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
60358 + file->f_path.mnt);
60359 +
60360 + if (!gr_tpe_allow(file))
60361 + return 0;
60362 +
60363 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
60364 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
60365 + return 0;
60366 + } else if (unlikely(!(mode & GR_EXEC))) {
60367 + return 0;
60368 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
60369 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
60370 + return 1;
60371 + }
60372 +
60373 + return 1;
60374 +}
60375 +
60376 +void
60377 +gr_acl_handle_psacct(struct task_struct *task, const long code)
60378 +{
60379 + unsigned long runtime;
60380 + unsigned long cputime;
60381 + unsigned int wday, cday;
60382 + __u8 whr, chr;
60383 + __u8 wmin, cmin;
60384 + __u8 wsec, csec;
60385 + struct timespec timeval;
60386 +
60387 + if (unlikely(!(gr_status & GR_READY) || !task->acl ||
60388 + !(task->acl->mode & GR_PROCACCT)))
60389 + return;
60390 +
60391 + do_posix_clock_monotonic_gettime(&timeval);
60392 + runtime = timeval.tv_sec - task->start_time.tv_sec;
60393 + wday = runtime / (3600 * 24);
60394 + runtime -= wday * (3600 * 24);
60395 + whr = runtime / 3600;
60396 + runtime -= whr * 3600;
60397 + wmin = runtime / 60;
60398 + runtime -= wmin * 60;
60399 + wsec = runtime;
60400 +
60401 + cputime = (task->utime + task->stime) / HZ;
60402 + cday = cputime / (3600 * 24);
60403 + cputime -= cday * (3600 * 24);
60404 + chr = cputime / 3600;
60405 + cputime -= chr * 3600;
60406 + cmin = cputime / 60;
60407 + cputime -= cmin * 60;
60408 + csec = cputime;
60409 +
60410 + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
60411 +
60412 + return;
60413 +}
60414 +
60415 +void gr_set_kernel_label(struct task_struct *task)
60416 +{
60417 + if (gr_status & GR_READY) {
60418 + task->role = kernel_role;
60419 + task->acl = kernel_role->root_label;
60420 + }
60421 + return;
60422 +}
60423 +
60424 +#ifdef CONFIG_TASKSTATS
60425 +int gr_is_taskstats_denied(int pid)
60426 +{
60427 + struct task_struct *task;
60428 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60429 + const struct cred *cred;
60430 +#endif
60431 + int ret = 0;
60432 +
60433 + /* restrict taskstats viewing to un-chrooted root users
60434 + who have the 'view' subject flag if the RBAC system is enabled
60435 + */
60436 +
60437 + rcu_read_lock();
60438 + read_lock(&tasklist_lock);
60439 + task = find_task_by_vpid(pid);
60440 + if (task) {
60441 +#ifdef CONFIG_GRKERNSEC_CHROOT
60442 + if (proc_is_chrooted(task))
60443 + ret = -EACCES;
60444 +#endif
60445 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60446 + cred = __task_cred(task);
60447 +#ifdef CONFIG_GRKERNSEC_PROC_USER
60448 + if (cred->uid != 0)
60449 + ret = -EACCES;
60450 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60451 + if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
60452 + ret = -EACCES;
60453 +#endif
60454 +#endif
60455 + if (gr_status & GR_READY) {
60456 + if (!(task->acl->mode & GR_VIEW))
60457 + ret = -EACCES;
60458 + }
60459 + } else
60460 + ret = -ENOENT;
60461 +
60462 + read_unlock(&tasklist_lock);
60463 + rcu_read_unlock();
60464 +
60465 + return ret;
60466 +}
60467 +#endif
60468 +
60469 +/* AUXV entries are filled via a descendant of search_binary_handler
60470 + after we've already applied the subject for the target
60471 +*/
60472 +int gr_acl_enable_at_secure(void)
60473 +{
60474 + if (unlikely(!(gr_status & GR_READY)))
60475 + return 0;
60476 +
60477 + if (current->acl->mode & GR_ATSECURE)
60478 + return 1;
60479 +
60480 + return 0;
60481 +}
60482 +
60483 +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
60484 +{
60485 + struct task_struct *task = current;
60486 + struct dentry *dentry = file->f_path.dentry;
60487 + struct vfsmount *mnt = file->f_path.mnt;
60488 + struct acl_object_label *obj, *tmp;
60489 + struct acl_subject_label *subj;
60490 + unsigned int bufsize;
60491 + int is_not_root;
60492 + char *path;
60493 + dev_t dev = __get_dev(dentry);
60494 +
60495 + if (unlikely(!(gr_status & GR_READY)))
60496 + return 1;
60497 +
60498 + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
60499 + return 1;
60500 +
60501 + /* ignore Eric Biederman */
60502 + if (IS_PRIVATE(dentry->d_inode))
60503 + return 1;
60504 +
60505 + subj = task->acl;
60506 + do {
60507 + obj = lookup_acl_obj_label(ino, dev, subj);
60508 + if (obj != NULL)
60509 + return (obj->mode & GR_FIND) ? 1 : 0;
60510 + } while ((subj = subj->parent_subject));
60511 +
60512 + /* this is purely an optimization since we're looking for an object
60513 + for the directory we're doing a readdir on
60514 + if it's possible for any globbed object to match the entry we're
60515 + filling into the directory, then the object we find here will be
60516 + an anchor point with attached globbed objects
60517 + */
60518 + obj = chk_obj_label_noglob(dentry, mnt, task->acl);
60519 + if (obj->globbed == NULL)
60520 + return (obj->mode & GR_FIND) ? 1 : 0;
60521 +
60522 + is_not_root = ((obj->filename[0] == '/') &&
60523 + (obj->filename[1] == '\0')) ? 0 : 1;
60524 + bufsize = PAGE_SIZE - namelen - is_not_root;
60525 +
60526 + /* check bufsize > PAGE_SIZE || bufsize == 0 */
60527 + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
60528 + return 1;
60529 +
60530 + preempt_disable();
60531 + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
60532 + bufsize);
60533 +
60534 + bufsize = strlen(path);
60535 +
60536 + /* if base is "/", don't append an additional slash */
60537 + if (is_not_root)
60538 + *(path + bufsize) = '/';
60539 + memcpy(path + bufsize + is_not_root, name, namelen);
60540 + *(path + bufsize + namelen + is_not_root) = '\0';
60541 +
60542 + tmp = obj->globbed;
60543 + while (tmp) {
60544 + if (!glob_match(tmp->filename, path)) {
60545 + preempt_enable();
60546 + return (tmp->mode & GR_FIND) ? 1 : 0;
60547 + }
60548 + tmp = tmp->next;
60549 + }
60550 + preempt_enable();
60551 + return (obj->mode & GR_FIND) ? 1 : 0;
60552 +}
60553 +
60554 +#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
60555 +EXPORT_SYMBOL(gr_acl_is_enabled);
60556 +#endif
60557 +EXPORT_SYMBOL(gr_learn_resource);
60558 +EXPORT_SYMBOL(gr_set_kernel_label);
60559 +#ifdef CONFIG_SECURITY
60560 +EXPORT_SYMBOL(gr_check_user_change);
60561 +EXPORT_SYMBOL(gr_check_group_change);
60562 +#endif
60563 +
60564 diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
60565 new file mode 100644
60566 index 0000000..34fefda
60567 --- /dev/null
60568 +++ b/grsecurity/gracl_alloc.c
60569 @@ -0,0 +1,105 @@
60570 +#include <linux/kernel.h>
60571 +#include <linux/mm.h>
60572 +#include <linux/slab.h>
60573 +#include <linux/vmalloc.h>
60574 +#include <linux/gracl.h>
60575 +#include <linux/grsecurity.h>
60576 +
60577 +static unsigned long alloc_stack_next = 1;
60578 +static unsigned long alloc_stack_size = 1;
60579 +static void **alloc_stack;
60580 +
60581 +static __inline__ int
60582 +alloc_pop(void)
60583 +{
60584 + if (alloc_stack_next == 1)
60585 + return 0;
60586 +
60587 + kfree(alloc_stack[alloc_stack_next - 2]);
60588 +
60589 + alloc_stack_next--;
60590 +
60591 + return 1;
60592 +}
60593 +
60594 +static __inline__ int
60595 +alloc_push(void *buf)
60596 +{
60597 + if (alloc_stack_next >= alloc_stack_size)
60598 + return 1;
60599 +
60600 + alloc_stack[alloc_stack_next - 1] = buf;
60601 +
60602 + alloc_stack_next++;
60603 +
60604 + return 0;
60605 +}
60606 +
60607 +void *
60608 +acl_alloc(unsigned long len)
60609 +{
60610 + void *ret = NULL;
60611 +
60612 + if (!len || len > PAGE_SIZE)
60613 + goto out;
60614 +
60615 + ret = kmalloc(len, GFP_KERNEL);
60616 +
60617 + if (ret) {
60618 + if (alloc_push(ret)) {
60619 + kfree(ret);
60620 + ret = NULL;
60621 + }
60622 + }
60623 +
60624 +out:
60625 + return ret;
60626 +}
60627 +
60628 +void *
60629 +acl_alloc_num(unsigned long num, unsigned long len)
60630 +{
60631 + if (!len || (num > (PAGE_SIZE / len)))
60632 + return NULL;
60633 +
60634 + return acl_alloc(num * len);
60635 +}
60636 +
60637 +void
60638 +acl_free_all(void)
60639 +{
60640 + if (gr_acl_is_enabled() || !alloc_stack)
60641 + return;
60642 +
60643 + while (alloc_pop()) ;
60644 +
60645 + if (alloc_stack) {
60646 + if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
60647 + kfree(alloc_stack);
60648 + else
60649 + vfree(alloc_stack);
60650 + }
60651 +
60652 + alloc_stack = NULL;
60653 + alloc_stack_size = 1;
60654 + alloc_stack_next = 1;
60655 +
60656 + return;
60657 +}
60658 +
60659 +int
60660 +acl_alloc_stack_init(unsigned long size)
60661 +{
60662 + if ((size * sizeof (void *)) <= PAGE_SIZE)
60663 + alloc_stack =
60664 + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
60665 + else
60666 + alloc_stack = (void **) vmalloc(size * sizeof (void *));
60667 +
60668 + alloc_stack_size = size;
60669 +
60670 + if (!alloc_stack)
60671 + return 0;
60672 + else
60673 + return 1;
60674 +}
60675 diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
60676 new file mode 100644
60677 index 0000000..955ddfb
60678 --- /dev/null
60679 +++ b/grsecurity/gracl_cap.c
60680 @@ -0,0 +1,101 @@
60681 +#include <linux/kernel.h>
60682 +#include <linux/module.h>
60683 +#include <linux/sched.h>
60684 +#include <linux/gracl.h>
60685 +#include <linux/grsecurity.h>
60686 +#include <linux/grinternal.h>
60687 +
60688 +extern const char *captab_log[];
60689 +extern int captab_log_entries;
60690 +
60691 +int
60692 +gr_acl_is_capable(const int cap)
60693 +{
60694 + struct task_struct *task = current;
60695 + const struct cred *cred = current_cred();
60696 + struct acl_subject_label *curracl;
60697 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
60698 + kernel_cap_t cap_audit = __cap_empty_set;
60699 +
60700 + if (!gr_acl_is_enabled())
60701 + return 1;
60702 +
60703 + curracl = task->acl;
60704 +
60705 + cap_drop = curracl->cap_lower;
60706 + cap_mask = curracl->cap_mask;
60707 + cap_audit = curracl->cap_invert_audit;
60708 +
60709 + while ((curracl = curracl->parent_subject)) {
60710 + /* if the cap isn't specified in the current computed mask but is specified in the
60711 + current level subject, and is lowered in the current level subject, then add
60712 + it to the set of dropped capabilities
60713 + otherwise, add the current level subject's mask to the current computed mask
60714 + */
60715 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
60716 + cap_raise(cap_mask, cap);
60717 + if (cap_raised(curracl->cap_lower, cap))
60718 + cap_raise(cap_drop, cap);
60719 + if (cap_raised(curracl->cap_invert_audit, cap))
60720 + cap_raise(cap_audit, cap);
60721 + }
60722 + }
60723 +
60724 + if (!cap_raised(cap_drop, cap)) {
60725 + if (cap_raised(cap_audit, cap))
60726 + gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
60727 + return 1;
60728 + }
60729 +
60730 + curracl = task->acl;
60731 +
60732 + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
60733 + && cap_raised(cred->cap_effective, cap)) {
60734 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
60735 + task->role->roletype, cred->uid,
60736 + cred->gid, task->exec_file ?
60737 + gr_to_filename(task->exec_file->f_path.dentry,
60738 + task->exec_file->f_path.mnt) : curracl->filename,
60739 + curracl->filename, 0UL,
60740 + 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
60741 + return 1;
60742 + }
60743 +
60744 + if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
60745 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
60746 + return 0;
60747 +}
60748 +
60749 +int
60750 +gr_acl_is_capable_nolog(const int cap)
60751 +{
60752 + struct acl_subject_label *curracl;
60753 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
60754 +
60755 + if (!gr_acl_is_enabled())
60756 + return 1;
60757 +
60758 + curracl = current->acl;
60759 +
60760 + cap_drop = curracl->cap_lower;
60761 + cap_mask = curracl->cap_mask;
60762 +
60763 + while ((curracl = curracl->parent_subject)) {
60764 + /* if the cap isn't specified in the current computed mask but is specified in the
60765 + current level subject, and is lowered in the current level subject, then add
60766 + it to the set of dropped capabilities
60767 + otherwise, add the current level subject's mask to the current computed mask
60768 + */
60769 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
60770 + cap_raise(cap_mask, cap);
60771 + if (cap_raised(curracl->cap_lower, cap))
60772 + cap_raise(cap_drop, cap);
60773 + }
60774 + }
60775 +
60776 + if (!cap_raised(cap_drop, cap))
60777 + return 1;
60778 +
60779 + return 0;
60780 +}
60781 +
60782 diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
60783 new file mode 100644
60784 index 0000000..523e7e8
60785 --- /dev/null
60786 +++ b/grsecurity/gracl_fs.c
60787 @@ -0,0 +1,435 @@
60788 +#include <linux/kernel.h>
60789 +#include <linux/sched.h>
60790 +#include <linux/types.h>
60791 +#include <linux/fs.h>
60792 +#include <linux/file.h>
60793 +#include <linux/stat.h>
60794 +#include <linux/grsecurity.h>
60795 +#include <linux/grinternal.h>
60796 +#include <linux/gracl.h>
60797 +
60798 +umode_t
60799 +gr_acl_umask(void)
60800 +{
60801 + if (unlikely(!gr_acl_is_enabled()))
60802 + return 0;
60803 +
60804 + return current->role->umask;
60805 +}
60806 +
60807 +__u32
60808 +gr_acl_handle_hidden_file(const struct dentry * dentry,
60809 + const struct vfsmount * mnt)
60810 +{
60811 + __u32 mode;
60812 +
60813 + if (unlikely(!dentry->d_inode))
60814 + return GR_FIND;
60815 +
60816 + mode =
60817 + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
60818 +
60819 + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
60820 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
60821 + return mode;
60822 + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
60823 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
60824 + return 0;
60825 + } else if (unlikely(!(mode & GR_FIND)))
60826 + return 0;
60827 +
60828 + return GR_FIND;
60829 +}
60830 +
60831 +__u32
60832 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
60833 + int acc_mode)
60834 +{
60835 + __u32 reqmode = GR_FIND;
60836 + __u32 mode;
60837 +
60838 + if (unlikely(!dentry->d_inode))
60839 + return reqmode;
60840 +
60841 + if (acc_mode & MAY_APPEND)
60842 + reqmode |= GR_APPEND;
60843 + else if (acc_mode & MAY_WRITE)
60844 + reqmode |= GR_WRITE;
60845 + if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
60846 + reqmode |= GR_READ;
60847 +
60848 + mode =
60849 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
60850 + mnt);
60851 +
60852 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
60853 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
60854 + reqmode & GR_READ ? " reading" : "",
60855 + reqmode & GR_WRITE ? " writing" : reqmode &
60856 + GR_APPEND ? " appending" : "");
60857 + return reqmode;
60858 + } else
60859 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
60860 + {
60861 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
60862 + reqmode & GR_READ ? " reading" : "",
60863 + reqmode & GR_WRITE ? " writing" : reqmode &
60864 + GR_APPEND ? " appending" : "");
60865 + return 0;
60866 + } else if (unlikely((mode & reqmode) != reqmode))
60867 + return 0;
60868 +
60869 + return reqmode;
60870 +}
60871 +
60872 +__u32
60873 +gr_acl_handle_creat(const struct dentry * dentry,
60874 + const struct dentry * p_dentry,
60875 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
60876 + const int imode)
60877 +{
60878 + __u32 reqmode = GR_WRITE | GR_CREATE;
60879 + __u32 mode;
60880 +
60881 + if (acc_mode & MAY_APPEND)
60882 + reqmode |= GR_APPEND;
60883 + // if a directory was required or the directory already exists, then
60884 + // don't count this open as a read
60885 + if ((acc_mode & MAY_READ) &&
60886 + !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
60887 + reqmode |= GR_READ;
60888 + if ((open_flags & O_CREAT) && (imode & (S_ISUID | S_ISGID)))
60889 + reqmode |= GR_SETID;
60890 +
60891 + mode =
60892 + gr_check_create(dentry, p_dentry, p_mnt,
60893 + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
60894 +
60895 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
60896 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
60897 + reqmode & GR_READ ? " reading" : "",
60898 + reqmode & GR_WRITE ? " writing" : reqmode &
60899 + GR_APPEND ? " appending" : "");
60900 + return reqmode;
60901 + } else
60902 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
60903 + {
60904 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
60905 + reqmode & GR_READ ? " reading" : "",
60906 + reqmode & GR_WRITE ? " writing" : reqmode &
60907 + GR_APPEND ? " appending" : "");
60908 + return 0;
60909 + } else if (unlikely((mode & reqmode) != reqmode))
60910 + return 0;
60911 +
60912 + return reqmode;
60913 +}
60914 +
60915 +__u32
60916 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
60917 + const int fmode)
60918 +{
60919 + __u32 mode, reqmode = GR_FIND;
60920 +
60921 + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
60922 + reqmode |= GR_EXEC;
60923 + if (fmode & S_IWOTH)
60924 + reqmode |= GR_WRITE;
60925 + if (fmode & S_IROTH)
60926 + reqmode |= GR_READ;
60927 +
60928 + mode =
60929 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
60930 + mnt);
60931 +
60932 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
60933 + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
60934 + reqmode & GR_READ ? " reading" : "",
60935 + reqmode & GR_WRITE ? " writing" : "",
60936 + reqmode & GR_EXEC ? " executing" : "");
60937 + return reqmode;
60938 + } else
60939 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
60940 + {
60941 + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
60942 + reqmode & GR_READ ? " reading" : "",
60943 + reqmode & GR_WRITE ? " writing" : "",
60944 + reqmode & GR_EXEC ? " executing" : "");
60945 + return 0;
60946 + } else if (unlikely((mode & reqmode) != reqmode))
60947 + return 0;
60948 +
60949 + return reqmode;
60950 +}
60951 +
60952 +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
60953 +{
60954 + __u32 mode;
60955 +
60956 + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
60957 +
60958 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
60959 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
60960 + return mode;
60961 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
60962 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
60963 + return 0;
60964 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
60965 + return 0;
60966 +
60967 + return (reqmode);
60968 +}
60969 +
60970 +__u32
60971 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
60972 +{
60973 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
60974 +}
60975 +
60976 +__u32
60977 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
60978 +{
60979 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
60980 +}
60981 +
60982 +__u32
60983 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
60984 +{
60985 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
60986 +}
60987 +
60988 +__u32
60989 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
60990 +{
60991 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
60992 +}
60993 +
60994 +__u32
60995 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
60996 + umode_t *modeptr)
60997 +{
60998 + mode_t mode;
60999 +
61000 + *modeptr &= ~(mode_t)gr_acl_umask();
61001 + mode = *modeptr;
61002 +
61003 + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
61004 + return 1;
61005 +
61006 + if (unlikely(mode & (S_ISUID | S_ISGID))) {
61007 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
61008 + GR_CHMOD_ACL_MSG);
61009 + } else {
61010 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
61011 + }
61012 +}
61013 +
61014 +__u32
61015 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
61016 +{
61017 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
61018 +}
61019 +
61020 +__u32
61021 +gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
61022 +{
61023 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
61024 +}
61025 +
61026 +__u32
61027 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
61028 +{
61029 + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
61030 +}
61031 +
61032 +__u32
61033 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
61034 +{
61035 + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
61036 + GR_UNIXCONNECT_ACL_MSG);
61037 +}
61038 +
61039 +/* hardlinks require at minimum create and link permission,
61040 + any additional privilege required is based on the
61041 + privilege of the file being linked to
61042 +*/
61043 +__u32
61044 +gr_acl_handle_link(const struct dentry * new_dentry,
61045 + const struct dentry * parent_dentry,
61046 + const struct vfsmount * parent_mnt,
61047 + const struct dentry * old_dentry,
61048 + const struct vfsmount * old_mnt, const char *to)
61049 +{
61050 + __u32 mode;
61051 + __u32 needmode = GR_CREATE | GR_LINK;
61052 + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
61053 +
61054 + mode =
61055 + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
61056 + old_mnt);
61057 +
61058 + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
61059 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
61060 + return mode;
61061 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
61062 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
61063 + return 0;
61064 + } else if (unlikely((mode & needmode) != needmode))
61065 + return 0;
61066 +
61067 + return 1;
61068 +}
61069 +
61070 +__u32
61071 +gr_acl_handle_symlink(const struct dentry * new_dentry,
61072 + const struct dentry * parent_dentry,
61073 + const struct vfsmount * parent_mnt, const char *from)
61074 +{
61075 + __u32 needmode = GR_WRITE | GR_CREATE;
61076 + __u32 mode;
61077 +
61078 + mode =
61079 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
61080 + GR_CREATE | GR_AUDIT_CREATE |
61081 + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
61082 +
61083 + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
61084 + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
61085 + return mode;
61086 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
61087 + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
61088 + return 0;
61089 + } else if (unlikely((mode & needmode) != needmode))
61090 + return 0;
61091 +
61092 + return (GR_WRITE | GR_CREATE);
61093 +}
61094 +
61095 +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
61096 +{
61097 + __u32 mode;
61098 +
61099 + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
61100 +
61101 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
61102 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
61103 + return mode;
61104 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
61105 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
61106 + return 0;
61107 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
61108 + return 0;
61109 +
61110 + return (reqmode);
61111 +}
61112 +
61113 +__u32
61114 +gr_acl_handle_mknod(const struct dentry * new_dentry,
61115 + const struct dentry * parent_dentry,
61116 + const struct vfsmount * parent_mnt,
61117 + const int mode)
61118 +{
61119 + __u32 reqmode = GR_WRITE | GR_CREATE;
61120 + if (unlikely(mode & (S_ISUID | S_ISGID)))
61121 + reqmode |= GR_SETID;
61122 +
61123 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
61124 + reqmode, GR_MKNOD_ACL_MSG);
61125 +}
61126 +
61127 +__u32
61128 +gr_acl_handle_mkdir(const struct dentry *new_dentry,
61129 + const struct dentry *parent_dentry,
61130 + const struct vfsmount *parent_mnt)
61131 +{
61132 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
61133 + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
61134 +}
61135 +
61136 +#define RENAME_CHECK_SUCCESS(old, new) \
61137 + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
61138 + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
61139 +
61140 +int
61141 +gr_acl_handle_rename(struct dentry *new_dentry,
61142 + struct dentry *parent_dentry,
61143 + const struct vfsmount *parent_mnt,
61144 + struct dentry *old_dentry,
61145 + struct inode *old_parent_inode,
61146 + struct vfsmount *old_mnt, const char *newname)
61147 +{
61148 + __u32 comp1, comp2;
61149 + int error = 0;
61150 +
61151 + if (unlikely(!gr_acl_is_enabled()))
61152 + return 0;
61153 +
61154 + if (!new_dentry->d_inode) {
61155 + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
61156 + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
61157 + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
61158 + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
61159 + GR_DELETE | GR_AUDIT_DELETE |
61160 + GR_AUDIT_READ | GR_AUDIT_WRITE |
61161 + GR_SUPPRESS, old_mnt);
61162 + } else {
61163 + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
61164 + GR_CREATE | GR_DELETE |
61165 + GR_AUDIT_CREATE | GR_AUDIT_DELETE |
61166 + GR_AUDIT_READ | GR_AUDIT_WRITE |
61167 + GR_SUPPRESS, parent_mnt);
61168 + comp2 =
61169 + gr_search_file(old_dentry,
61170 + GR_READ | GR_WRITE | GR_AUDIT_READ |
61171 + GR_DELETE | GR_AUDIT_DELETE |
61172 + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
61173 + }
61174 +
61175 + if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
61176 + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
61177 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
61178 + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
61179 + && !(comp2 & GR_SUPPRESS)) {
61180 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
61181 + error = -EACCES;
61182 + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
61183 + error = -EACCES;
61184 +
61185 + return error;
61186 +}
61187 +
61188 +void
61189 +gr_acl_handle_exit(void)
61190 +{
61191 + u16 id;
61192 + char *rolename;
61193 + struct file *exec_file;
61194 +
61195 + if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
61196 + !(current->role->roletype & GR_ROLE_PERSIST))) {
61197 + id = current->acl_role_id;
61198 + rolename = current->role->rolename;
61199 + gr_set_acls(1);
61200 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
61201 + }
61202 +
61203 + write_lock(&grsec_exec_file_lock);
61204 + exec_file = current->exec_file;
61205 + current->exec_file = NULL;
61206 + write_unlock(&grsec_exec_file_lock);
61207 +
61208 + if (exec_file)
61209 + fput(exec_file);
61210 +}
61211 +
61212 +int
61213 +gr_acl_handle_procpidmem(const struct task_struct *task)
61214 +{
61215 + if (unlikely(!gr_acl_is_enabled()))
61216 + return 0;
61217 +
61218 + if (task != current && task->acl->mode & GR_PROTPROCFD)
61219 + return -EACCES;
61220 +
61221 + return 0;
61222 +}
61223 diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
61224 new file mode 100644
61225 index 0000000..cd07b96
61226 --- /dev/null
61227 +++ b/grsecurity/gracl_ip.c
61228 @@ -0,0 +1,382 @@
61229 +#include <linux/kernel.h>
61230 +#include <asm/uaccess.h>
61231 +#include <asm/errno.h>
61232 +#include <net/sock.h>
61233 +#include <linux/file.h>
61234 +#include <linux/fs.h>
61235 +#include <linux/net.h>
61236 +#include <linux/in.h>
61237 +#include <linux/skbuff.h>
61238 +#include <linux/ip.h>
61239 +#include <linux/udp.h>
61240 +#include <linux/smp_lock.h>
61241 +#include <linux/types.h>
61242 +#include <linux/sched.h>
61243 +#include <linux/netdevice.h>
61244 +#include <linux/inetdevice.h>
61245 +#include <linux/gracl.h>
61246 +#include <linux/grsecurity.h>
61247 +#include <linux/grinternal.h>
61248 +
61249 +#define GR_BIND 0x01
61250 +#define GR_CONNECT 0x02
61251 +#define GR_INVERT 0x04
61252 +#define GR_BINDOVERRIDE 0x08
61253 +#define GR_CONNECTOVERRIDE 0x10
61254 +#define GR_SOCK_FAMILY 0x20
61255 +
61256 +static const char * gr_protocols[IPPROTO_MAX] = {
61257 + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
61258 + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
61259 + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
61260 + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
61261 + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
61262 + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
61263 + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
61264 + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
61265 + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
61266 + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
61267 + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
61268 + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
61269 + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
61270 + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
61271 + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
61272 + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
61273 + "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
61274 + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
61275 + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
61276 + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
61277 + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
61278 + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
61279 + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
61280 + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
61281 + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
61282 + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
61283 + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
61284 + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
61285 + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
61286 + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
61287 + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
61288 + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
61289 + };
61290 +
61291 +static const char * gr_socktypes[SOCK_MAX] = {
61292 + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
61293 + "unknown:7", "unknown:8", "unknown:9", "packet"
61294 + };
61295 +
61296 +static const char * gr_sockfamilies[AF_MAX+1] = {
61297 + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
61298 + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
61299 + "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
61300 + "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154"
61301 + };
61302 +
61303 +const char *
61304 +gr_proto_to_name(unsigned char proto)
61305 +{
61306 + return gr_protocols[proto];
61307 +}
61308 +
61309 +const char *
61310 +gr_socktype_to_name(unsigned char type)
61311 +{
61312 + return gr_socktypes[type];
61313 +}
61314 +
61315 +const char *
61316 +gr_sockfamily_to_name(unsigned char family)
61317 +{
61318 + return gr_sockfamilies[family];
61319 +}
61320 +
61321 +int
61322 +gr_search_socket(const int domain, const int type, const int protocol)
61323 +{
61324 + struct acl_subject_label *curr;
61325 + const struct cred *cred = current_cred();
61326 +
61327 + if (unlikely(!gr_acl_is_enabled()))
61328 + goto exit;
61329 +
61330 + if ((domain < 0) || (type < 0) || (protocol < 0) ||
61331 + (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
61332 + goto exit; // let the kernel handle it
61333 +
61334 + curr = current->acl;
61335 +
61336 + if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
61337 + /* the family is allowed, if this is PF_INET allow it only if
61338 + the extra sock type/protocol checks pass */
61339 + if (domain == PF_INET)
61340 + goto inet_check;
61341 + goto exit;
61342 + } else {
61343 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
61344 + __u32 fakeip = 0;
61345 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
61346 + current->role->roletype, cred->uid,
61347 + cred->gid, current->exec_file ?
61348 + gr_to_filename(current->exec_file->f_path.dentry,
61349 + current->exec_file->f_path.mnt) :
61350 + curr->filename, curr->filename,
61351 + &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
61352 + &current->signal->saved_ip);
61353 + goto exit;
61354 + }
61355 + goto exit_fail;
61356 + }
61357 +
61358 +inet_check:
61359 + /* the rest of this checking is for IPv4 only */
61360 + if (!curr->ips)
61361 + goto exit;
61362 +
61363 + if ((curr->ip_type & (1 << type)) &&
61364 + (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
61365 + goto exit;
61366 +
61367 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
61368 + /* we don't place acls on raw sockets , and sometimes
61369 + dgram/ip sockets are opened for ioctl and not
61370 + bind/connect, so we'll fake a bind learn log */
61371 + if (type == SOCK_RAW || type == SOCK_PACKET) {
61372 + __u32 fakeip = 0;
61373 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
61374 + current->role->roletype, cred->uid,
61375 + cred->gid, current->exec_file ?
61376 + gr_to_filename(current->exec_file->f_path.dentry,
61377 + current->exec_file->f_path.mnt) :
61378 + curr->filename, curr->filename,
61379 + &fakeip, 0, type,
61380 + protocol, GR_CONNECT, &current->signal->saved_ip);
61381 + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
61382 + __u32 fakeip = 0;
61383 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
61384 + current->role->roletype, cred->uid,
61385 + cred->gid, current->exec_file ?
61386 + gr_to_filename(current->exec_file->f_path.dentry,
61387 + current->exec_file->f_path.mnt) :
61388 + curr->filename, curr->filename,
61389 + &fakeip, 0, type,
61390 + protocol, GR_BIND, &current->signal->saved_ip);
61391 + }
61392 + /* we'll log when they use connect or bind */
61393 + goto exit;
61394 + }
61395 +
61396 +exit_fail:
61397 + if (domain == PF_INET)
61398 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
61399 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
61400 + else
61401 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
61402 + gr_socktype_to_name(type), protocol);
61403 +
61404 + return 0;
61405 +exit:
61406 + return 1;
61407 +}
61408 +
61409 +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
61410 +{
61411 + if ((ip->mode & mode) &&
61412 + (ip_port >= ip->low) &&
61413 + (ip_port <= ip->high) &&
61414 + ((ntohl(ip_addr) & our_netmask) ==
61415 + (ntohl(our_addr) & our_netmask))
61416 + && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
61417 + && (ip->type & (1 << type))) {
61418 + if (ip->mode & GR_INVERT)
61419 + return 2; // specifically denied
61420 + else
61421 + return 1; // allowed
61422 + }
61423 +
61424 + return 0; // not specifically allowed, may continue parsing
61425 +}
61426 +
61427 +static int
61428 +gr_search_connectbind(const int full_mode, struct sock *sk,
61429 + struct sockaddr_in *addr, const int type)
61430 +{
61431 + char iface[IFNAMSIZ] = {0};
61432 + struct acl_subject_label *curr;
61433 + struct acl_ip_label *ip;
61434 + struct inet_sock *isk;
61435 + struct net_device *dev;
61436 + struct in_device *idev;
61437 + unsigned long i;
61438 + int ret;
61439 + int mode = full_mode & (GR_BIND | GR_CONNECT);
61440 + __u32 ip_addr = 0;
61441 + __u32 our_addr;
61442 + __u32 our_netmask;
61443 + char *p;
61444 + __u16 ip_port = 0;
61445 + const struct cred *cred = current_cred();
61446 +
61447 + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
61448 + return 0;
61449 +
61450 + curr = current->acl;
61451 + isk = inet_sk(sk);
61452 +
61453 + /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
61454 + if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
61455 + addr->sin_addr.s_addr = curr->inaddr_any_override;
61456 + if ((full_mode & GR_CONNECT) && isk->saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
61457 + struct sockaddr_in saddr;
61458 + int err;
61459 +
61460 + saddr.sin_family = AF_INET;
61461 + saddr.sin_addr.s_addr = curr->inaddr_any_override;
61462 + saddr.sin_port = isk->sport;
61463 +
61464 + err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
61465 + if (err)
61466 + return err;
61467 +
61468 + err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
61469 + if (err)
61470 + return err;
61471 + }
61472 +
61473 + if (!curr->ips)
61474 + return 0;
61475 +
61476 + ip_addr = addr->sin_addr.s_addr;
61477 + ip_port = ntohs(addr->sin_port);
61478 +
61479 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
61480 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
61481 + current->role->roletype, cred->uid,
61482 + cred->gid, current->exec_file ?
61483 + gr_to_filename(current->exec_file->f_path.dentry,
61484 + current->exec_file->f_path.mnt) :
61485 + curr->filename, curr->filename,
61486 + &ip_addr, ip_port, type,
61487 + sk->sk_protocol, mode, &current->signal->saved_ip);
61488 + return 0;
61489 + }
61490 +
61491 + for (i = 0; i < curr->ip_num; i++) {
61492 + ip = *(curr->ips + i);
61493 + if (ip->iface != NULL) {
61494 + strncpy(iface, ip->iface, IFNAMSIZ - 1);
61495 + p = strchr(iface, ':');
61496 + if (p != NULL)
61497 + *p = '\0';
61498 + dev = dev_get_by_name(sock_net(sk), iface);
61499 + if (dev == NULL)
61500 + continue;
61501 + idev = in_dev_get(dev);
61502 + if (idev == NULL) {
61503 + dev_put(dev);
61504 + continue;
61505 + }
61506 + rcu_read_lock();
61507 + for_ifa(idev) {
61508 + if (!strcmp(ip->iface, ifa->ifa_label)) {
61509 + our_addr = ifa->ifa_address;
61510 + our_netmask = 0xffffffff;
61511 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
61512 + if (ret == 1) {
61513 + rcu_read_unlock();
61514 + in_dev_put(idev);
61515 + dev_put(dev);
61516 + return 0;
61517 + } else if (ret == 2) {
61518 + rcu_read_unlock();
61519 + in_dev_put(idev);
61520 + dev_put(dev);
61521 + goto denied;
61522 + }
61523 + }
61524 + } endfor_ifa(idev);
61525 + rcu_read_unlock();
61526 + in_dev_put(idev);
61527 + dev_put(dev);
61528 + } else {
61529 + our_addr = ip->addr;
61530 + our_netmask = ip->netmask;
61531 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
61532 + if (ret == 1)
61533 + return 0;
61534 + else if (ret == 2)
61535 + goto denied;
61536 + }
61537 + }
61538 +
61539 +denied:
61540 + if (mode == GR_BIND)
61541 + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
61542 + else if (mode == GR_CONNECT)
61543 + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
61544 +
61545 + return -EACCES;
61546 +}
61547 +
61548 +int
61549 +gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
61550 +{
61551 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
61552 +}
61553 +
61554 +int
61555 +gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
61556 +{
61557 + return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
61558 +}
61559 +
61560 +int gr_search_listen(struct socket *sock)
61561 +{
61562 + struct sock *sk = sock->sk;
61563 + struct sockaddr_in addr;
61564 +
61565 + addr.sin_addr.s_addr = inet_sk(sk)->saddr;
61566 + addr.sin_port = inet_sk(sk)->sport;
61567 +
61568 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
61569 +}
61570 +
61571 +int gr_search_accept(struct socket *sock)
61572 +{
61573 + struct sock *sk = sock->sk;
61574 + struct sockaddr_in addr;
61575 +
61576 + addr.sin_addr.s_addr = inet_sk(sk)->saddr;
61577 + addr.sin_port = inet_sk(sk)->sport;
61578 +
61579 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
61580 +}
61581 +
61582 +int
61583 +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
61584 +{
61585 + if (addr)
61586 + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
61587 + else {
61588 + struct sockaddr_in sin;
61589 + const struct inet_sock *inet = inet_sk(sk);
61590 +
61591 + sin.sin_addr.s_addr = inet->daddr;
61592 + sin.sin_port = inet->dport;
61593 +
61594 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
61595 + }
61596 +}
61597 +
61598 +int
61599 +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
61600 +{
61601 + struct sockaddr_in sin;
61602 +
61603 + if (unlikely(skb->len < sizeof (struct udphdr)))
61604 + return 0; // skip this packet
61605 +
61606 + sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
61607 + sin.sin_port = udp_hdr(skb)->source;
61608 +
61609 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
61610 +}
61611 diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
61612 new file mode 100644
61613 index 0000000..34bdd46
61614 --- /dev/null
61615 +++ b/grsecurity/gracl_learn.c
61616 @@ -0,0 +1,208 @@
61617 +#include <linux/kernel.h>
61618 +#include <linux/mm.h>
61619 +#include <linux/sched.h>
61620 +#include <linux/poll.h>
61621 +#include <linux/smp_lock.h>
61622 +#include <linux/string.h>
61623 +#include <linux/file.h>
61624 +#include <linux/types.h>
61625 +#include <linux/vmalloc.h>
61626 +#include <linux/grinternal.h>
61627 +
61628 +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
61629 + size_t count, loff_t *ppos);
61630 +extern int gr_acl_is_enabled(void);
61631 +
61632 +static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
61633 +static int gr_learn_attached;
61634 +
61635 +/* use a 512k buffer */
61636 +#define LEARN_BUFFER_SIZE (512 * 1024)
61637 +
61638 +static DEFINE_SPINLOCK(gr_learn_lock);
61639 +static DEFINE_MUTEX(gr_learn_user_mutex);
61640 +
61641 +/* we need to maintain two buffers, so that the kernel context of grlearn
61642 + uses a semaphore around the userspace copying, and the other kernel contexts
61643 + use a spinlock when copying into the buffer, since they cannot sleep
61644 +*/
61645 +static char *learn_buffer;
61646 +static char *learn_buffer_user;
61647 +static int learn_buffer_len;
61648 +static int learn_buffer_user_len;
61649 +
61650 +static ssize_t
61651 +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
61652 +{
61653 + DECLARE_WAITQUEUE(wait, current);
61654 + ssize_t retval = 0;
61655 +
61656 + add_wait_queue(&learn_wait, &wait);
61657 + set_current_state(TASK_INTERRUPTIBLE);
61658 + do {
61659 + mutex_lock(&gr_learn_user_mutex);
61660 + spin_lock(&gr_learn_lock);
61661 + if (learn_buffer_len)
61662 + break;
61663 + spin_unlock(&gr_learn_lock);
61664 + mutex_unlock(&gr_learn_user_mutex);
61665 + if (file->f_flags & O_NONBLOCK) {
61666 + retval = -EAGAIN;
61667 + goto out;
61668 + }
61669 + if (signal_pending(current)) {
61670 + retval = -ERESTARTSYS;
61671 + goto out;
61672 + }
61673 +
61674 + schedule();
61675 + } while (1);
61676 +
61677 + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
61678 + learn_buffer_user_len = learn_buffer_len;
61679 + retval = learn_buffer_len;
61680 + learn_buffer_len = 0;
61681 +
61682 + spin_unlock(&gr_learn_lock);
61683 +
61684 + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
61685 + retval = -EFAULT;
61686 +
61687 + mutex_unlock(&gr_learn_user_mutex);
61688 +out:
61689 + set_current_state(TASK_RUNNING);
61690 + remove_wait_queue(&learn_wait, &wait);
61691 + return retval;
61692 +}
61693 +
61694 +static unsigned int
61695 +poll_learn(struct file * file, poll_table * wait)
61696 +{
61697 + poll_wait(file, &learn_wait, wait);
61698 +
61699 + if (learn_buffer_len)
61700 + return (POLLIN | POLLRDNORM);
61701 +
61702 + return 0;
61703 +}
61704 +
61705 +void
61706 +gr_clear_learn_entries(void)
61707 +{
61708 + char *tmp;
61709 +
61710 + mutex_lock(&gr_learn_user_mutex);
61711 + spin_lock(&gr_learn_lock);
61712 + tmp = learn_buffer;
61713 + learn_buffer = NULL;
61714 + spin_unlock(&gr_learn_lock);
61715 + if (tmp)
61716 + vfree(tmp);
61717 + if (learn_buffer_user != NULL) {
61718 + vfree(learn_buffer_user);
61719 + learn_buffer_user = NULL;
61720 + }
61721 + learn_buffer_len = 0;
61722 + mutex_unlock(&gr_learn_user_mutex);
61723 +
61724 + return;
61725 +}
61726 +
61727 +void
61728 +gr_add_learn_entry(const char *fmt, ...)
61729 +{
61730 + va_list args;
61731 + unsigned int len;
61732 +
61733 + if (!gr_learn_attached)
61734 + return;
61735 +
61736 + spin_lock(&gr_learn_lock);
61737 +
61738 + /* leave a gap at the end so we know when it's "full" but don't have to
61739 + compute the exact length of the string we're trying to append
61740 + */
61741 + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
61742 + spin_unlock(&gr_learn_lock);
61743 + wake_up_interruptible(&learn_wait);
61744 + return;
61745 + }
61746 + if (learn_buffer == NULL) {
61747 + spin_unlock(&gr_learn_lock);
61748 + return;
61749 + }
61750 +
61751 + va_start(args, fmt);
61752 + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
61753 + va_end(args);
61754 +
61755 + learn_buffer_len += len + 1;
61756 +
61757 + spin_unlock(&gr_learn_lock);
61758 + wake_up_interruptible(&learn_wait);
61759 +
61760 + return;
61761 +}
61762 +
61763 +static int
61764 +open_learn(struct inode *inode, struct file *file)
61765 +{
61766 + if (file->f_mode & FMODE_READ && gr_learn_attached)
61767 + return -EBUSY;
61768 + if (file->f_mode & FMODE_READ) {
61769 + int retval = 0;
61770 + mutex_lock(&gr_learn_user_mutex);
61771 + if (learn_buffer == NULL)
61772 + learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
61773 + if (learn_buffer_user == NULL)
61774 + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
61775 + if (learn_buffer == NULL) {
61776 + retval = -ENOMEM;
61777 + goto out_error;
61778 + }
61779 + if (learn_buffer_user == NULL) {
61780 + retval = -ENOMEM;
61781 + goto out_error;
61782 + }
61783 + learn_buffer_len = 0;
61784 + learn_buffer_user_len = 0;
61785 + gr_learn_attached = 1;
61786 +out_error:
61787 + mutex_unlock(&gr_learn_user_mutex);
61788 + return retval;
61789 + }
61790 + return 0;
61791 +}
61792 +
61793 +static int
61794 +close_learn(struct inode *inode, struct file *file)
61795 +{
61796 + if (file->f_mode & FMODE_READ) {
61797 + char *tmp = NULL;
61798 + mutex_lock(&gr_learn_user_mutex);
61799 + spin_lock(&gr_learn_lock);
61800 + tmp = learn_buffer;
61801 + learn_buffer = NULL;
61802 + spin_unlock(&gr_learn_lock);
61803 + if (tmp)
61804 + vfree(tmp);
61805 + if (learn_buffer_user != NULL) {
61806 + vfree(learn_buffer_user);
61807 + learn_buffer_user = NULL;
61808 + }
61809 + learn_buffer_len = 0;
61810 + learn_buffer_user_len = 0;
61811 + gr_learn_attached = 0;
61812 + mutex_unlock(&gr_learn_user_mutex);
61813 + }
61814 +
61815 + return 0;
61816 +}
61817 +
61818 +const struct file_operations grsec_fops = {
61819 + .read = read_learn,
61820 + .write = write_grsec_handler,
61821 + .open = open_learn,
61822 + .release = close_learn,
61823 + .poll = poll_learn,
61824 +};
61825 diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
61826 new file mode 100644
61827 index 0000000..70b2179
61828 --- /dev/null
61829 +++ b/grsecurity/gracl_res.c
61830 @@ -0,0 +1,67 @@
61831 +#include <linux/kernel.h>
61832 +#include <linux/sched.h>
61833 +#include <linux/gracl.h>
61834 +#include <linux/grinternal.h>
61835 +
61836 +static const char *restab_log[] = {
61837 + [RLIMIT_CPU] = "RLIMIT_CPU",
61838 + [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
61839 + [RLIMIT_DATA] = "RLIMIT_DATA",
61840 + [RLIMIT_STACK] = "RLIMIT_STACK",
61841 + [RLIMIT_CORE] = "RLIMIT_CORE",
61842 + [RLIMIT_RSS] = "RLIMIT_RSS",
61843 + [RLIMIT_NPROC] = "RLIMIT_NPROC",
61844 + [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
61845 + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
61846 + [RLIMIT_AS] = "RLIMIT_AS",
61847 + [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
61848 + [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
61849 + [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
61850 + [RLIMIT_NICE] = "RLIMIT_NICE",
61851 + [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
61852 + [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
61853 + [GR_CRASH_RES] = "RLIMIT_CRASH"
61854 +};
61855 +
61856 +void
61857 +gr_log_resource(const struct task_struct *task,
61858 + const int res, const unsigned long wanted, const int gt)
61859 +{
61860 + const struct cred *cred;
61861 + unsigned long rlim;
61862 +
61863 + if (!gr_acl_is_enabled() && !grsec_resource_logging)
61864 + return;
61865 +
61866 + // not yet supported resource
61867 + if (unlikely(!restab_log[res]))
61868 + return;
61869 +
61870 + if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
61871 + rlim = task->signal->rlim[res].rlim_max;
61872 + else
61873 + rlim = task->signal->rlim[res].rlim_cur;
61874 + if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
61875 + return;
61876 +
61877 + rcu_read_lock();
61878 + cred = __task_cred(task);
61879 +
61880 + if (res == RLIMIT_NPROC &&
61881 + (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
61882 + cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
61883 + goto out_rcu_unlock;
61884 + else if (res == RLIMIT_MEMLOCK &&
61885 + cap_raised(cred->cap_effective, CAP_IPC_LOCK))
61886 + goto out_rcu_unlock;
61887 + else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
61888 + goto out_rcu_unlock;
61889 + rcu_read_unlock();
61890 +
61891 + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
61892 +
61893 + return;
61894 +out_rcu_unlock:
61895 + rcu_read_unlock();
61896 + return;
61897 +}
61898 diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
61899 new file mode 100644
61900 index 0000000..1d1b734
61901 --- /dev/null
61902 +++ b/grsecurity/gracl_segv.c
61903 @@ -0,0 +1,284 @@
61904 +#include <linux/kernel.h>
61905 +#include <linux/mm.h>
61906 +#include <asm/uaccess.h>
61907 +#include <asm/errno.h>
61908 +#include <asm/mman.h>
61909 +#include <net/sock.h>
61910 +#include <linux/file.h>
61911 +#include <linux/fs.h>
61912 +#include <linux/net.h>
61913 +#include <linux/in.h>
61914 +#include <linux/smp_lock.h>
61915 +#include <linux/slab.h>
61916 +#include <linux/types.h>
61917 +#include <linux/sched.h>
61918 +#include <linux/timer.h>
61919 +#include <linux/gracl.h>
61920 +#include <linux/grsecurity.h>
61921 +#include <linux/grinternal.h>
61922 +
61923 +static struct crash_uid *uid_set;
61924 +static unsigned short uid_used;
61925 +static DEFINE_SPINLOCK(gr_uid_lock);
61926 +extern rwlock_t gr_inode_lock;
61927 +extern struct acl_subject_label *
61928 + lookup_acl_subj_label(const ino_t inode, const dev_t dev,
61929 + struct acl_role_label *role);
61930 +extern int gr_fake_force_sig(int sig, struct task_struct *t);
61931 +
61932 +int
61933 +gr_init_uidset(void)
61934 +{
61935 + uid_set =
61936 + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
61937 + uid_used = 0;
61938 +
61939 + return uid_set ? 1 : 0;
61940 +}
61941 +
61942 +void
61943 +gr_free_uidset(void)
61944 +{
61945 + if (uid_set)
61946 + kfree(uid_set);
61947 +
61948 + return;
61949 +}
61950 +
61951 +int
61952 +gr_find_uid(const uid_t uid)
61953 +{
61954 + struct crash_uid *tmp = uid_set;
61955 + uid_t buid;
61956 + int low = 0, high = uid_used - 1, mid;
61957 +
61958 + while (high >= low) {
61959 + mid = (low + high) >> 1;
61960 + buid = tmp[mid].uid;
61961 + if (buid == uid)
61962 + return mid;
61963 + if (buid > uid)
61964 + high = mid - 1;
61965 + if (buid < uid)
61966 + low = mid + 1;
61967 + }
61968 +
61969 + return -1;
61970 +}
61971 +
61972 +static __inline__ void
61973 +gr_insertsort(void)
61974 +{
61975 + unsigned short i, j;
61976 + struct crash_uid index;
61977 +
61978 + for (i = 1; i < uid_used; i++) {
61979 + index = uid_set[i];
61980 + j = i;
61981 + while ((j > 0) && uid_set[j - 1].uid > index.uid) {
61982 + uid_set[j] = uid_set[j - 1];
61983 + j--;
61984 + }
61985 + uid_set[j] = index;
61986 + }
61987 +
61988 + return;
61989 +}
61990 +
61991 +static __inline__ void
61992 +gr_insert_uid(const uid_t uid, const unsigned long expires)
61993 +{
61994 + int loc;
61995 +
61996 + if (uid_used == GR_UIDTABLE_MAX)
61997 + return;
61998 +
61999 + loc = gr_find_uid(uid);
62000 +
62001 + if (loc >= 0) {
62002 + uid_set[loc].expires = expires;
62003 + return;
62004 + }
62005 +
62006 + uid_set[uid_used].uid = uid;
62007 + uid_set[uid_used].expires = expires;
62008 + uid_used++;
62009 +
62010 + gr_insertsort();
62011 +
62012 + return;
62013 +}
62014 +
62015 +void
62016 +gr_remove_uid(const unsigned short loc)
62017 +{
62018 + unsigned short i;
62019 +
62020 + for (i = loc + 1; i < uid_used; i++)
62021 + uid_set[i - 1] = uid_set[i];
62022 +
62023 + uid_used--;
62024 +
62025 + return;
62026 +}
62027 +
62028 +int
62029 +gr_check_crash_uid(const uid_t uid)
62030 +{
62031 + int loc;
62032 + int ret = 0;
62033 +
62034 + if (unlikely(!gr_acl_is_enabled()))
62035 + return 0;
62036 +
62037 + spin_lock(&gr_uid_lock);
62038 + loc = gr_find_uid(uid);
62039 +
62040 + if (loc < 0)
62041 + goto out_unlock;
62042 +
62043 + if (time_before_eq(uid_set[loc].expires, get_seconds()))
62044 + gr_remove_uid(loc);
62045 + else
62046 + ret = 1;
62047 +
62048 +out_unlock:
62049 + spin_unlock(&gr_uid_lock);
62050 + return ret;
62051 +}
62052 +
62053 +static __inline__ int
62054 +proc_is_setxid(const struct cred *cred)
62055 +{
62056 + if (cred->uid != cred->euid || cred->uid != cred->suid ||
62057 + cred->uid != cred->fsuid)
62058 + return 1;
62059 + if (cred->gid != cred->egid || cred->gid != cred->sgid ||
62060 + cred->gid != cred->fsgid)
62061 + return 1;
62062 +
62063 + return 0;
62064 +}
62065 +
62066 +void
62067 +gr_handle_crash(struct task_struct *task, const int sig)
62068 +{
62069 + struct acl_subject_label *curr;
62070 + struct task_struct *tsk, *tsk2;
62071 + const struct cred *cred;
62072 + const struct cred *cred2;
62073 +
62074 + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
62075 + return;
62076 +
62077 + if (unlikely(!gr_acl_is_enabled()))
62078 + return;
62079 +
62080 + curr = task->acl;
62081 +
62082 + if (!(curr->resmask & (1 << GR_CRASH_RES)))
62083 + return;
62084 +
62085 + if (time_before_eq(curr->expires, get_seconds())) {
62086 + curr->expires = 0;
62087 + curr->crashes = 0;
62088 + }
62089 +
62090 + curr->crashes++;
62091 +
62092 + if (!curr->expires)
62093 + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
62094 +
62095 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
62096 + time_after(curr->expires, get_seconds())) {
62097 + rcu_read_lock();
62098 + cred = __task_cred(task);
62099 + if (cred->uid && proc_is_setxid(cred)) {
62100 + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
62101 + spin_lock(&gr_uid_lock);
62102 + gr_insert_uid(cred->uid, curr->expires);
62103 + spin_unlock(&gr_uid_lock);
62104 + curr->expires = 0;
62105 + curr->crashes = 0;
62106 + read_lock(&tasklist_lock);
62107 + do_each_thread(tsk2, tsk) {
62108 + cred2 = __task_cred(tsk);
62109 + if (tsk != task && cred2->uid == cred->uid)
62110 + gr_fake_force_sig(SIGKILL, tsk);
62111 + } while_each_thread(tsk2, tsk);
62112 + read_unlock(&tasklist_lock);
62113 + } else {
62114 + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
62115 + read_lock(&tasklist_lock);
62116 + read_lock(&grsec_exec_file_lock);
62117 + do_each_thread(tsk2, tsk) {
62118 + if (likely(tsk != task)) {
62119 + // if this thread has the same subject as the one that triggered
62120 + // RES_CRASH and it's the same binary, kill it
62121 + if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
62122 + gr_fake_force_sig(SIGKILL, tsk);
62123 + }
62124 + } while_each_thread(tsk2, tsk);
62125 + read_unlock(&grsec_exec_file_lock);
62126 + read_unlock(&tasklist_lock);
62127 + }
62128 + rcu_read_unlock();
62129 + }
62130 +
62131 + return;
62132 +}
62133 +
62134 +int
62135 +gr_check_crash_exec(const struct file *filp)
62136 +{
62137 + struct acl_subject_label *curr;
62138 +
62139 + if (unlikely(!gr_acl_is_enabled()))
62140 + return 0;
62141 +
62142 + read_lock(&gr_inode_lock);
62143 + curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
62144 + filp->f_path.dentry->d_inode->i_sb->s_dev,
62145 + current->role);
62146 + read_unlock(&gr_inode_lock);
62147 +
62148 + if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
62149 + (!curr->crashes && !curr->expires))
62150 + return 0;
62151 +
62152 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
62153 + time_after(curr->expires, get_seconds()))
62154 + return 1;
62155 + else if (time_before_eq(curr->expires, get_seconds())) {
62156 + curr->crashes = 0;
62157 + curr->expires = 0;
62158 + }
62159 +
62160 + return 0;
62161 +}
62162 +
62163 +void
62164 +gr_handle_alertkill(struct task_struct *task)
62165 +{
62166 + struct acl_subject_label *curracl;
62167 + __u32 curr_ip;
62168 + struct task_struct *p, *p2;
62169 +
62170 + if (unlikely(!gr_acl_is_enabled()))
62171 + return;
62172 +
62173 + curracl = task->acl;
62174 + curr_ip = task->signal->curr_ip;
62175 +
62176 + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
62177 + read_lock(&tasklist_lock);
62178 + do_each_thread(p2, p) {
62179 + if (p->signal->curr_ip == curr_ip)
62180 + gr_fake_force_sig(SIGKILL, p);
62181 + } while_each_thread(p2, p);
62182 + read_unlock(&tasklist_lock);
62183 + } else if (curracl->mode & GR_KILLPROC)
62184 + gr_fake_force_sig(SIGKILL, task);
62185 +
62186 + return;
62187 +}
62188 diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
62189 new file mode 100644
62190 index 0000000..9d83a69
62191 --- /dev/null
62192 +++ b/grsecurity/gracl_shm.c
62193 @@ -0,0 +1,40 @@
62194 +#include <linux/kernel.h>
62195 +#include <linux/mm.h>
62196 +#include <linux/sched.h>
62197 +#include <linux/file.h>
62198 +#include <linux/ipc.h>
62199 +#include <linux/gracl.h>
62200 +#include <linux/grsecurity.h>
62201 +#include <linux/grinternal.h>
62202 +
62203 +int
62204 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62205 + const time_t shm_createtime, const uid_t cuid, const int shmid)
62206 +{
62207 + struct task_struct *task;
62208 +
62209 + if (!gr_acl_is_enabled())
62210 + return 1;
62211 +
62212 + rcu_read_lock();
62213 + read_lock(&tasklist_lock);
62214 +
62215 + task = find_task_by_vpid(shm_cprid);
62216 +
62217 + if (unlikely(!task))
62218 + task = find_task_by_vpid(shm_lapid);
62219 +
62220 + if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
62221 + (task->pid == shm_lapid)) &&
62222 + (task->acl->mode & GR_PROTSHM) &&
62223 + (task->acl != current->acl))) {
62224 + read_unlock(&tasklist_lock);
62225 + rcu_read_unlock();
62226 + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
62227 + return 0;
62228 + }
62229 + read_unlock(&tasklist_lock);
62230 + rcu_read_unlock();
62231 +
62232 + return 1;
62233 +}
62234 diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
62235 new file mode 100644
62236 index 0000000..bc0be01
62237 --- /dev/null
62238 +++ b/grsecurity/grsec_chdir.c
62239 @@ -0,0 +1,19 @@
62240 +#include <linux/kernel.h>
62241 +#include <linux/sched.h>
62242 +#include <linux/fs.h>
62243 +#include <linux/file.h>
62244 +#include <linux/grsecurity.h>
62245 +#include <linux/grinternal.h>
62246 +
62247 +void
62248 +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
62249 +{
62250 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
62251 + if ((grsec_enable_chdir && grsec_enable_group &&
62252 + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
62253 + !grsec_enable_group)) {
62254 + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
62255 + }
62256 +#endif
62257 + return;
62258 +}
62259 diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
62260 new file mode 100644
62261 index 0000000..197bdd5
62262 --- /dev/null
62263 +++ b/grsecurity/grsec_chroot.c
62264 @@ -0,0 +1,386 @@
62265 +#include <linux/kernel.h>
62266 +#include <linux/module.h>
62267 +#include <linux/sched.h>
62268 +#include <linux/file.h>
62269 +#include <linux/fs.h>
62270 +#include <linux/mount.h>
62271 +#include <linux/types.h>
62272 +#include <linux/pid_namespace.h>
62273 +#include <linux/grsecurity.h>
62274 +#include <linux/grinternal.h>
62275 +
62276 +void gr_set_chroot_entries(struct task_struct *task, struct path *path)
62277 +{
62278 +#ifdef CONFIG_GRKERNSEC
62279 + if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
62280 + path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
62281 + task->gr_is_chrooted = 1;
62282 + else
62283 + task->gr_is_chrooted = 0;
62284 +
62285 + task->gr_chroot_dentry = path->dentry;
62286 +#endif
62287 + return;
62288 +}
62289 +
62290 +void gr_clear_chroot_entries(struct task_struct *task)
62291 +{
62292 +#ifdef CONFIG_GRKERNSEC
62293 + task->gr_is_chrooted = 0;
62294 + task->gr_chroot_dentry = NULL;
62295 +#endif
62296 + return;
62297 +}
62298 +
62299 +int
62300 +gr_handle_chroot_unix(const pid_t pid)
62301 +{
62302 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
62303 + struct task_struct *p;
62304 +
62305 + if (unlikely(!grsec_enable_chroot_unix))
62306 + return 1;
62307 +
62308 + if (likely(!proc_is_chrooted(current)))
62309 + return 1;
62310 +
62311 + rcu_read_lock();
62312 + read_lock(&tasklist_lock);
62313 +
62314 + p = find_task_by_vpid_unrestricted(pid);
62315 + if (unlikely(p && !have_same_root(current, p))) {
62316 + read_unlock(&tasklist_lock);
62317 + rcu_read_unlock();
62318 + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
62319 + return 0;
62320 + }
62321 + read_unlock(&tasklist_lock);
62322 + rcu_read_unlock();
62323 +#endif
62324 + return 1;
62325 +}
62326 +
62327 +int
62328 +gr_handle_chroot_nice(void)
62329 +{
62330 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
62331 + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
62332 + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
62333 + return -EPERM;
62334 + }
62335 +#endif
62336 + return 0;
62337 +}
62338 +
62339 +int
62340 +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
62341 +{
62342 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
62343 + if (grsec_enable_chroot_nice && (niceval < task_nice(p))
62344 + && proc_is_chrooted(current)) {
62345 + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
62346 + return -EACCES;
62347 + }
62348 +#endif
62349 + return 0;
62350 +}
62351 +
62352 +int
62353 +gr_handle_chroot_rawio(const struct inode *inode)
62354 +{
62355 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
62356 + if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
62357 + inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
62358 + return 1;
62359 +#endif
62360 + return 0;
62361 +}
62362 +
62363 +int
62364 +gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
62365 +{
62366 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
62367 + struct task_struct *p;
62368 + int ret = 0;
62369 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
62370 + return ret;
62371 +
62372 + read_lock(&tasklist_lock);
62373 + do_each_pid_task(pid, type, p) {
62374 + if (!have_same_root(current, p)) {
62375 + ret = 1;
62376 + goto out;
62377 + }
62378 + } while_each_pid_task(pid, type, p);
62379 +out:
62380 + read_unlock(&tasklist_lock);
62381 + return ret;
62382 +#endif
62383 + return 0;
62384 +}
62385 +
62386 +int
62387 +gr_pid_is_chrooted(struct task_struct *p)
62388 +{
62389 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
62390 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
62391 + return 0;
62392 +
62393 + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
62394 + !have_same_root(current, p)) {
62395 + return 1;
62396 + }
62397 +#endif
62398 + return 0;
62399 +}
62400 +
62401 +EXPORT_SYMBOL(gr_pid_is_chrooted);
62402 +
62403 +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
62404 +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
62405 +{
62406 + struct dentry *dentry = (struct dentry *)u_dentry;
62407 + struct vfsmount *mnt = (struct vfsmount *)u_mnt;
62408 + struct dentry *realroot;
62409 + struct vfsmount *realrootmnt;
62410 + struct dentry *currentroot;
62411 + struct vfsmount *currentmnt;
62412 + struct task_struct *reaper = &init_task;
62413 + int ret = 1;
62414 +
62415 + read_lock(&reaper->fs->lock);
62416 + realrootmnt = mntget(reaper->fs->root.mnt);
62417 + realroot = dget(reaper->fs->root.dentry);
62418 + read_unlock(&reaper->fs->lock);
62419 +
62420 + read_lock(&current->fs->lock);
62421 + currentmnt = mntget(current->fs->root.mnt);
62422 + currentroot = dget(current->fs->root.dentry);
62423 + read_unlock(&current->fs->lock);
62424 +
62425 + spin_lock(&dcache_lock);
62426 + for (;;) {
62427 + if (unlikely((dentry == realroot && mnt == realrootmnt)
62428 + || (dentry == currentroot && mnt == currentmnt)))
62429 + break;
62430 + if (unlikely(dentry == mnt->mnt_root || IS_ROOT(dentry))) {
62431 + if (mnt->mnt_parent == mnt)
62432 + break;
62433 + dentry = mnt->mnt_mountpoint;
62434 + mnt = mnt->mnt_parent;
62435 + continue;
62436 + }
62437 + dentry = dentry->d_parent;
62438 + }
62439 + spin_unlock(&dcache_lock);
62440 +
62441 + dput(currentroot);
62442 + mntput(currentmnt);
62443 +
62444 + /* access is outside of chroot */
62445 + if (dentry == realroot && mnt == realrootmnt)
62446 + ret = 0;
62447 +
62448 + dput(realroot);
62449 + mntput(realrootmnt);
62450 + return ret;
62451 +}
62452 +#endif
62453 +
62454 +int
62455 +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
62456 +{
62457 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
62458 + if (!grsec_enable_chroot_fchdir)
62459 + return 1;
62460 +
62461 + if (!proc_is_chrooted(current))
62462 + return 1;
62463 + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
62464 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
62465 + return 0;
62466 + }
62467 +#endif
62468 + return 1;
62469 +}
62470 +
62471 +int
62472 +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62473 + const time_t shm_createtime)
62474 +{
62475 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
62476 + struct task_struct *p;
62477 + time_t starttime;
62478 +
62479 + if (unlikely(!grsec_enable_chroot_shmat))
62480 + return 1;
62481 +
62482 + if (likely(!proc_is_chrooted(current)))
62483 + return 1;
62484 +
62485 + rcu_read_lock();
62486 + read_lock(&tasklist_lock);
62487 +
62488 + if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
62489 + starttime = p->start_time.tv_sec;
62490 + if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
62491 + if (have_same_root(current, p)) {
62492 + goto allow;
62493 + } else {
62494 + read_unlock(&tasklist_lock);
62495 + rcu_read_unlock();
62496 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
62497 + return 0;
62498 + }
62499 + }
62500 + /* creator exited, pid reuse, fall through to next check */
62501 + }
62502 + if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
62503 + if (unlikely(!have_same_root(current, p))) {
62504 + read_unlock(&tasklist_lock);
62505 + rcu_read_unlock();
62506 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
62507 + return 0;
62508 + }
62509 + }
62510 +
62511 +allow:
62512 + read_unlock(&tasklist_lock);
62513 + rcu_read_unlock();
62514 +#endif
62515 + return 1;
62516 +}
62517 +
62518 +void
62519 +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
62520 +{
62521 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
62522 + if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
62523 + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
62524 +#endif
62525 + return;
62526 +}
62527 +
62528 +int
62529 +gr_handle_chroot_mknod(const struct dentry *dentry,
62530 + const struct vfsmount *mnt, const int mode)
62531 +{
62532 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
62533 + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
62534 + proc_is_chrooted(current)) {
62535 + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
62536 + return -EPERM;
62537 + }
62538 +#endif
62539 + return 0;
62540 +}
62541 +
62542 +int
62543 +gr_handle_chroot_mount(const struct dentry *dentry,
62544 + const struct vfsmount *mnt, const char *dev_name)
62545 +{
62546 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
62547 + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
62548 + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none" , dentry, mnt);
62549 + return -EPERM;
62550 + }
62551 +#endif
62552 + return 0;
62553 +}
62554 +
62555 +int
62556 +gr_handle_chroot_pivot(void)
62557 +{
62558 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
62559 + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
62560 + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
62561 + return -EPERM;
62562 + }
62563 +#endif
62564 + return 0;
62565 +}
62566 +
62567 +int
62568 +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
62569 +{
62570 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
62571 + if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
62572 + !gr_is_outside_chroot(dentry, mnt)) {
62573 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
62574 + return -EPERM;
62575 + }
62576 +#endif
62577 + return 0;
62578 +}
62579 +
62580 +extern const char *captab_log[];
62581 +extern int captab_log_entries;
62582 +
62583 +int
62584 +gr_chroot_is_capable(const int cap)
62585 +{
62586 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
62587 + if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
62588 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
62589 + if (cap_raised(chroot_caps, cap)) {
62590 + const struct cred *creds = current_cred();
62591 + if (cap_raised(creds->cap_effective, cap) && cap < captab_log_entries) {
62592 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, current, captab_log[cap]);
62593 + }
62594 + return 0;
62595 + }
62596 + }
62597 +#endif
62598 + return 1;
62599 +}
62600 +
62601 +int
62602 +gr_chroot_is_capable_nolog(const int cap)
62603 +{
62604 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
62605 + if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
62606 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
62607 + if (cap_raised(chroot_caps, cap)) {
62608 + return 0;
62609 + }
62610 + }
62611 +#endif
62612 + return 1;
62613 +}
62614 +
62615 +int
62616 +gr_handle_chroot_sysctl(const int op)
62617 +{
62618 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
62619 + if (grsec_enable_chroot_sysctl && proc_is_chrooted(current)
62620 + && (op & MAY_WRITE))
62621 + return -EACCES;
62622 +#endif
62623 + return 0;
62624 +}
62625 +
62626 +void
62627 +gr_handle_chroot_chdir(struct path *path)
62628 +{
62629 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
62630 + if (grsec_enable_chroot_chdir)
62631 + set_fs_pwd(current->fs, path);
62632 +#endif
62633 + return;
62634 +}
62635 +
62636 +int
62637 +gr_handle_chroot_chmod(const struct dentry *dentry,
62638 + const struct vfsmount *mnt, const int mode)
62639 +{
62640 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
62641 + /* allow chmod +s on directories, but not on files */
62642 + if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
62643 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
62644 + proc_is_chrooted(current)) {
62645 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
62646 + return -EPERM;
62647 + }
62648 +#endif
62649 + return 0;
62650 +}
62651 diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
62652 new file mode 100644
62653 index 0000000..40545bf
62654 --- /dev/null
62655 +++ b/grsecurity/grsec_disabled.c
62656 @@ -0,0 +1,437 @@
62657 +#include <linux/kernel.h>
62658 +#include <linux/module.h>
62659 +#include <linux/sched.h>
62660 +#include <linux/file.h>
62661 +#include <linux/fs.h>
62662 +#include <linux/kdev_t.h>
62663 +#include <linux/net.h>
62664 +#include <linux/in.h>
62665 +#include <linux/ip.h>
62666 +#include <linux/skbuff.h>
62667 +#include <linux/sysctl.h>
62668 +
62669 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
62670 +void
62671 +pax_set_initial_flags(struct linux_binprm *bprm)
62672 +{
62673 + return;
62674 +}
62675 +#endif
62676 +
62677 +#ifdef CONFIG_SYSCTL
62678 +__u32
62679 +gr_handle_sysctl(const struct ctl_table * table, const int op)
62680 +{
62681 + return 0;
62682 +}
62683 +#endif
62684 +
62685 +#ifdef CONFIG_TASKSTATS
62686 +int gr_is_taskstats_denied(int pid)
62687 +{
62688 + return 0;
62689 +}
62690 +#endif
62691 +
62692 +int
62693 +gr_acl_is_enabled(void)
62694 +{
62695 + return 0;
62696 +}
62697 +
62698 +void
62699 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
62700 +{
62701 + return;
62702 +}
62703 +
62704 +int
62705 +gr_handle_rawio(const struct inode *inode)
62706 +{
62707 + return 0;
62708 +}
62709 +
62710 +void
62711 +gr_acl_handle_psacct(struct task_struct *task, const long code)
62712 +{
62713 + return;
62714 +}
62715 +
62716 +int
62717 +gr_handle_ptrace(struct task_struct *task, const long request)
62718 +{
62719 + return 0;
62720 +}
62721 +
62722 +int
62723 +gr_handle_proc_ptrace(struct task_struct *task)
62724 +{
62725 + return 0;
62726 +}
62727 +
62728 +void
62729 +gr_learn_resource(const struct task_struct *task,
62730 + const int res, const unsigned long wanted, const int gt)
62731 +{
62732 + return;
62733 +}
62734 +
62735 +int
62736 +gr_set_acls(const int type)
62737 +{
62738 + return 0;
62739 +}
62740 +
62741 +int
62742 +gr_check_hidden_task(const struct task_struct *tsk)
62743 +{
62744 + return 0;
62745 +}
62746 +
62747 +int
62748 +gr_check_protected_task(const struct task_struct *task)
62749 +{
62750 + return 0;
62751 +}
62752 +
62753 +int
62754 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
62755 +{
62756 + return 0;
62757 +}
62758 +
62759 +void
62760 +gr_copy_label(struct task_struct *tsk)
62761 +{
62762 + return;
62763 +}
62764 +
62765 +void
62766 +gr_set_pax_flags(struct task_struct *task)
62767 +{
62768 + return;
62769 +}
62770 +
62771 +int
62772 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
62773 + const int unsafe_share)
62774 +{
62775 + return 0;
62776 +}
62777 +
62778 +void
62779 +gr_handle_delete(const ino_t ino, const dev_t dev)
62780 +{
62781 + return;
62782 +}
62783 +
62784 +void
62785 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
62786 +{
62787 + return;
62788 +}
62789 +
62790 +void
62791 +gr_handle_crash(struct task_struct *task, const int sig)
62792 +{
62793 + return;
62794 +}
62795 +
62796 +int
62797 +gr_check_crash_exec(const struct file *filp)
62798 +{
62799 + return 0;
62800 +}
62801 +
62802 +int
62803 +gr_check_crash_uid(const uid_t uid)
62804 +{
62805 + return 0;
62806 +}
62807 +
62808 +void
62809 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
62810 + struct dentry *old_dentry,
62811 + struct dentry *new_dentry,
62812 + struct vfsmount *mnt, const __u8 replace)
62813 +{
62814 + return;
62815 +}
62816 +
62817 +int
62818 +gr_search_socket(const int family, const int type, const int protocol)
62819 +{
62820 + return 1;
62821 +}
62822 +
62823 +int
62824 +gr_search_connectbind(const int mode, const struct socket *sock,
62825 + const struct sockaddr_in *addr)
62826 +{
62827 + return 0;
62828 +}
62829 +
62830 +void
62831 +gr_handle_alertkill(struct task_struct *task)
62832 +{
62833 + return;
62834 +}
62835 +
62836 +__u32
62837 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
62838 +{
62839 + return 1;
62840 +}
62841 +
62842 +__u32
62843 +gr_acl_handle_hidden_file(const struct dentry * dentry,
62844 + const struct vfsmount * mnt)
62845 +{
62846 + return 1;
62847 +}
62848 +
62849 +__u32
62850 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
62851 + int acc_mode)
62852 +{
62853 + return 1;
62854 +}
62855 +
62856 +__u32
62857 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
62858 +{
62859 + return 1;
62860 +}
62861 +
62862 +__u32
62863 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
62864 +{
62865 + return 1;
62866 +}
62867 +
62868 +int
62869 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
62870 + unsigned int *vm_flags)
62871 +{
62872 + return 1;
62873 +}
62874 +
62875 +__u32
62876 +gr_acl_handle_truncate(const struct dentry * dentry,
62877 + const struct vfsmount * mnt)
62878 +{
62879 + return 1;
62880 +}
62881 +
62882 +__u32
62883 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
62884 +{
62885 + return 1;
62886 +}
62887 +
62888 +__u32
62889 +gr_acl_handle_access(const struct dentry * dentry,
62890 + const struct vfsmount * mnt, const int fmode)
62891 +{
62892 + return 1;
62893 +}
62894 +
62895 +__u32
62896 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
62897 + umode_t *mode)
62898 +{
62899 + return 1;
62900 +}
62901 +
62902 +__u32
62903 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
62904 +{
62905 + return 1;
62906 +}
62907 +
62908 +__u32
62909 +gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
62910 +{
62911 + return 1;
62912 +}
62913 +
62914 +void
62915 +grsecurity_init(void)
62916 +{
62917 + return;
62918 +}
62919 +
62920 +umode_t gr_acl_umask(void)
62921 +{
62922 + return 0;
62923 +}
62924 +
62925 +__u32
62926 +gr_acl_handle_mknod(const struct dentry * new_dentry,
62927 + const struct dentry * parent_dentry,
62928 + const struct vfsmount * parent_mnt,
62929 + const int mode)
62930 +{
62931 + return 1;
62932 +}
62933 +
62934 +__u32
62935 +gr_acl_handle_mkdir(const struct dentry * new_dentry,
62936 + const struct dentry * parent_dentry,
62937 + const struct vfsmount * parent_mnt)
62938 +{
62939 + return 1;
62940 +}
62941 +
62942 +__u32
62943 +gr_acl_handle_symlink(const struct dentry * new_dentry,
62944 + const struct dentry * parent_dentry,
62945 + const struct vfsmount * parent_mnt, const char *from)
62946 +{
62947 + return 1;
62948 +}
62949 +
62950 +__u32
62951 +gr_acl_handle_link(const struct dentry * new_dentry,
62952 + const struct dentry * parent_dentry,
62953 + const struct vfsmount * parent_mnt,
62954 + const struct dentry * old_dentry,
62955 + const struct vfsmount * old_mnt, const char *to)
62956 +{
62957 + return 1;
62958 +}
62959 +
62960 +int
62961 +gr_acl_handle_rename(const struct dentry *new_dentry,
62962 + const struct dentry *parent_dentry,
62963 + const struct vfsmount *parent_mnt,
62964 + const struct dentry *old_dentry,
62965 + const struct inode *old_parent_inode,
62966 + const struct vfsmount *old_mnt, const char *newname)
62967 +{
62968 + return 0;
62969 +}
62970 +
62971 +int
62972 +gr_acl_handle_filldir(const struct file *file, const char *name,
62973 + const int namelen, const ino_t ino)
62974 +{
62975 + return 1;
62976 +}
62977 +
62978 +int
62979 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62980 + const time_t shm_createtime, const uid_t cuid, const int shmid)
62981 +{
62982 + return 1;
62983 +}
62984 +
62985 +int
62986 +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
62987 +{
62988 + return 0;
62989 +}
62990 +
62991 +int
62992 +gr_search_accept(const struct socket *sock)
62993 +{
62994 + return 0;
62995 +}
62996 +
62997 +int
62998 +gr_search_listen(const struct socket *sock)
62999 +{
63000 + return 0;
63001 +}
63002 +
63003 +int
63004 +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
63005 +{
63006 + return 0;
63007 +}
63008 +
63009 +__u32
63010 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
63011 +{
63012 + return 1;
63013 +}
63014 +
63015 +__u32
63016 +gr_acl_handle_creat(const struct dentry * dentry,
63017 + const struct dentry * p_dentry,
63018 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
63019 + const int imode)
63020 +{
63021 + return 1;
63022 +}
63023 +
63024 +void
63025 +gr_acl_handle_exit(void)
63026 +{
63027 + return;
63028 +}
63029 +
63030 +int
63031 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
63032 +{
63033 + return 1;
63034 +}
63035 +
63036 +void
63037 +gr_set_role_label(const uid_t uid, const gid_t gid)
63038 +{
63039 + return;
63040 +}
63041 +
63042 +int
63043 +gr_acl_handle_procpidmem(const struct task_struct *task)
63044 +{
63045 + return 0;
63046 +}
63047 +
63048 +int
63049 +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
63050 +{
63051 + return 0;
63052 +}
63053 +
63054 +int
63055 +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
63056 +{
63057 + return 0;
63058 +}
63059 +
63060 +void
63061 +gr_set_kernel_label(struct task_struct *task)
63062 +{
63063 + return;
63064 +}
63065 +
63066 +int
63067 +gr_check_user_change(int real, int effective, int fs)
63068 +{
63069 + return 0;
63070 +}
63071 +
63072 +int
63073 +gr_check_group_change(int real, int effective, int fs)
63074 +{
63075 + return 0;
63076 +}
63077 +
63078 +int gr_acl_enable_at_secure(void)
63079 +{
63080 + return 0;
63081 +}
63082 +
63083 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
63084 +{
63085 + return dentry->d_inode->i_sb->s_dev;
63086 +}
63087 +
63088 +EXPORT_SYMBOL(gr_learn_resource);
63089 +EXPORT_SYMBOL(gr_set_kernel_label);
63090 +#ifdef CONFIG_SECURITY
63091 +EXPORT_SYMBOL(gr_check_user_change);
63092 +EXPORT_SYMBOL(gr_check_group_change);
63093 +#endif
63094 diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
63095 new file mode 100644
63096 index 0000000..a96e155
63097 --- /dev/null
63098 +++ b/grsecurity/grsec_exec.c
63099 @@ -0,0 +1,204 @@
63100 +#include <linux/kernel.h>
63101 +#include <linux/sched.h>
63102 +#include <linux/file.h>
63103 +#include <linux/binfmts.h>
63104 +#include <linux/smp_lock.h>
63105 +#include <linux/fs.h>
63106 +#include <linux/types.h>
63107 +#include <linux/grdefs.h>
63108 +#include <linux/grinternal.h>
63109 +#include <linux/capability.h>
63110 +#include <linux/compat.h>
63111 +#include <linux/module.h>
63112 +
63113 +#include <asm/uaccess.h>
63114 +
63115 +#ifdef CONFIG_GRKERNSEC_EXECLOG
63116 +static char gr_exec_arg_buf[132];
63117 +static DEFINE_MUTEX(gr_exec_arg_mutex);
63118 +#endif
63119 +
63120 +void
63121 +gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv)
63122 +{
63123 +#ifdef CONFIG_GRKERNSEC_EXECLOG
63124 + char *grarg = gr_exec_arg_buf;
63125 + unsigned int i, x, execlen = 0;
63126 + char c;
63127 +
63128 + if (!((grsec_enable_execlog && grsec_enable_group &&
63129 + in_group_p(grsec_audit_gid))
63130 + || (grsec_enable_execlog && !grsec_enable_group)))
63131 + return;
63132 +
63133 + mutex_lock(&gr_exec_arg_mutex);
63134 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
63135 +
63136 + if (unlikely(argv == NULL))
63137 + goto log;
63138 +
63139 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
63140 + const char __user *p;
63141 + unsigned int len;
63142 +
63143 + if (copy_from_user(&p, argv + i, sizeof(p)))
63144 + goto log;
63145 + if (!p)
63146 + goto log;
63147 + len = strnlen_user(p, 128 - execlen);
63148 + if (len > 128 - execlen)
63149 + len = 128 - execlen;
63150 + else if (len > 0)
63151 + len--;
63152 + if (copy_from_user(grarg + execlen, p, len))
63153 + goto log;
63154 +
63155 + /* rewrite unprintable characters */
63156 + for (x = 0; x < len; x++) {
63157 + c = *(grarg + execlen + x);
63158 + if (c < 32 || c > 126)
63159 + *(grarg + execlen + x) = ' ';
63160 + }
63161 +
63162 + execlen += len;
63163 + *(grarg + execlen) = ' ';
63164 + *(grarg + execlen + 1) = '\0';
63165 + execlen++;
63166 + }
63167 +
63168 + log:
63169 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
63170 + bprm->file->f_path.mnt, grarg);
63171 + mutex_unlock(&gr_exec_arg_mutex);
63172 +#endif
63173 + return;
63174 +}
63175 +
63176 +#ifdef CONFIG_COMPAT
63177 +void
63178 +gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv)
63179 +{
63180 +#ifdef CONFIG_GRKERNSEC_EXECLOG
63181 + char *grarg = gr_exec_arg_buf;
63182 + unsigned int i, x, execlen = 0;
63183 + char c;
63184 +
63185 + if (!((grsec_enable_execlog && grsec_enable_group &&
63186 + in_group_p(grsec_audit_gid))
63187 + || (grsec_enable_execlog && !grsec_enable_group)))
63188 + return;
63189 +
63190 + mutex_lock(&gr_exec_arg_mutex);
63191 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
63192 +
63193 + if (unlikely(argv == NULL))
63194 + goto log;
63195 +
63196 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
63197 + compat_uptr_t p;
63198 + unsigned int len;
63199 +
63200 + if (get_user(p, argv + i))
63201 + goto log;
63202 + len = strnlen_user(compat_ptr(p), 128 - execlen);
63203 + if (len > 128 - execlen)
63204 + len = 128 - execlen;
63205 + else if (len > 0)
63206 + len--;
63207 + else
63208 + goto log;
63209 + if (copy_from_user(grarg + execlen, compat_ptr(p), len))
63210 + goto log;
63211 +
63212 + /* rewrite unprintable characters */
63213 + for (x = 0; x < len; x++) {
63214 + c = *(grarg + execlen + x);
63215 + if (c < 32 || c > 126)
63216 + *(grarg + execlen + x) = ' ';
63217 + }
63218 +
63219 + execlen += len;
63220 + *(grarg + execlen) = ' ';
63221 + *(grarg + execlen + 1) = '\0';
63222 + execlen++;
63223 + }
63224 +
63225 + log:
63226 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
63227 + bprm->file->f_path.mnt, grarg);
63228 + mutex_unlock(&gr_exec_arg_mutex);
63229 +#endif
63230 + return;
63231 +}
63232 +#endif
63233 +
63234 +#ifdef CONFIG_GRKERNSEC
63235 +extern int gr_acl_is_capable(const int cap);
63236 +extern int gr_acl_is_capable_nolog(const int cap);
63237 +extern int gr_chroot_is_capable(const int cap);
63238 +extern int gr_chroot_is_capable_nolog(const int cap);
63239 +#endif
63240 +
63241 +const char *captab_log[] = {
63242 + "CAP_CHOWN",
63243 + "CAP_DAC_OVERRIDE",
63244 + "CAP_DAC_READ_SEARCH",
63245 + "CAP_FOWNER",
63246 + "CAP_FSETID",
63247 + "CAP_KILL",
63248 + "CAP_SETGID",
63249 + "CAP_SETUID",
63250 + "CAP_SETPCAP",
63251 + "CAP_LINUX_IMMUTABLE",
63252 + "CAP_NET_BIND_SERVICE",
63253 + "CAP_NET_BROADCAST",
63254 + "CAP_NET_ADMIN",
63255 + "CAP_NET_RAW",
63256 + "CAP_IPC_LOCK",
63257 + "CAP_IPC_OWNER",
63258 + "CAP_SYS_MODULE",
63259 + "CAP_SYS_RAWIO",
63260 + "CAP_SYS_CHROOT",
63261 + "CAP_SYS_PTRACE",
63262 + "CAP_SYS_PACCT",
63263 + "CAP_SYS_ADMIN",
63264 + "CAP_SYS_BOOT",
63265 + "CAP_SYS_NICE",
63266 + "CAP_SYS_RESOURCE",
63267 + "CAP_SYS_TIME",
63268 + "CAP_SYS_TTY_CONFIG",
63269 + "CAP_MKNOD",
63270 + "CAP_LEASE",
63271 + "CAP_AUDIT_WRITE",
63272 + "CAP_AUDIT_CONTROL",
63273 + "CAP_SETFCAP",
63274 + "CAP_MAC_OVERRIDE",
63275 + "CAP_MAC_ADMIN"
63276 +};
63277 +
63278 +int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
63279 +
63280 +int gr_is_capable(const int cap)
63281 +{
63282 +#ifdef CONFIG_GRKERNSEC
63283 + if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
63284 + return 1;
63285 + return 0;
63286 +#else
63287 + return 1;
63288 +#endif
63289 +}
63290 +
63291 +int gr_is_capable_nolog(const int cap)
63292 +{
63293 +#ifdef CONFIG_GRKERNSEC
63294 + if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
63295 + return 1;
63296 + return 0;
63297 +#else
63298 + return 1;
63299 +#endif
63300 +}
63301 +
63302 +EXPORT_SYMBOL(gr_is_capable);
63303 +EXPORT_SYMBOL(gr_is_capable_nolog);
63304 diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
63305 new file mode 100644
63306 index 0000000..d3ee748
63307 --- /dev/null
63308 +++ b/grsecurity/grsec_fifo.c
63309 @@ -0,0 +1,24 @@
63310 +#include <linux/kernel.h>
63311 +#include <linux/sched.h>
63312 +#include <linux/fs.h>
63313 +#include <linux/file.h>
63314 +#include <linux/grinternal.h>
63315 +
63316 +int
63317 +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
63318 + const struct dentry *dir, const int flag, const int acc_mode)
63319 +{
63320 +#ifdef CONFIG_GRKERNSEC_FIFO
63321 + const struct cred *cred = current_cred();
63322 +
63323 + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
63324 + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
63325 + (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
63326 + (cred->fsuid != dentry->d_inode->i_uid)) {
63327 + if (!inode_permission(dentry->d_inode, acc_mode))
63328 + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
63329 + return -EACCES;
63330 + }
63331 +#endif
63332 + return 0;
63333 +}
63334 diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
63335 new file mode 100644
63336 index 0000000..8ca18bf
63337 --- /dev/null
63338 +++ b/grsecurity/grsec_fork.c
63339 @@ -0,0 +1,23 @@
63340 +#include <linux/kernel.h>
63341 +#include <linux/sched.h>
63342 +#include <linux/grsecurity.h>
63343 +#include <linux/grinternal.h>
63344 +#include <linux/errno.h>
63345 +
63346 +void
63347 +gr_log_forkfail(const int retval)
63348 +{
63349 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
63350 + if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
63351 + switch (retval) {
63352 + case -EAGAIN:
63353 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
63354 + break;
63355 + case -ENOMEM:
63356 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
63357 + break;
63358 + }
63359 + }
63360 +#endif
63361 + return;
63362 +}
63363 diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
63364 new file mode 100644
63365 index 0000000..1e995d3
63366 --- /dev/null
63367 +++ b/grsecurity/grsec_init.c
63368 @@ -0,0 +1,278 @@
63369 +#include <linux/kernel.h>
63370 +#include <linux/sched.h>
63371 +#include <linux/mm.h>
63372 +#include <linux/smp_lock.h>
63373 +#include <linux/gracl.h>
63374 +#include <linux/slab.h>
63375 +#include <linux/vmalloc.h>
63376 +#include <linux/percpu.h>
63377 +#include <linux/module.h>
63378 +
63379 +int grsec_enable_ptrace_readexec;
63380 +int grsec_enable_setxid;
63381 +int grsec_enable_brute;
63382 +int grsec_enable_link;
63383 +int grsec_enable_dmesg;
63384 +int grsec_enable_harden_ptrace;
63385 +int grsec_enable_fifo;
63386 +int grsec_enable_execlog;
63387 +int grsec_enable_signal;
63388 +int grsec_enable_forkfail;
63389 +int grsec_enable_audit_ptrace;
63390 +int grsec_enable_time;
63391 +int grsec_enable_audit_textrel;
63392 +int grsec_enable_group;
63393 +int grsec_audit_gid;
63394 +int grsec_enable_chdir;
63395 +int grsec_enable_mount;
63396 +int grsec_enable_rofs;
63397 +int grsec_enable_chroot_findtask;
63398 +int grsec_enable_chroot_mount;
63399 +int grsec_enable_chroot_shmat;
63400 +int grsec_enable_chroot_fchdir;
63401 +int grsec_enable_chroot_double;
63402 +int grsec_enable_chroot_pivot;
63403 +int grsec_enable_chroot_chdir;
63404 +int grsec_enable_chroot_chmod;
63405 +int grsec_enable_chroot_mknod;
63406 +int grsec_enable_chroot_nice;
63407 +int grsec_enable_chroot_execlog;
63408 +int grsec_enable_chroot_caps;
63409 +int grsec_enable_chroot_sysctl;
63410 +int grsec_enable_chroot_unix;
63411 +int grsec_enable_tpe;
63412 +int grsec_tpe_gid;
63413 +int grsec_enable_blackhole;
63414 +#ifdef CONFIG_IPV6_MODULE
63415 +EXPORT_SYMBOL(grsec_enable_blackhole);
63416 +#endif
63417 +int grsec_lastack_retries;
63418 +int grsec_enable_tpe_all;
63419 +int grsec_enable_tpe_invert;
63420 +int grsec_enable_socket_all;
63421 +int grsec_socket_all_gid;
63422 +int grsec_enable_socket_client;
63423 +int grsec_socket_client_gid;
63424 +int grsec_enable_socket_server;
63425 +int grsec_socket_server_gid;
63426 +int grsec_resource_logging;
63427 +int grsec_disable_privio;
63428 +int grsec_enable_log_rwxmaps;
63429 +int grsec_lock;
63430 +
63431 +DEFINE_SPINLOCK(grsec_alert_lock);
63432 +unsigned long grsec_alert_wtime = 0;
63433 +unsigned long grsec_alert_fyet = 0;
63434 +
63435 +DEFINE_SPINLOCK(grsec_audit_lock);
63436 +
63437 +DEFINE_RWLOCK(grsec_exec_file_lock);
63438 +
63439 +char *gr_shared_page[4];
63440 +
63441 +char *gr_alert_log_fmt;
63442 +char *gr_audit_log_fmt;
63443 +char *gr_alert_log_buf;
63444 +char *gr_audit_log_buf;
63445 +
63446 +extern struct gr_arg *gr_usermode;
63447 +extern unsigned char *gr_system_salt;
63448 +extern unsigned char *gr_system_sum;
63449 +
63450 +void __init
63451 +grsecurity_init(void)
63452 +{
63453 + int j;
63454 + /* create the per-cpu shared pages */
63455 +
63456 +#ifdef CONFIG_X86
63457 + memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
63458 +#endif
63459 +
63460 + for (j = 0; j < 4; j++) {
63461 + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
63462 + if (gr_shared_page[j] == NULL) {
63463 + panic("Unable to allocate grsecurity shared page");
63464 + return;
63465 + }
63466 + }
63467 +
63468 + /* allocate log buffers */
63469 + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
63470 + if (!gr_alert_log_fmt) {
63471 + panic("Unable to allocate grsecurity alert log format buffer");
63472 + return;
63473 + }
63474 + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
63475 + if (!gr_audit_log_fmt) {
63476 + panic("Unable to allocate grsecurity audit log format buffer");
63477 + return;
63478 + }
63479 + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
63480 + if (!gr_alert_log_buf) {
63481 + panic("Unable to allocate grsecurity alert log buffer");
63482 + return;
63483 + }
63484 + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
63485 + if (!gr_audit_log_buf) {
63486 + panic("Unable to allocate grsecurity audit log buffer");
63487 + return;
63488 + }
63489 +
63490 + /* allocate memory for authentication structure */
63491 + gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
63492 + gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
63493 + gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
63494 +
63495 + if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
63496 + panic("Unable to allocate grsecurity authentication structure");
63497 + return;
63498 + }
63499 +
63500 +
63501 +#ifdef CONFIG_GRKERNSEC_IO
63502 +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
63503 + grsec_disable_privio = 1;
63504 +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
63505 + grsec_disable_privio = 1;
63506 +#else
63507 + grsec_disable_privio = 0;
63508 +#endif
63509 +#endif
63510 +
63511 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
63512 + /* for backward compatibility, tpe_invert always defaults to on if
63513 + enabled in the kernel
63514 + */
63515 + grsec_enable_tpe_invert = 1;
63516 +#endif
63517 +
63518 +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
63519 +#ifndef CONFIG_GRKERNSEC_SYSCTL
63520 + grsec_lock = 1;
63521 +#endif
63522 +
63523 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
63524 + grsec_enable_audit_textrel = 1;
63525 +#endif
63526 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
63527 + grsec_enable_log_rwxmaps = 1;
63528 +#endif
63529 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
63530 + grsec_enable_group = 1;
63531 + grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
63532 +#endif
63533 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
63534 + grsec_enable_chdir = 1;
63535 +#endif
63536 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
63537 + grsec_enable_harden_ptrace = 1;
63538 +#endif
63539 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
63540 + grsec_enable_mount = 1;
63541 +#endif
63542 +#ifdef CONFIG_GRKERNSEC_LINK
63543 + grsec_enable_link = 1;
63544 +#endif
63545 +#ifdef CONFIG_GRKERNSEC_BRUTE
63546 + grsec_enable_brute = 1;
63547 +#endif
63548 +#ifdef CONFIG_GRKERNSEC_DMESG
63549 + grsec_enable_dmesg = 1;
63550 +#endif
63551 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
63552 + grsec_enable_blackhole = 1;
63553 + grsec_lastack_retries = 4;
63554 +#endif
63555 +#ifdef CONFIG_GRKERNSEC_FIFO
63556 + grsec_enable_fifo = 1;
63557 +#endif
63558 +#ifdef CONFIG_GRKERNSEC_EXECLOG
63559 + grsec_enable_execlog = 1;
63560 +#endif
63561 +#ifdef CONFIG_GRKERNSEC_SETXID
63562 + grsec_enable_setxid = 1;
63563 +#endif
63564 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
63565 + grsec_enable_ptrace_readexec = 1;
63566 +#endif
63567 +#ifdef CONFIG_GRKERNSEC_SIGNAL
63568 + grsec_enable_signal = 1;
63569 +#endif
63570 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
63571 + grsec_enable_forkfail = 1;
63572 +#endif
63573 +#ifdef CONFIG_GRKERNSEC_TIME
63574 + grsec_enable_time = 1;
63575 +#endif
63576 +#ifdef CONFIG_GRKERNSEC_RESLOG
63577 + grsec_resource_logging = 1;
63578 +#endif
63579 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
63580 + grsec_enable_chroot_findtask = 1;
63581 +#endif
63582 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
63583 + grsec_enable_chroot_unix = 1;
63584 +#endif
63585 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
63586 + grsec_enable_chroot_mount = 1;
63587 +#endif
63588 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
63589 + grsec_enable_chroot_fchdir = 1;
63590 +#endif
63591 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
63592 + grsec_enable_chroot_shmat = 1;
63593 +#endif
63594 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
63595 + grsec_enable_audit_ptrace = 1;
63596 +#endif
63597 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
63598 + grsec_enable_chroot_double = 1;
63599 +#endif
63600 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
63601 + grsec_enable_chroot_pivot = 1;
63602 +#endif
63603 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
63604 + grsec_enable_chroot_chdir = 1;
63605 +#endif
63606 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
63607 + grsec_enable_chroot_chmod = 1;
63608 +#endif
63609 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
63610 + grsec_enable_chroot_mknod = 1;
63611 +#endif
63612 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
63613 + grsec_enable_chroot_nice = 1;
63614 +#endif
63615 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
63616 + grsec_enable_chroot_execlog = 1;
63617 +#endif
63618 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
63619 + grsec_enable_chroot_caps = 1;
63620 +#endif
63621 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
63622 + grsec_enable_chroot_sysctl = 1;
63623 +#endif
63624 +#ifdef CONFIG_GRKERNSEC_TPE
63625 + grsec_enable_tpe = 1;
63626 + grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
63627 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
63628 + grsec_enable_tpe_all = 1;
63629 +#endif
63630 +#endif
63631 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
63632 + grsec_enable_socket_all = 1;
63633 + grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
63634 +#endif
63635 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
63636 + grsec_enable_socket_client = 1;
63637 + grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
63638 +#endif
63639 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
63640 + grsec_enable_socket_server = 1;
63641 + grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
63642 +#endif
63643 +#endif
63644 +
63645 + return;
63646 +}
63647 diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
63648 new file mode 100644
63649 index 0000000..3efe141
63650 --- /dev/null
63651 +++ b/grsecurity/grsec_link.c
63652 @@ -0,0 +1,43 @@
63653 +#include <linux/kernel.h>
63654 +#include <linux/sched.h>
63655 +#include <linux/fs.h>
63656 +#include <linux/file.h>
63657 +#include <linux/grinternal.h>
63658 +
63659 +int
63660 +gr_handle_follow_link(const struct inode *parent,
63661 + const struct inode *inode,
63662 + const struct dentry *dentry, const struct vfsmount *mnt)
63663 +{
63664 +#ifdef CONFIG_GRKERNSEC_LINK
63665 + const struct cred *cred = current_cred();
63666 +
63667 + if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
63668 + (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
63669 + (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
63670 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
63671 + return -EACCES;
63672 + }
63673 +#endif
63674 + return 0;
63675 +}
63676 +
63677 +int
63678 +gr_handle_hardlink(const struct dentry *dentry,
63679 + const struct vfsmount *mnt,
63680 + struct inode *inode, const int mode, const char *to)
63681 +{
63682 +#ifdef CONFIG_GRKERNSEC_LINK
63683 + const struct cred *cred = current_cred();
63684 +
63685 + if (grsec_enable_link && cred->fsuid != inode->i_uid &&
63686 + (!S_ISREG(mode) || (mode & S_ISUID) ||
63687 + ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
63688 + (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
63689 + !capable(CAP_FOWNER) && cred->uid) {
63690 + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
63691 + return -EPERM;
63692 + }
63693 +#endif
63694 + return 0;
63695 +}
63696 diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
63697 new file mode 100644
63698 index 0000000..a45d2e9
63699 --- /dev/null
63700 +++ b/grsecurity/grsec_log.c
63701 @@ -0,0 +1,322 @@
63702 +#include <linux/kernel.h>
63703 +#include <linux/sched.h>
63704 +#include <linux/file.h>
63705 +#include <linux/tty.h>
63706 +#include <linux/fs.h>
63707 +#include <linux/grinternal.h>
63708 +
63709 +#ifdef CONFIG_TREE_PREEMPT_RCU
63710 +#define DISABLE_PREEMPT() preempt_disable()
63711 +#define ENABLE_PREEMPT() preempt_enable()
63712 +#else
63713 +#define DISABLE_PREEMPT()
63714 +#define ENABLE_PREEMPT()
63715 +#endif
63716 +
63717 +#define BEGIN_LOCKS(x) \
63718 + DISABLE_PREEMPT(); \
63719 + rcu_read_lock(); \
63720 + read_lock(&tasklist_lock); \
63721 + read_lock(&grsec_exec_file_lock); \
63722 + if (x != GR_DO_AUDIT) \
63723 + spin_lock(&grsec_alert_lock); \
63724 + else \
63725 + spin_lock(&grsec_audit_lock)
63726 +
63727 +#define END_LOCKS(x) \
63728 + if (x != GR_DO_AUDIT) \
63729 + spin_unlock(&grsec_alert_lock); \
63730 + else \
63731 + spin_unlock(&grsec_audit_lock); \
63732 + read_unlock(&grsec_exec_file_lock); \
63733 + read_unlock(&tasklist_lock); \
63734 + rcu_read_unlock(); \
63735 + ENABLE_PREEMPT(); \
63736 + if (x == GR_DONT_AUDIT) \
63737 + gr_handle_alertkill(current)
63738 +
63739 +enum {
63740 + FLOODING,
63741 + NO_FLOODING
63742 +};
63743 +
63744 +extern char *gr_alert_log_fmt;
63745 +extern char *gr_audit_log_fmt;
63746 +extern char *gr_alert_log_buf;
63747 +extern char *gr_audit_log_buf;
63748 +
63749 +static int gr_log_start(int audit)
63750 +{
63751 + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
63752 + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
63753 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
63754 +#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
63755 + unsigned long curr_secs = get_seconds();
63756 +
63757 + if (audit == GR_DO_AUDIT)
63758 + goto set_fmt;
63759 +
63760 + if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
63761 + grsec_alert_wtime = curr_secs;
63762 + grsec_alert_fyet = 0;
63763 + } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
63764 + && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
63765 + grsec_alert_fyet++;
63766 + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
63767 + grsec_alert_wtime = curr_secs;
63768 + grsec_alert_fyet++;
63769 + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
63770 + return FLOODING;
63771 + }
63772 + else return FLOODING;
63773 +
63774 +set_fmt:
63775 +#endif
63776 + memset(buf, 0, PAGE_SIZE);
63777 + if (current->signal->curr_ip && gr_acl_is_enabled()) {
63778 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
63779 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
63780 + } else if (current->signal->curr_ip) {
63781 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
63782 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
63783 + } else if (gr_acl_is_enabled()) {
63784 + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
63785 + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
63786 + } else {
63787 + sprintf(fmt, "%s%s", loglevel, "grsec: ");
63788 + strcpy(buf, fmt);
63789 + }
63790 +
63791 + return NO_FLOODING;
63792 +}
63793 +
63794 +static void gr_log_middle(int audit, const char *msg, va_list ap)
63795 + __attribute__ ((format (printf, 2, 0)));
63796 +
63797 +static void gr_log_middle(int audit, const char *msg, va_list ap)
63798 +{
63799 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
63800 + unsigned int len = strlen(buf);
63801 +
63802 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
63803 +
63804 + return;
63805 +}
63806 +
63807 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
63808 + __attribute__ ((format (printf, 2, 3)));
63809 +
63810 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
63811 +{
63812 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
63813 + unsigned int len = strlen(buf);
63814 + va_list ap;
63815 +
63816 + va_start(ap, msg);
63817 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
63818 + va_end(ap);
63819 +
63820 + return;
63821 +}
63822 +
63823 +static void gr_log_end(int audit, int append_default)
63824 +{
63825 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
63826 +
63827 + if (append_default) {
63828 + unsigned int len = strlen(buf);
63829 + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
63830 + }
63831 +
63832 + printk("%s\n", buf);
63833 +
63834 + return;
63835 +}
63836 +
63837 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
63838 +{
63839 + int logtype;
63840 + char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
63841 + char *str1 = NULL, *str2 = NULL, *str3 = NULL;
63842 + void *voidptr = NULL;
63843 + int num1 = 0, num2 = 0;
63844 + unsigned long ulong1 = 0, ulong2 = 0;
63845 + struct dentry *dentry = NULL;
63846 + struct vfsmount *mnt = NULL;
63847 + struct file *file = NULL;
63848 + struct task_struct *task = NULL;
63849 + const struct cred *cred, *pcred;
63850 + va_list ap;
63851 +
63852 + BEGIN_LOCKS(audit);
63853 + logtype = gr_log_start(audit);
63854 + if (logtype == FLOODING) {
63855 + END_LOCKS(audit);
63856 + return;
63857 + }
63858 + va_start(ap, argtypes);
63859 + switch (argtypes) {
63860 + case GR_TTYSNIFF:
63861 + task = va_arg(ap, struct task_struct *);
63862 + gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
63863 + break;
63864 + case GR_SYSCTL_HIDDEN:
63865 + str1 = va_arg(ap, char *);
63866 + gr_log_middle_varargs(audit, msg, result, str1);
63867 + break;
63868 + case GR_RBAC:
63869 + dentry = va_arg(ap, struct dentry *);
63870 + mnt = va_arg(ap, struct vfsmount *);
63871 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
63872 + break;
63873 + case GR_RBAC_STR:
63874 + dentry = va_arg(ap, struct dentry *);
63875 + mnt = va_arg(ap, struct vfsmount *);
63876 + str1 = va_arg(ap, char *);
63877 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
63878 + break;
63879 + case GR_STR_RBAC:
63880 + str1 = va_arg(ap, char *);
63881 + dentry = va_arg(ap, struct dentry *);
63882 + mnt = va_arg(ap, struct vfsmount *);
63883 + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
63884 + break;
63885 + case GR_RBAC_MODE2:
63886 + dentry = va_arg(ap, struct dentry *);
63887 + mnt = va_arg(ap, struct vfsmount *);
63888 + str1 = va_arg(ap, char *);
63889 + str2 = va_arg(ap, char *);
63890 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
63891 + break;
63892 + case GR_RBAC_MODE3:
63893 + dentry = va_arg(ap, struct dentry *);
63894 + mnt = va_arg(ap, struct vfsmount *);
63895 + str1 = va_arg(ap, char *);
63896 + str2 = va_arg(ap, char *);
63897 + str3 = va_arg(ap, char *);
63898 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
63899 + break;
63900 + case GR_FILENAME:
63901 + dentry = va_arg(ap, struct dentry *);
63902 + mnt = va_arg(ap, struct vfsmount *);
63903 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
63904 + break;
63905 + case GR_STR_FILENAME:
63906 + str1 = va_arg(ap, char *);
63907 + dentry = va_arg(ap, struct dentry *);
63908 + mnt = va_arg(ap, struct vfsmount *);
63909 + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
63910 + break;
63911 + case GR_FILENAME_STR:
63912 + dentry = va_arg(ap, struct dentry *);
63913 + mnt = va_arg(ap, struct vfsmount *);
63914 + str1 = va_arg(ap, char *);
63915 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
63916 + break;
63917 + case GR_FILENAME_TWO_INT:
63918 + dentry = va_arg(ap, struct dentry *);
63919 + mnt = va_arg(ap, struct vfsmount *);
63920 + num1 = va_arg(ap, int);
63921 + num2 = va_arg(ap, int);
63922 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
63923 + break;
63924 + case GR_FILENAME_TWO_INT_STR:
63925 + dentry = va_arg(ap, struct dentry *);
63926 + mnt = va_arg(ap, struct vfsmount *);
63927 + num1 = va_arg(ap, int);
63928 + num2 = va_arg(ap, int);
63929 + str1 = va_arg(ap, char *);
63930 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
63931 + break;
63932 + case GR_TEXTREL:
63933 + file = va_arg(ap, struct file *);
63934 + ulong1 = va_arg(ap, unsigned long);
63935 + ulong2 = va_arg(ap, unsigned long);
63936 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
63937 + break;
63938 + case GR_PTRACE:
63939 + task = va_arg(ap, struct task_struct *);
63940 + gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
63941 + break;
63942 + case GR_RESOURCE:
63943 + task = va_arg(ap, struct task_struct *);
63944 + cred = __task_cred(task);
63945 + pcred = __task_cred(task->real_parent);
63946 + ulong1 = va_arg(ap, unsigned long);
63947 + str1 = va_arg(ap, char *);
63948 + ulong2 = va_arg(ap, unsigned long);
63949 + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
63950 + break;
63951 + case GR_CAP:
63952 + task = va_arg(ap, struct task_struct *);
63953 + cred = __task_cred(task);
63954 + pcred = __task_cred(task->real_parent);
63955 + str1 = va_arg(ap, char *);
63956 + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
63957 + break;
63958 + case GR_SIG:
63959 + str1 = va_arg(ap, char *);
63960 + voidptr = va_arg(ap, void *);
63961 + gr_log_middle_varargs(audit, msg, str1, voidptr);
63962 + break;
63963 + case GR_SIG2:
63964 + task = va_arg(ap, struct task_struct *);
63965 + cred = __task_cred(task);
63966 + pcred = __task_cred(task->real_parent);
63967 + num1 = va_arg(ap, int);
63968 + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
63969 + break;
63970 + case GR_CRASH1:
63971 + task = va_arg(ap, struct task_struct *);
63972 + cred = __task_cred(task);
63973 + pcred = __task_cred(task->real_parent);
63974 + ulong1 = va_arg(ap, unsigned long);
63975 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
63976 + break;
63977 + case GR_CRASH2:
63978 + task = va_arg(ap, struct task_struct *);
63979 + cred = __task_cred(task);
63980 + pcred = __task_cred(task->real_parent);
63981 + ulong1 = va_arg(ap, unsigned long);
63982 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
63983 + break;
63984 + case GR_RWXMAP:
63985 + file = va_arg(ap, struct file *);
63986 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
63987 + break;
63988 + case GR_PSACCT:
63989 + {
63990 + unsigned int wday, cday;
63991 + __u8 whr, chr;
63992 + __u8 wmin, cmin;
63993 + __u8 wsec, csec;
63994 + char cur_tty[64] = { 0 };
63995 + char parent_tty[64] = { 0 };
63996 +
63997 + task = va_arg(ap, struct task_struct *);
63998 + wday = va_arg(ap, unsigned int);
63999 + cday = va_arg(ap, unsigned int);
64000 + whr = va_arg(ap, int);
64001 + chr = va_arg(ap, int);
64002 + wmin = va_arg(ap, int);
64003 + cmin = va_arg(ap, int);
64004 + wsec = va_arg(ap, int);
64005 + csec = va_arg(ap, int);
64006 + ulong1 = va_arg(ap, unsigned long);
64007 + cred = __task_cred(task);
64008 + pcred = __task_cred(task->real_parent);
64009 +
64010 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
64011 + }
64012 + break;
64013 + default:
64014 + gr_log_middle(audit, msg, ap);
64015 + }
64016 + va_end(ap);
64017 + // these don't need DEFAULTSECARGS printed on the end
64018 + if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
64019 + gr_log_end(audit, 0);
64020 + else
64021 + gr_log_end(audit, 1);
64022 + END_LOCKS(audit);
64023 +}
64024 diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
64025 new file mode 100644
64026 index 0000000..f536303
64027 --- /dev/null
64028 +++ b/grsecurity/grsec_mem.c
64029 @@ -0,0 +1,40 @@
64030 +#include <linux/kernel.h>
64031 +#include <linux/sched.h>
64032 +#include <linux/mm.h>
64033 +#include <linux/mman.h>
64034 +#include <linux/grinternal.h>
64035 +
64036 +void
64037 +gr_handle_ioperm(void)
64038 +{
64039 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
64040 + return;
64041 +}
64042 +
64043 +void
64044 +gr_handle_iopl(void)
64045 +{
64046 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
64047 + return;
64048 +}
64049 +
64050 +void
64051 +gr_handle_mem_readwrite(u64 from, u64 to)
64052 +{
64053 + gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
64054 + return;
64055 +}
64056 +
64057 +void
64058 +gr_handle_vm86(void)
64059 +{
64060 + gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
64061 + return;
64062 +}
64063 +
64064 +void
64065 +gr_log_badprocpid(const char *entry)
64066 +{
64067 + gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
64068 + return;
64069 +}
64070 diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
64071 new file mode 100644
64072 index 0000000..2131422
64073 --- /dev/null
64074 +++ b/grsecurity/grsec_mount.c
64075 @@ -0,0 +1,62 @@
64076 +#include <linux/kernel.h>
64077 +#include <linux/sched.h>
64078 +#include <linux/mount.h>
64079 +#include <linux/grsecurity.h>
64080 +#include <linux/grinternal.h>
64081 +
64082 +void
64083 +gr_log_remount(const char *devname, const int retval)
64084 +{
64085 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
64086 + if (grsec_enable_mount && (retval >= 0))
64087 + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
64088 +#endif
64089 + return;
64090 +}
64091 +
64092 +void
64093 +gr_log_unmount(const char *devname, const int retval)
64094 +{
64095 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
64096 + if (grsec_enable_mount && (retval >= 0))
64097 + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
64098 +#endif
64099 + return;
64100 +}
64101 +
64102 +void
64103 +gr_log_mount(const char *from, const char *to, const int retval)
64104 +{
64105 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
64106 + if (grsec_enable_mount && (retval >= 0))
64107 + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
64108 +#endif
64109 + return;
64110 +}
64111 +
64112 +int
64113 +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
64114 +{
64115 +#ifdef CONFIG_GRKERNSEC_ROFS
64116 + if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
64117 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
64118 + return -EPERM;
64119 + } else
64120 + return 0;
64121 +#endif
64122 + return 0;
64123 +}
64124 +
64125 +int
64126 +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
64127 +{
64128 +#ifdef CONFIG_GRKERNSEC_ROFS
64129 + if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
64130 + dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
64131 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
64132 + return -EPERM;
64133 + } else
64134 + return 0;
64135 +#endif
64136 + return 0;
64137 +}
64138 diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
64139 new file mode 100644
64140 index 0000000..a3b12a0
64141 --- /dev/null
64142 +++ b/grsecurity/grsec_pax.c
64143 @@ -0,0 +1,36 @@
64144 +#include <linux/kernel.h>
64145 +#include <linux/sched.h>
64146 +#include <linux/mm.h>
64147 +#include <linux/file.h>
64148 +#include <linux/grinternal.h>
64149 +#include <linux/grsecurity.h>
64150 +
64151 +void
64152 +gr_log_textrel(struct vm_area_struct * vma)
64153 +{
64154 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
64155 + if (grsec_enable_audit_textrel)
64156 + gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
64157 +#endif
64158 + return;
64159 +}
64160 +
64161 +void
64162 +gr_log_rwxmmap(struct file *file)
64163 +{
64164 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
64165 + if (grsec_enable_log_rwxmaps)
64166 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
64167 +#endif
64168 + return;
64169 +}
64170 +
64171 +void
64172 +gr_log_rwxmprotect(struct file *file)
64173 +{
64174 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
64175 + if (grsec_enable_log_rwxmaps)
64176 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
64177 +#endif
64178 + return;
64179 +}
64180 diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
64181 new file mode 100644
64182 index 0000000..78f8733
64183 --- /dev/null
64184 +++ b/grsecurity/grsec_ptrace.c
64185 @@ -0,0 +1,30 @@
64186 +#include <linux/kernel.h>
64187 +#include <linux/sched.h>
64188 +#include <linux/grinternal.h>
64189 +#include <linux/security.h>
64190 +
64191 +void
64192 +gr_audit_ptrace(struct task_struct *task)
64193 +{
64194 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
64195 + if (grsec_enable_audit_ptrace)
64196 + gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
64197 +#endif
64198 + return;
64199 +}
64200 +
64201 +int
64202 +gr_ptrace_readexec(struct file *file, int unsafe_flags)
64203 +{
64204 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
64205 + const struct dentry *dentry = file->f_path.dentry;
64206 + const struct vfsmount *mnt = file->f_path.mnt;
64207 +
64208 + if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
64209 + (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
64210 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
64211 + return -EACCES;
64212 + }
64213 +#endif
64214 + return 0;
64215 +}
64216 diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
64217 new file mode 100644
64218 index 0000000..c648492
64219 --- /dev/null
64220 +++ b/grsecurity/grsec_sig.c
64221 @@ -0,0 +1,206 @@
64222 +#include <linux/kernel.h>
64223 +#include <linux/sched.h>
64224 +#include <linux/delay.h>
64225 +#include <linux/grsecurity.h>
64226 +#include <linux/grinternal.h>
64227 +#include <linux/hardirq.h>
64228 +
64229 +char *signames[] = {
64230 + [SIGSEGV] = "Segmentation fault",
64231 + [SIGILL] = "Illegal instruction",
64232 + [SIGABRT] = "Abort",
64233 + [SIGBUS] = "Invalid alignment/Bus error"
64234 +};
64235 +
64236 +void
64237 +gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
64238 +{
64239 +#ifdef CONFIG_GRKERNSEC_SIGNAL
64240 + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
64241 + (sig == SIGABRT) || (sig == SIGBUS))) {
64242 + if (t->pid == current->pid) {
64243 + gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
64244 + } else {
64245 + gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
64246 + }
64247 + }
64248 +#endif
64249 + return;
64250 +}
64251 +
64252 +int
64253 +gr_handle_signal(const struct task_struct *p, const int sig)
64254 +{
64255 +#ifdef CONFIG_GRKERNSEC
64256 + /* ignore the 0 signal for protected task checks */
64257 + if (current->pid > 1 && sig && gr_check_protected_task(p)) {
64258 + gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
64259 + return -EPERM;
64260 + } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
64261 + return -EPERM;
64262 + }
64263 +#endif
64264 + return 0;
64265 +}
64266 +
64267 +#ifdef CONFIG_GRKERNSEC
64268 +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
64269 +
64270 +int gr_fake_force_sig(int sig, struct task_struct *t)
64271 +{
64272 + unsigned long int flags;
64273 + int ret, blocked, ignored;
64274 + struct k_sigaction *action;
64275 +
64276 + spin_lock_irqsave(&t->sighand->siglock, flags);
64277 + action = &t->sighand->action[sig-1];
64278 + ignored = action->sa.sa_handler == SIG_IGN;
64279 + blocked = sigismember(&t->blocked, sig);
64280 + if (blocked || ignored) {
64281 + action->sa.sa_handler = SIG_DFL;
64282 + if (blocked) {
64283 + sigdelset(&t->blocked, sig);
64284 + recalc_sigpending_and_wake(t);
64285 + }
64286 + }
64287 + if (action->sa.sa_handler == SIG_DFL)
64288 + t->signal->flags &= ~SIGNAL_UNKILLABLE;
64289 + ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
64290 +
64291 + spin_unlock_irqrestore(&t->sighand->siglock, flags);
64292 +
64293 + return ret;
64294 +}
64295 +#endif
64296 +
64297 +#ifdef CONFIG_GRKERNSEC_BRUTE
64298 +#define GR_USER_BAN_TIME (15 * 60)
64299 +
64300 +static int __get_dumpable(unsigned long mm_flags)
64301 +{
64302 + int ret;
64303 +
64304 + ret = mm_flags & MMF_DUMPABLE_MASK;
64305 + return (ret >= 2) ? 2 : ret;
64306 +}
64307 +#endif
64308 +
64309 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
64310 +{
64311 +#ifdef CONFIG_GRKERNSEC_BRUTE
64312 + uid_t uid = 0;
64313 +
64314 + if (!grsec_enable_brute)
64315 + return;
64316 +
64317 + rcu_read_lock();
64318 + read_lock(&tasklist_lock);
64319 + read_lock(&grsec_exec_file_lock);
64320 + if (p->real_parent && p->real_parent->exec_file == p->exec_file)
64321 + p->real_parent->brute = 1;
64322 + else {
64323 + const struct cred *cred = __task_cred(p), *cred2;
64324 + struct task_struct *tsk, *tsk2;
64325 +
64326 + if (!__get_dumpable(mm_flags) && cred->uid) {
64327 + struct user_struct *user;
64328 +
64329 + uid = cred->uid;
64330 +
64331 + /* this is put upon execution past expiration */
64332 + user = find_user(uid);
64333 + if (user == NULL)
64334 + goto unlock;
64335 + user->banned = 1;
64336 + user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
64337 + if (user->ban_expires == ~0UL)
64338 + user->ban_expires--;
64339 +
64340 + do_each_thread(tsk2, tsk) {
64341 + cred2 = __task_cred(tsk);
64342 + if (tsk != p && cred2->uid == uid)
64343 + gr_fake_force_sig(SIGKILL, tsk);
64344 + } while_each_thread(tsk2, tsk);
64345 + }
64346 + }
64347 +unlock:
64348 + read_unlock(&grsec_exec_file_lock);
64349 + read_unlock(&tasklist_lock);
64350 + rcu_read_unlock();
64351 +
64352 + if (uid)
64353 + printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
64354 +#endif
64355 + return;
64356 +}
64357 +
64358 +void gr_handle_brute_check(void)
64359 +{
64360 +#ifdef CONFIG_GRKERNSEC_BRUTE
64361 + if (current->brute)
64362 + msleep(30 * 1000);
64363 +#endif
64364 + return;
64365 +}
64366 +
64367 +void gr_handle_kernel_exploit(void)
64368 +{
64369 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
64370 + const struct cred *cred;
64371 + struct task_struct *tsk, *tsk2;
64372 + struct user_struct *user;
64373 + uid_t uid;
64374 +
64375 + if (in_irq() || in_serving_softirq() || in_nmi())
64376 + panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
64377 +
64378 + uid = current_uid();
64379 +
64380 + if (uid == 0)
64381 + panic("grsec: halting the system due to suspicious kernel crash caused by root");
64382 + else {
64383 + /* kill all the processes of this user, hold a reference
64384 + to their creds struct, and prevent them from creating
64385 + another process until system reset
64386 + */
64387 + printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
64388 + /* we intentionally leak this ref */
64389 + user = get_uid(current->cred->user);
64390 + if (user) {
64391 + user->banned = 1;
64392 + user->ban_expires = ~0UL;
64393 + }
64394 +
64395 + read_lock(&tasklist_lock);
64396 + do_each_thread(tsk2, tsk) {
64397 + cred = __task_cred(tsk);
64398 + if (cred->uid == uid)
64399 + gr_fake_force_sig(SIGKILL, tsk);
64400 + } while_each_thread(tsk2, tsk);
64401 + read_unlock(&tasklist_lock);
64402 + }
64403 +#endif
64404 +}
64405 +
64406 +int __gr_process_user_ban(struct user_struct *user)
64407 +{
64408 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
64409 + if (unlikely(user->banned)) {
64410 + if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
64411 + user->banned = 0;
64412 + user->ban_expires = 0;
64413 + free_uid(user);
64414 + } else
64415 + return -EPERM;
64416 + }
64417 +#endif
64418 + return 0;
64419 +}
64420 +
64421 +int gr_process_user_ban(void)
64422 +{
64423 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
64424 + return __gr_process_user_ban(current->cred->user);
64425 +#endif
64426 + return 0;
64427 +}
64428 diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
64429 new file mode 100644
64430 index 0000000..7512ea9
64431 --- /dev/null
64432 +++ b/grsecurity/grsec_sock.c
64433 @@ -0,0 +1,275 @@
64434 +#include <linux/kernel.h>
64435 +#include <linux/module.h>
64436 +#include <linux/sched.h>
64437 +#include <linux/file.h>
64438 +#include <linux/net.h>
64439 +#include <linux/in.h>
64440 +#include <linux/ip.h>
64441 +#include <net/sock.h>
64442 +#include <net/inet_sock.h>
64443 +#include <linux/grsecurity.h>
64444 +#include <linux/grinternal.h>
64445 +#include <linux/gracl.h>
64446 +
64447 +kernel_cap_t gr_cap_rtnetlink(struct sock *sock);
64448 +EXPORT_SYMBOL(gr_cap_rtnetlink);
64449 +
64450 +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
64451 +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
64452 +
64453 +EXPORT_SYMBOL(gr_search_udp_recvmsg);
64454 +EXPORT_SYMBOL(gr_search_udp_sendmsg);
64455 +
64456 +#ifdef CONFIG_UNIX_MODULE
64457 +EXPORT_SYMBOL(gr_acl_handle_unix);
64458 +EXPORT_SYMBOL(gr_acl_handle_mknod);
64459 +EXPORT_SYMBOL(gr_handle_chroot_unix);
64460 +EXPORT_SYMBOL(gr_handle_create);
64461 +#endif
64462 +
64463 +#ifdef CONFIG_GRKERNSEC
64464 +#define gr_conn_table_size 32749
64465 +struct conn_table_entry {
64466 + struct conn_table_entry *next;
64467 + struct signal_struct *sig;
64468 +};
64469 +
64470 +struct conn_table_entry *gr_conn_table[gr_conn_table_size];
64471 +DEFINE_SPINLOCK(gr_conn_table_lock);
64472 +
64473 +extern const char * gr_socktype_to_name(unsigned char type);
64474 +extern const char * gr_proto_to_name(unsigned char proto);
64475 +extern const char * gr_sockfamily_to_name(unsigned char family);
64476 +
64477 +static __inline__ int
64478 +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
64479 +{
64480 + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
64481 +}
64482 +
64483 +static __inline__ int
64484 +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
64485 + __u16 sport, __u16 dport)
64486 +{
64487 + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
64488 + sig->gr_sport == sport && sig->gr_dport == dport))
64489 + return 1;
64490 + else
64491 + return 0;
64492 +}
64493 +
64494 +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
64495 +{
64496 + struct conn_table_entry **match;
64497 + unsigned int index;
64498 +
64499 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
64500 + sig->gr_sport, sig->gr_dport,
64501 + gr_conn_table_size);
64502 +
64503 + newent->sig = sig;
64504 +
64505 + match = &gr_conn_table[index];
64506 + newent->next = *match;
64507 + *match = newent;
64508 +
64509 + return;
64510 +}
64511 +
64512 +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
64513 +{
64514 + struct conn_table_entry *match, *last = NULL;
64515 + unsigned int index;
64516 +
64517 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
64518 + sig->gr_sport, sig->gr_dport,
64519 + gr_conn_table_size);
64520 +
64521 + match = gr_conn_table[index];
64522 + while (match && !conn_match(match->sig,
64523 + sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
64524 + sig->gr_dport)) {
64525 + last = match;
64526 + match = match->next;
64527 + }
64528 +
64529 + if (match) {
64530 + if (last)
64531 + last->next = match->next;
64532 + else
64533 + gr_conn_table[index] = NULL;
64534 + kfree(match);
64535 + }
64536 +
64537 + return;
64538 +}
64539 +
64540 +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
64541 + __u16 sport, __u16 dport)
64542 +{
64543 + struct conn_table_entry *match;
64544 + unsigned int index;
64545 +
64546 + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
64547 +
64548 + match = gr_conn_table[index];
64549 + while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
64550 + match = match->next;
64551 +
64552 + if (match)
64553 + return match->sig;
64554 + else
64555 + return NULL;
64556 +}
64557 +
64558 +#endif
64559 +
64560 +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
64561 +{
64562 +#ifdef CONFIG_GRKERNSEC
64563 + struct signal_struct *sig = task->signal;
64564 + struct conn_table_entry *newent;
64565 +
64566 + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
64567 + if (newent == NULL)
64568 + return;
64569 + /* no bh lock needed since we are called with bh disabled */
64570 + spin_lock(&gr_conn_table_lock);
64571 + gr_del_task_from_ip_table_nolock(sig);
64572 + sig->gr_saddr = inet->rcv_saddr;
64573 + sig->gr_daddr = inet->daddr;
64574 + sig->gr_sport = inet->sport;
64575 + sig->gr_dport = inet->dport;
64576 + gr_add_to_task_ip_table_nolock(sig, newent);
64577 + spin_unlock(&gr_conn_table_lock);
64578 +#endif
64579 + return;
64580 +}
64581 +
64582 +void gr_del_task_from_ip_table(struct task_struct *task)
64583 +{
64584 +#ifdef CONFIG_GRKERNSEC
64585 + spin_lock_bh(&gr_conn_table_lock);
64586 + gr_del_task_from_ip_table_nolock(task->signal);
64587 + spin_unlock_bh(&gr_conn_table_lock);
64588 +#endif
64589 + return;
64590 +}
64591 +
64592 +void
64593 +gr_attach_curr_ip(const struct sock *sk)
64594 +{
64595 +#ifdef CONFIG_GRKERNSEC
64596 + struct signal_struct *p, *set;
64597 + const struct inet_sock *inet = inet_sk(sk);
64598 +
64599 + if (unlikely(sk->sk_protocol != IPPROTO_TCP))
64600 + return;
64601 +
64602 + set = current->signal;
64603 +
64604 + spin_lock_bh(&gr_conn_table_lock);
64605 + p = gr_lookup_task_ip_table(inet->daddr, inet->rcv_saddr,
64606 + inet->dport, inet->sport);
64607 + if (unlikely(p != NULL)) {
64608 + set->curr_ip = p->curr_ip;
64609 + set->used_accept = 1;
64610 + gr_del_task_from_ip_table_nolock(p);
64611 + spin_unlock_bh(&gr_conn_table_lock);
64612 + return;
64613 + }
64614 + spin_unlock_bh(&gr_conn_table_lock);
64615 +
64616 + set->curr_ip = inet->daddr;
64617 + set->used_accept = 1;
64618 +#endif
64619 + return;
64620 +}
64621 +
64622 +int
64623 +gr_handle_sock_all(const int family, const int type, const int protocol)
64624 +{
64625 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
64626 + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
64627 + (family != AF_UNIX)) {
64628 + if (family == AF_INET)
64629 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
64630 + else
64631 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
64632 + return -EACCES;
64633 + }
64634 +#endif
64635 + return 0;
64636 +}
64637 +
64638 +int
64639 +gr_handle_sock_server(const struct sockaddr *sck)
64640 +{
64641 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
64642 + if (grsec_enable_socket_server &&
64643 + in_group_p(grsec_socket_server_gid) &&
64644 + sck && (sck->sa_family != AF_UNIX) &&
64645 + (sck->sa_family != AF_LOCAL)) {
64646 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
64647 + return -EACCES;
64648 + }
64649 +#endif
64650 + return 0;
64651 +}
64652 +
64653 +int
64654 +gr_handle_sock_server_other(const struct sock *sck)
64655 +{
64656 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
64657 + if (grsec_enable_socket_server &&
64658 + in_group_p(grsec_socket_server_gid) &&
64659 + sck && (sck->sk_family != AF_UNIX) &&
64660 + (sck->sk_family != AF_LOCAL)) {
64661 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
64662 + return -EACCES;
64663 + }
64664 +#endif
64665 + return 0;
64666 +}
64667 +
64668 +int
64669 +gr_handle_sock_client(const struct sockaddr *sck)
64670 +{
64671 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
64672 + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
64673 + sck && (sck->sa_family != AF_UNIX) &&
64674 + (sck->sa_family != AF_LOCAL)) {
64675 + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
64676 + return -EACCES;
64677 + }
64678 +#endif
64679 + return 0;
64680 +}
64681 +
64682 +kernel_cap_t
64683 +gr_cap_rtnetlink(struct sock *sock)
64684 +{
64685 +#ifdef CONFIG_GRKERNSEC
64686 + if (!gr_acl_is_enabled())
64687 + return current_cap();
64688 + else if (sock->sk_protocol == NETLINK_ISCSI &&
64689 + cap_raised(current_cap(), CAP_SYS_ADMIN) &&
64690 + gr_is_capable(CAP_SYS_ADMIN))
64691 + return current_cap();
64692 + else if (sock->sk_protocol == NETLINK_AUDIT &&
64693 + cap_raised(current_cap(), CAP_AUDIT_WRITE) &&
64694 + gr_is_capable(CAP_AUDIT_WRITE) &&
64695 + cap_raised(current_cap(), CAP_AUDIT_CONTROL) &&
64696 + gr_is_capable(CAP_AUDIT_CONTROL))
64697 + return current_cap();
64698 + else if (cap_raised(current_cap(), CAP_NET_ADMIN) &&
64699 + ((sock->sk_protocol == NETLINK_ROUTE) ?
64700 + gr_is_capable_nolog(CAP_NET_ADMIN) :
64701 + gr_is_capable(CAP_NET_ADMIN)))
64702 + return current_cap();
64703 + else
64704 + return __cap_empty_set;
64705 +#else
64706 + return current_cap();
64707 +#endif
64708 +}
64709 diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
64710 new file mode 100644
64711 index 0000000..31f3258
64712 --- /dev/null
64713 +++ b/grsecurity/grsec_sysctl.c
64714 @@ -0,0 +1,499 @@
64715 +#include <linux/kernel.h>
64716 +#include <linux/sched.h>
64717 +#include <linux/sysctl.h>
64718 +#include <linux/grsecurity.h>
64719 +#include <linux/grinternal.h>
64720 +
64721 +int
64722 +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
64723 +{
64724 +#ifdef CONFIG_GRKERNSEC_SYSCTL
64725 + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
64726 + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
64727 + return -EACCES;
64728 + }
64729 +#endif
64730 + return 0;
64731 +}
64732 +
64733 +#ifdef CONFIG_GRKERNSEC_ROFS
64734 +static int __maybe_unused one = 1;
64735 +#endif
64736 +
64737 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
64738 +ctl_table grsecurity_table[] = {
64739 +#ifdef CONFIG_GRKERNSEC_SYSCTL
64740 +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
64741 +#ifdef CONFIG_GRKERNSEC_IO
64742 + {
64743 + .ctl_name = CTL_UNNUMBERED,
64744 + .procname = "disable_priv_io",
64745 + .data = &grsec_disable_privio,
64746 + .maxlen = sizeof(int),
64747 + .mode = 0600,
64748 + .proc_handler = &proc_dointvec,
64749 + },
64750 +#endif
64751 +#endif
64752 +#ifdef CONFIG_GRKERNSEC_LINK
64753 + {
64754 + .ctl_name = CTL_UNNUMBERED,
64755 + .procname = "linking_restrictions",
64756 + .data = &grsec_enable_link,
64757 + .maxlen = sizeof(int),
64758 + .mode = 0600,
64759 + .proc_handler = &proc_dointvec,
64760 + },
64761 +#endif
64762 +#ifdef CONFIG_GRKERNSEC_BRUTE
64763 + {
64764 + .ctl_name = CTL_UNNUMBERED,
64765 + .procname = "deter_bruteforce",
64766 + .data = &grsec_enable_brute,
64767 + .maxlen = sizeof(int),
64768 + .mode = 0600,
64769 + .proc_handler = &proc_dointvec,
64770 + },
64771 +#endif
64772 +#ifdef CONFIG_GRKERNSEC_FIFO
64773 + {
64774 + .ctl_name = CTL_UNNUMBERED,
64775 + .procname = "fifo_restrictions",
64776 + .data = &grsec_enable_fifo,
64777 + .maxlen = sizeof(int),
64778 + .mode = 0600,
64779 + .proc_handler = &proc_dointvec,
64780 + },
64781 +#endif
64782 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
64783 + {
64784 + .ctl_name = CTL_UNNUMBERED,
64785 + .procname = "ptrace_readexec",
64786 + .data = &grsec_enable_ptrace_readexec,
64787 + .maxlen = sizeof(int),
64788 + .mode = 0600,
64789 + .proc_handler = &proc_dointvec,
64790 + },
64791 +#endif
64792 +#ifdef CONFIG_GRKERNSEC_SETXID
64793 + {
64794 + .ctl_name = CTL_UNNUMBERED,
64795 + .procname = "consistent_setxid",
64796 + .data = &grsec_enable_setxid,
64797 + .maxlen = sizeof(int),
64798 + .mode = 0600,
64799 + .proc_handler = &proc_dointvec,
64800 + },
64801 +#endif
64802 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
64803 + {
64804 + .ctl_name = CTL_UNNUMBERED,
64805 + .procname = "ip_blackhole",
64806 + .data = &grsec_enable_blackhole,
64807 + .maxlen = sizeof(int),
64808 + .mode = 0600,
64809 + .proc_handler = &proc_dointvec,
64810 + },
64811 + {
64812 + .ctl_name = CTL_UNNUMBERED,
64813 + .procname = "lastack_retries",
64814 + .data = &grsec_lastack_retries,
64815 + .maxlen = sizeof(int),
64816 + .mode = 0600,
64817 + .proc_handler = &proc_dointvec,
64818 + },
64819 +#endif
64820 +#ifdef CONFIG_GRKERNSEC_EXECLOG
64821 + {
64822 + .ctl_name = CTL_UNNUMBERED,
64823 + .procname = "exec_logging",
64824 + .data = &grsec_enable_execlog,
64825 + .maxlen = sizeof(int),
64826 + .mode = 0600,
64827 + .proc_handler = &proc_dointvec,
64828 + },
64829 +#endif
64830 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
64831 + {
64832 + .ctl_name = CTL_UNNUMBERED,
64833 + .procname = "rwxmap_logging",
64834 + .data = &grsec_enable_log_rwxmaps,
64835 + .maxlen = sizeof(int),
64836 + .mode = 0600,
64837 + .proc_handler = &proc_dointvec,
64838 + },
64839 +#endif
64840 +#ifdef CONFIG_GRKERNSEC_SIGNAL
64841 + {
64842 + .ctl_name = CTL_UNNUMBERED,
64843 + .procname = "signal_logging",
64844 + .data = &grsec_enable_signal,
64845 + .maxlen = sizeof(int),
64846 + .mode = 0600,
64847 + .proc_handler = &proc_dointvec,
64848 + },
64849 +#endif
64850 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
64851 + {
64852 + .ctl_name = CTL_UNNUMBERED,
64853 + .procname = "forkfail_logging",
64854 + .data = &grsec_enable_forkfail,
64855 + .maxlen = sizeof(int),
64856 + .mode = 0600,
64857 + .proc_handler = &proc_dointvec,
64858 + },
64859 +#endif
64860 +#ifdef CONFIG_GRKERNSEC_TIME
64861 + {
64862 + .ctl_name = CTL_UNNUMBERED,
64863 + .procname = "timechange_logging",
64864 + .data = &grsec_enable_time,
64865 + .maxlen = sizeof(int),
64866 + .mode = 0600,
64867 + .proc_handler = &proc_dointvec,
64868 + },
64869 +#endif
64870 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
64871 + {
64872 + .ctl_name = CTL_UNNUMBERED,
64873 + .procname = "chroot_deny_shmat",
64874 + .data = &grsec_enable_chroot_shmat,
64875 + .maxlen = sizeof(int),
64876 + .mode = 0600,
64877 + .proc_handler = &proc_dointvec,
64878 + },
64879 +#endif
64880 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
64881 + {
64882 + .ctl_name = CTL_UNNUMBERED,
64883 + .procname = "chroot_deny_unix",
64884 + .data = &grsec_enable_chroot_unix,
64885 + .maxlen = sizeof(int),
64886 + .mode = 0600,
64887 + .proc_handler = &proc_dointvec,
64888 + },
64889 +#endif
64890 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
64891 + {
64892 + .ctl_name = CTL_UNNUMBERED,
64893 + .procname = "chroot_deny_mount",
64894 + .data = &grsec_enable_chroot_mount,
64895 + .maxlen = sizeof(int),
64896 + .mode = 0600,
64897 + .proc_handler = &proc_dointvec,
64898 + },
64899 +#endif
64900 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
64901 + {
64902 + .ctl_name = CTL_UNNUMBERED,
64903 + .procname = "chroot_deny_fchdir",
64904 + .data = &grsec_enable_chroot_fchdir,
64905 + .maxlen = sizeof(int),
64906 + .mode = 0600,
64907 + .proc_handler = &proc_dointvec,
64908 + },
64909 +#endif
64910 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
64911 + {
64912 + .ctl_name = CTL_UNNUMBERED,
64913 + .procname = "chroot_deny_chroot",
64914 + .data = &grsec_enable_chroot_double,
64915 + .maxlen = sizeof(int),
64916 + .mode = 0600,
64917 + .proc_handler = &proc_dointvec,
64918 + },
64919 +#endif
64920 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
64921 + {
64922 + .ctl_name = CTL_UNNUMBERED,
64923 + .procname = "chroot_deny_pivot",
64924 + .data = &grsec_enable_chroot_pivot,
64925 + .maxlen = sizeof(int),
64926 + .mode = 0600,
64927 + .proc_handler = &proc_dointvec,
64928 + },
64929 +#endif
64930 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
64931 + {
64932 + .ctl_name = CTL_UNNUMBERED,
64933 + .procname = "chroot_enforce_chdir",
64934 + .data = &grsec_enable_chroot_chdir,
64935 + .maxlen = sizeof(int),
64936 + .mode = 0600,
64937 + .proc_handler = &proc_dointvec,
64938 + },
64939 +#endif
64940 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
64941 + {
64942 + .ctl_name = CTL_UNNUMBERED,
64943 + .procname = "chroot_deny_chmod",
64944 + .data = &grsec_enable_chroot_chmod,
64945 + .maxlen = sizeof(int),
64946 + .mode = 0600,
64947 + .proc_handler = &proc_dointvec,
64948 + },
64949 +#endif
64950 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
64951 + {
64952 + .ctl_name = CTL_UNNUMBERED,
64953 + .procname = "chroot_deny_mknod",
64954 + .data = &grsec_enable_chroot_mknod,
64955 + .maxlen = sizeof(int),
64956 + .mode = 0600,
64957 + .proc_handler = &proc_dointvec,
64958 + },
64959 +#endif
64960 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
64961 + {
64962 + .ctl_name = CTL_UNNUMBERED,
64963 + .procname = "chroot_restrict_nice",
64964 + .data = &grsec_enable_chroot_nice,
64965 + .maxlen = sizeof(int),
64966 + .mode = 0600,
64967 + .proc_handler = &proc_dointvec,
64968 + },
64969 +#endif
64970 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
64971 + {
64972 + .ctl_name = CTL_UNNUMBERED,
64973 + .procname = "chroot_execlog",
64974 + .data = &grsec_enable_chroot_execlog,
64975 + .maxlen = sizeof(int),
64976 + .mode = 0600,
64977 + .proc_handler = &proc_dointvec,
64978 + },
64979 +#endif
64980 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
64981 + {
64982 + .ctl_name = CTL_UNNUMBERED,
64983 + .procname = "chroot_caps",
64984 + .data = &grsec_enable_chroot_caps,
64985 + .maxlen = sizeof(int),
64986 + .mode = 0600,
64987 + .proc_handler = &proc_dointvec,
64988 + },
64989 +#endif
64990 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
64991 + {
64992 + .ctl_name = CTL_UNNUMBERED,
64993 + .procname = "chroot_deny_sysctl",
64994 + .data = &grsec_enable_chroot_sysctl,
64995 + .maxlen = sizeof(int),
64996 + .mode = 0600,
64997 + .proc_handler = &proc_dointvec,
64998 + },
64999 +#endif
65000 +#ifdef CONFIG_GRKERNSEC_TPE
65001 + {
65002 + .ctl_name = CTL_UNNUMBERED,
65003 + .procname = "tpe",
65004 + .data = &grsec_enable_tpe,
65005 + .maxlen = sizeof(int),
65006 + .mode = 0600,
65007 + .proc_handler = &proc_dointvec,
65008 + },
65009 + {
65010 + .ctl_name = CTL_UNNUMBERED,
65011 + .procname = "tpe_gid",
65012 + .data = &grsec_tpe_gid,
65013 + .maxlen = sizeof(int),
65014 + .mode = 0600,
65015 + .proc_handler = &proc_dointvec,
65016 + },
65017 +#endif
65018 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
65019 + {
65020 + .ctl_name = CTL_UNNUMBERED,
65021 + .procname = "tpe_invert",
65022 + .data = &grsec_enable_tpe_invert,
65023 + .maxlen = sizeof(int),
65024 + .mode = 0600,
65025 + .proc_handler = &proc_dointvec,
65026 + },
65027 +#endif
65028 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
65029 + {
65030 + .ctl_name = CTL_UNNUMBERED,
65031 + .procname = "tpe_restrict_all",
65032 + .data = &grsec_enable_tpe_all,
65033 + .maxlen = sizeof(int),
65034 + .mode = 0600,
65035 + .proc_handler = &proc_dointvec,
65036 + },
65037 +#endif
65038 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
65039 + {
65040 + .ctl_name = CTL_UNNUMBERED,
65041 + .procname = "socket_all",
65042 + .data = &grsec_enable_socket_all,
65043 + .maxlen = sizeof(int),
65044 + .mode = 0600,
65045 + .proc_handler = &proc_dointvec,
65046 + },
65047 + {
65048 + .ctl_name = CTL_UNNUMBERED,
65049 + .procname = "socket_all_gid",
65050 + .data = &grsec_socket_all_gid,
65051 + .maxlen = sizeof(int),
65052 + .mode = 0600,
65053 + .proc_handler = &proc_dointvec,
65054 + },
65055 +#endif
65056 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
65057 + {
65058 + .ctl_name = CTL_UNNUMBERED,
65059 + .procname = "socket_client",
65060 + .data = &grsec_enable_socket_client,
65061 + .maxlen = sizeof(int),
65062 + .mode = 0600,
65063 + .proc_handler = &proc_dointvec,
65064 + },
65065 + {
65066 + .ctl_name = CTL_UNNUMBERED,
65067 + .procname = "socket_client_gid",
65068 + .data = &grsec_socket_client_gid,
65069 + .maxlen = sizeof(int),
65070 + .mode = 0600,
65071 + .proc_handler = &proc_dointvec,
65072 + },
65073 +#endif
65074 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
65075 + {
65076 + .ctl_name = CTL_UNNUMBERED,
65077 + .procname = "socket_server",
65078 + .data = &grsec_enable_socket_server,
65079 + .maxlen = sizeof(int),
65080 + .mode = 0600,
65081 + .proc_handler = &proc_dointvec,
65082 + },
65083 + {
65084 + .ctl_name = CTL_UNNUMBERED,
65085 + .procname = "socket_server_gid",
65086 + .data = &grsec_socket_server_gid,
65087 + .maxlen = sizeof(int),
65088 + .mode = 0600,
65089 + .proc_handler = &proc_dointvec,
65090 + },
65091 +#endif
65092 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
65093 + {
65094 + .ctl_name = CTL_UNNUMBERED,
65095 + .procname = "audit_group",
65096 + .data = &grsec_enable_group,
65097 + .maxlen = sizeof(int),
65098 + .mode = 0600,
65099 + .proc_handler = &proc_dointvec,
65100 + },
65101 + {
65102 + .ctl_name = CTL_UNNUMBERED,
65103 + .procname = "audit_gid",
65104 + .data = &grsec_audit_gid,
65105 + .maxlen = sizeof(int),
65106 + .mode = 0600,
65107 + .proc_handler = &proc_dointvec,
65108 + },
65109 +#endif
65110 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
65111 + {
65112 + .ctl_name = CTL_UNNUMBERED,
65113 + .procname = "audit_chdir",
65114 + .data = &grsec_enable_chdir,
65115 + .maxlen = sizeof(int),
65116 + .mode = 0600,
65117 + .proc_handler = &proc_dointvec,
65118 + },
65119 +#endif
65120 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
65121 + {
65122 + .ctl_name = CTL_UNNUMBERED,
65123 + .procname = "audit_mount",
65124 + .data = &grsec_enable_mount,
65125 + .maxlen = sizeof(int),
65126 + .mode = 0600,
65127 + .proc_handler = &proc_dointvec,
65128 + },
65129 +#endif
65130 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
65131 + {
65132 + .ctl_name = CTL_UNNUMBERED,
65133 + .procname = "audit_textrel",
65134 + .data = &grsec_enable_audit_textrel,
65135 + .maxlen = sizeof(int),
65136 + .mode = 0600,
65137 + .proc_handler = &proc_dointvec,
65138 + },
65139 +#endif
65140 +#ifdef CONFIG_GRKERNSEC_DMESG
65141 + {
65142 + .ctl_name = CTL_UNNUMBERED,
65143 + .procname = "dmesg",
65144 + .data = &grsec_enable_dmesg,
65145 + .maxlen = sizeof(int),
65146 + .mode = 0600,
65147 + .proc_handler = &proc_dointvec,
65148 + },
65149 +#endif
65150 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
65151 + {
65152 + .ctl_name = CTL_UNNUMBERED,
65153 + .procname = "chroot_findtask",
65154 + .data = &grsec_enable_chroot_findtask,
65155 + .maxlen = sizeof(int),
65156 + .mode = 0600,
65157 + .proc_handler = &proc_dointvec,
65158 + },
65159 +#endif
65160 +#ifdef CONFIG_GRKERNSEC_RESLOG
65161 + {
65162 + .ctl_name = CTL_UNNUMBERED,
65163 + .procname = "resource_logging",
65164 + .data = &grsec_resource_logging,
65165 + .maxlen = sizeof(int),
65166 + .mode = 0600,
65167 + .proc_handler = &proc_dointvec,
65168 + },
65169 +#endif
65170 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
65171 + {
65172 + .ctl_name = CTL_UNNUMBERED,
65173 + .procname = "audit_ptrace",
65174 + .data = &grsec_enable_audit_ptrace,
65175 + .maxlen = sizeof(int),
65176 + .mode = 0600,
65177 + .proc_handler = &proc_dointvec,
65178 + },
65179 +#endif
65180 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
65181 + {
65182 + .ctl_name = CTL_UNNUMBERED,
65183 + .procname = "harden_ptrace",
65184 + .data = &grsec_enable_harden_ptrace,
65185 + .maxlen = sizeof(int),
65186 + .mode = 0600,
65187 + .proc_handler = &proc_dointvec,
65188 + },
65189 +#endif
65190 + {
65191 + .ctl_name = CTL_UNNUMBERED,
65192 + .procname = "grsec_lock",
65193 + .data = &grsec_lock,
65194 + .maxlen = sizeof(int),
65195 + .mode = 0600,
65196 + .proc_handler = &proc_dointvec,
65197 + },
65198 +#endif
65199 +#ifdef CONFIG_GRKERNSEC_ROFS
65200 + {
65201 + .ctl_name = CTL_UNNUMBERED,
65202 + .procname = "romount_protect",
65203 + .data = &grsec_enable_rofs,
65204 + .maxlen = sizeof(int),
65205 + .mode = 0600,
65206 + .proc_handler = &proc_dointvec_minmax,
65207 + .extra1 = &one,
65208 + .extra2 = &one,
65209 + },
65210 +#endif
65211 + { .ctl_name = 0 }
65212 +};
65213 +#endif
65214 diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
65215 new file mode 100644
65216 index 0000000..0dc13c3
65217 --- /dev/null
65218 +++ b/grsecurity/grsec_time.c
65219 @@ -0,0 +1,16 @@
65220 +#include <linux/kernel.h>
65221 +#include <linux/sched.h>
65222 +#include <linux/grinternal.h>
65223 +#include <linux/module.h>
65224 +
65225 +void
65226 +gr_log_timechange(void)
65227 +{
65228 +#ifdef CONFIG_GRKERNSEC_TIME
65229 + if (grsec_enable_time)
65230 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
65231 +#endif
65232 + return;
65233 +}
65234 +
65235 +EXPORT_SYMBOL(gr_log_timechange);
65236 diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
65237 new file mode 100644
65238 index 0000000..07e0dc0
65239 --- /dev/null
65240 +++ b/grsecurity/grsec_tpe.c
65241 @@ -0,0 +1,73 @@
65242 +#include <linux/kernel.h>
65243 +#include <linux/sched.h>
65244 +#include <linux/file.h>
65245 +#include <linux/fs.h>
65246 +#include <linux/grinternal.h>
65247 +
65248 +extern int gr_acl_tpe_check(void);
65249 +
65250 +int
65251 +gr_tpe_allow(const struct file *file)
65252 +{
65253 +#ifdef CONFIG_GRKERNSEC
65254 + struct inode *inode = file->f_path.dentry->d_parent->d_inode;
65255 + const struct cred *cred = current_cred();
65256 + char *msg = NULL;
65257 + char *msg2 = NULL;
65258 +
65259 + // never restrict root
65260 + if (!cred->uid)
65261 + return 1;
65262 +
65263 + if (grsec_enable_tpe) {
65264 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
65265 + if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
65266 + msg = "not being in trusted group";
65267 + else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
65268 + msg = "being in untrusted group";
65269 +#else
65270 + if (in_group_p(grsec_tpe_gid))
65271 + msg = "being in untrusted group";
65272 +#endif
65273 + }
65274 + if (!msg && gr_acl_tpe_check())
65275 + msg = "being in untrusted role";
65276 +
65277 + // not in any affected group/role
65278 + if (!msg)
65279 + goto next_check;
65280 +
65281 + if (inode->i_uid)
65282 + msg2 = "file in non-root-owned directory";
65283 + else if (inode->i_mode & S_IWOTH)
65284 + msg2 = "file in world-writable directory";
65285 + else if (inode->i_mode & S_IWGRP)
65286 + msg2 = "file in group-writable directory";
65287 +
65288 + if (msg && msg2) {
65289 + char fullmsg[70] = {0};
65290 + snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
65291 + gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
65292 + return 0;
65293 + }
65294 + msg = NULL;
65295 +next_check:
65296 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
65297 + if (!grsec_enable_tpe || !grsec_enable_tpe_all)
65298 + return 1;
65299 +
65300 + if (inode->i_uid && (inode->i_uid != cred->uid))
65301 + msg = "directory not owned by user";
65302 + else if (inode->i_mode & S_IWOTH)
65303 + msg = "file in world-writable directory";
65304 + else if (inode->i_mode & S_IWGRP)
65305 + msg = "file in group-writable directory";
65306 +
65307 + if (msg) {
65308 + gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
65309 + return 0;
65310 + }
65311 +#endif
65312 +#endif
65313 + return 1;
65314 +}
65315 diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
65316 new file mode 100644
65317 index 0000000..9f7b1ac
65318 --- /dev/null
65319 +++ b/grsecurity/grsum.c
65320 @@ -0,0 +1,61 @@
65321 +#include <linux/err.h>
65322 +#include <linux/kernel.h>
65323 +#include <linux/sched.h>
65324 +#include <linux/mm.h>
65325 +#include <linux/scatterlist.h>
65326 +#include <linux/crypto.h>
65327 +#include <linux/gracl.h>
65328 +
65329 +
65330 +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
65331 +#error "crypto and sha256 must be built into the kernel"
65332 +#endif
65333 +
65334 +int
65335 +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
65336 +{
65337 + char *p;
65338 + struct crypto_hash *tfm;
65339 + struct hash_desc desc;
65340 + struct scatterlist sg;
65341 + unsigned char temp_sum[GR_SHA_LEN];
65342 + volatile int retval = 0;
65343 + volatile int dummy = 0;
65344 + unsigned int i;
65345 +
65346 + sg_init_table(&sg, 1);
65347 +
65348 + tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
65349 + if (IS_ERR(tfm)) {
65350 + /* should never happen, since sha256 should be built in */
65351 + return 1;
65352 + }
65353 +
65354 + desc.tfm = tfm;
65355 + desc.flags = 0;
65356 +
65357 + crypto_hash_init(&desc);
65358 +
65359 + p = salt;
65360 + sg_set_buf(&sg, p, GR_SALT_LEN);
65361 + crypto_hash_update(&desc, &sg, sg.length);
65362 +
65363 + p = entry->pw;
65364 + sg_set_buf(&sg, p, strlen(p));
65365 +
65366 + crypto_hash_update(&desc, &sg, sg.length);
65367 +
65368 + crypto_hash_final(&desc, temp_sum);
65369 +
65370 + memset(entry->pw, 0, GR_PW_LEN);
65371 +
65372 + for (i = 0; i < GR_SHA_LEN; i++)
65373 + if (sum[i] != temp_sum[i])
65374 + retval = 1;
65375 + else
65376 + dummy = 1; // waste a cycle
65377 +
65378 + crypto_free_hash(tfm);
65379 +
65380 + return retval;
65381 +}
65382 diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
65383 index 3cd9ccd..fe16d47 100644
65384 --- a/include/acpi/acpi_bus.h
65385 +++ b/include/acpi/acpi_bus.h
65386 @@ -107,7 +107,7 @@ struct acpi_device_ops {
65387 acpi_op_bind bind;
65388 acpi_op_unbind unbind;
65389 acpi_op_notify notify;
65390 -};
65391 +} __no_const;
65392
65393 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
65394
65395 diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
65396 index f4906f6..71feb73 100644
65397 --- a/include/acpi/acpi_drivers.h
65398 +++ b/include/acpi/acpi_drivers.h
65399 @@ -119,8 +119,8 @@ int acpi_processor_set_thermal_limit(acpi_handle handle, int type);
65400 Dock Station
65401 -------------------------------------------------------------------------- */
65402 struct acpi_dock_ops {
65403 - acpi_notify_handler handler;
65404 - acpi_notify_handler uevent;
65405 + const acpi_notify_handler handler;
65406 + const acpi_notify_handler uevent;
65407 };
65408
65409 #if defined(CONFIG_ACPI_DOCK) || defined(CONFIG_ACPI_DOCK_MODULE)
65410 @@ -128,7 +128,7 @@ extern int is_dock_device(acpi_handle handle);
65411 extern int register_dock_notifier(struct notifier_block *nb);
65412 extern void unregister_dock_notifier(struct notifier_block *nb);
65413 extern int register_hotplug_dock_device(acpi_handle handle,
65414 - struct acpi_dock_ops *ops,
65415 + const struct acpi_dock_ops *ops,
65416 void *context);
65417 extern void unregister_hotplug_dock_device(acpi_handle handle);
65418 #else
65419 @@ -144,7 +144,7 @@ static inline void unregister_dock_notifier(struct notifier_block *nb)
65420 {
65421 }
65422 static inline int register_hotplug_dock_device(acpi_handle handle,
65423 - struct acpi_dock_ops *ops,
65424 + const struct acpi_dock_ops *ops,
65425 void *context)
65426 {
65427 return -ENODEV;
65428 diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
65429 index b7babf0..a9ac9fc 100644
65430 --- a/include/asm-generic/atomic-long.h
65431 +++ b/include/asm-generic/atomic-long.h
65432 @@ -22,6 +22,12 @@
65433
65434 typedef atomic64_t atomic_long_t;
65435
65436 +#ifdef CONFIG_PAX_REFCOUNT
65437 +typedef atomic64_unchecked_t atomic_long_unchecked_t;
65438 +#else
65439 +typedef atomic64_t atomic_long_unchecked_t;
65440 +#endif
65441 +
65442 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
65443
65444 static inline long atomic_long_read(atomic_long_t *l)
65445 @@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
65446 return (long)atomic64_read(v);
65447 }
65448
65449 +#ifdef CONFIG_PAX_REFCOUNT
65450 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
65451 +{
65452 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65453 +
65454 + return (long)atomic64_read_unchecked(v);
65455 +}
65456 +#endif
65457 +
65458 static inline void atomic_long_set(atomic_long_t *l, long i)
65459 {
65460 atomic64_t *v = (atomic64_t *)l;
65461 @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
65462 atomic64_set(v, i);
65463 }
65464
65465 +#ifdef CONFIG_PAX_REFCOUNT
65466 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
65467 +{
65468 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65469 +
65470 + atomic64_set_unchecked(v, i);
65471 +}
65472 +#endif
65473 +
65474 static inline void atomic_long_inc(atomic_long_t *l)
65475 {
65476 atomic64_t *v = (atomic64_t *)l;
65477 @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
65478 atomic64_inc(v);
65479 }
65480
65481 +#ifdef CONFIG_PAX_REFCOUNT
65482 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
65483 +{
65484 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65485 +
65486 + atomic64_inc_unchecked(v);
65487 +}
65488 +#endif
65489 +
65490 static inline void atomic_long_dec(atomic_long_t *l)
65491 {
65492 atomic64_t *v = (atomic64_t *)l;
65493 @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
65494 atomic64_dec(v);
65495 }
65496
65497 +#ifdef CONFIG_PAX_REFCOUNT
65498 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
65499 +{
65500 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65501 +
65502 + atomic64_dec_unchecked(v);
65503 +}
65504 +#endif
65505 +
65506 static inline void atomic_long_add(long i, atomic_long_t *l)
65507 {
65508 atomic64_t *v = (atomic64_t *)l;
65509 @@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
65510 atomic64_add(i, v);
65511 }
65512
65513 +#ifdef CONFIG_PAX_REFCOUNT
65514 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
65515 +{
65516 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65517 +
65518 + atomic64_add_unchecked(i, v);
65519 +}
65520 +#endif
65521 +
65522 static inline void atomic_long_sub(long i, atomic_long_t *l)
65523 {
65524 atomic64_t *v = (atomic64_t *)l;
65525 @@ -115,6 +166,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
65526 return (long)atomic64_inc_return(v);
65527 }
65528
65529 +#ifdef CONFIG_PAX_REFCOUNT
65530 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
65531 +{
65532 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65533 +
65534 + return (long)atomic64_inc_return_unchecked(v);
65535 +}
65536 +#endif
65537 +
65538 static inline long atomic_long_dec_return(atomic_long_t *l)
65539 {
65540 atomic64_t *v = (atomic64_t *)l;
65541 @@ -140,6 +200,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
65542
65543 typedef atomic_t atomic_long_t;
65544
65545 +#ifdef CONFIG_PAX_REFCOUNT
65546 +typedef atomic_unchecked_t atomic_long_unchecked_t;
65547 +#else
65548 +typedef atomic_t atomic_long_unchecked_t;
65549 +#endif
65550 +
65551 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
65552 static inline long atomic_long_read(atomic_long_t *l)
65553 {
65554 @@ -148,6 +214,15 @@ static inline long atomic_long_read(atomic_long_t *l)
65555 return (long)atomic_read(v);
65556 }
65557
65558 +#ifdef CONFIG_PAX_REFCOUNT
65559 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
65560 +{
65561 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65562 +
65563 + return (long)atomic_read_unchecked(v);
65564 +}
65565 +#endif
65566 +
65567 static inline void atomic_long_set(atomic_long_t *l, long i)
65568 {
65569 atomic_t *v = (atomic_t *)l;
65570 @@ -155,6 +230,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
65571 atomic_set(v, i);
65572 }
65573
65574 +#ifdef CONFIG_PAX_REFCOUNT
65575 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
65576 +{
65577 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65578 +
65579 + atomic_set_unchecked(v, i);
65580 +}
65581 +#endif
65582 +
65583 static inline void atomic_long_inc(atomic_long_t *l)
65584 {
65585 atomic_t *v = (atomic_t *)l;
65586 @@ -162,6 +246,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
65587 atomic_inc(v);
65588 }
65589
65590 +#ifdef CONFIG_PAX_REFCOUNT
65591 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
65592 +{
65593 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65594 +
65595 + atomic_inc_unchecked(v);
65596 +}
65597 +#endif
65598 +
65599 static inline void atomic_long_dec(atomic_long_t *l)
65600 {
65601 atomic_t *v = (atomic_t *)l;
65602 @@ -169,6 +262,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
65603 atomic_dec(v);
65604 }
65605
65606 +#ifdef CONFIG_PAX_REFCOUNT
65607 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
65608 +{
65609 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65610 +
65611 + atomic_dec_unchecked(v);
65612 +}
65613 +#endif
65614 +
65615 static inline void atomic_long_add(long i, atomic_long_t *l)
65616 {
65617 atomic_t *v = (atomic_t *)l;
65618 @@ -176,6 +278,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
65619 atomic_add(i, v);
65620 }
65621
65622 +#ifdef CONFIG_PAX_REFCOUNT
65623 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
65624 +{
65625 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65626 +
65627 + atomic_add_unchecked(i, v);
65628 +}
65629 +#endif
65630 +
65631 static inline void atomic_long_sub(long i, atomic_long_t *l)
65632 {
65633 atomic_t *v = (atomic_t *)l;
65634 @@ -232,6 +343,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
65635 return (long)atomic_inc_return(v);
65636 }
65637
65638 +#ifdef CONFIG_PAX_REFCOUNT
65639 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
65640 +{
65641 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65642 +
65643 + return (long)atomic_inc_return_unchecked(v);
65644 +}
65645 +#endif
65646 +
65647 static inline long atomic_long_dec_return(atomic_long_t *l)
65648 {
65649 atomic_t *v = (atomic_t *)l;
65650 @@ -255,4 +375,47 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
65651
65652 #endif /* BITS_PER_LONG == 64 */
65653
65654 +#ifdef CONFIG_PAX_REFCOUNT
65655 +static inline void pax_refcount_needs_these_functions(void)
65656 +{
65657 + atomic_read_unchecked((atomic_unchecked_t *)NULL);
65658 + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
65659 + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
65660 + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
65661 + atomic_inc_unchecked((atomic_unchecked_t *)NULL);
65662 + (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
65663 + atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
65664 + atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
65665 + atomic_dec_unchecked((atomic_unchecked_t *)NULL);
65666 + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
65667 + (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
65668 +
65669 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
65670 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
65671 + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
65672 + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
65673 + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
65674 + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
65675 +}
65676 +#else
65677 +#define atomic_read_unchecked(v) atomic_read(v)
65678 +#define atomic_set_unchecked(v, i) atomic_set((v), (i))
65679 +#define atomic_add_unchecked(i, v) atomic_add((i), (v))
65680 +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
65681 +#define atomic_inc_unchecked(v) atomic_inc(v)
65682 +#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
65683 +#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
65684 +#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
65685 +#define atomic_dec_unchecked(v) atomic_dec(v)
65686 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
65687 +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
65688 +
65689 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
65690 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
65691 +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
65692 +#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
65693 +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
65694 +#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
65695 +#endif
65696 +
65697 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
65698 diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
65699 index b18ce4f..2ee2843 100644
65700 --- a/include/asm-generic/atomic64.h
65701 +++ b/include/asm-generic/atomic64.h
65702 @@ -16,6 +16,8 @@ typedef struct {
65703 long long counter;
65704 } atomic64_t;
65705
65706 +typedef atomic64_t atomic64_unchecked_t;
65707 +
65708 #define ATOMIC64_INIT(i) { (i) }
65709
65710 extern long long atomic64_read(const atomic64_t *v);
65711 @@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
65712 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
65713 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
65714
65715 +#define atomic64_read_unchecked(v) atomic64_read(v)
65716 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
65717 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
65718 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
65719 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
65720 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
65721 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
65722 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
65723 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
65724 +
65725 #endif /* _ASM_GENERIC_ATOMIC64_H */
65726 diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
65727 index d48ddf0..656a0ac 100644
65728 --- a/include/asm-generic/bug.h
65729 +++ b/include/asm-generic/bug.h
65730 @@ -105,11 +105,11 @@ extern void warn_slowpath_null(const char *file, const int line);
65731
65732 #else /* !CONFIG_BUG */
65733 #ifndef HAVE_ARCH_BUG
65734 -#define BUG() do {} while(0)
65735 +#define BUG() do { for (;;) ; } while(0)
65736 #endif
65737
65738 #ifndef HAVE_ARCH_BUG_ON
65739 -#define BUG_ON(condition) do { if (condition) ; } while(0)
65740 +#define BUG_ON(condition) do { if (condition) for (;;) ; } while(0)
65741 #endif
65742
65743 #ifndef HAVE_ARCH_WARN_ON
65744 diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
65745 index 1bfcfe5..e04c5c9 100644
65746 --- a/include/asm-generic/cache.h
65747 +++ b/include/asm-generic/cache.h
65748 @@ -6,7 +6,7 @@
65749 * cache lines need to provide their own cache.h.
65750 */
65751
65752 -#define L1_CACHE_SHIFT 5
65753 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
65754 +#define L1_CACHE_SHIFT 5UL
65755 +#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
65756
65757 #endif /* __ASM_GENERIC_CACHE_H */
65758 diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h
65759 index 6920695..41038bc 100644
65760 --- a/include/asm-generic/dma-mapping-common.h
65761 +++ b/include/asm-generic/dma-mapping-common.h
65762 @@ -11,7 +11,7 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
65763 enum dma_data_direction dir,
65764 struct dma_attrs *attrs)
65765 {
65766 - struct dma_map_ops *ops = get_dma_ops(dev);
65767 + const struct dma_map_ops *ops = get_dma_ops(dev);
65768 dma_addr_t addr;
65769
65770 kmemcheck_mark_initialized(ptr, size);
65771 @@ -30,7 +30,7 @@ static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
65772 enum dma_data_direction dir,
65773 struct dma_attrs *attrs)
65774 {
65775 - struct dma_map_ops *ops = get_dma_ops(dev);
65776 + const struct dma_map_ops *ops = get_dma_ops(dev);
65777
65778 BUG_ON(!valid_dma_direction(dir));
65779 if (ops->unmap_page)
65780 @@ -42,7 +42,7 @@ static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
65781 int nents, enum dma_data_direction dir,
65782 struct dma_attrs *attrs)
65783 {
65784 - struct dma_map_ops *ops = get_dma_ops(dev);
65785 + const struct dma_map_ops *ops = get_dma_ops(dev);
65786 int i, ents;
65787 struct scatterlist *s;
65788
65789 @@ -59,7 +59,7 @@ static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg
65790 int nents, enum dma_data_direction dir,
65791 struct dma_attrs *attrs)
65792 {
65793 - struct dma_map_ops *ops = get_dma_ops(dev);
65794 + const struct dma_map_ops *ops = get_dma_ops(dev);
65795
65796 BUG_ON(!valid_dma_direction(dir));
65797 debug_dma_unmap_sg(dev, sg, nents, dir);
65798 @@ -71,7 +71,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
65799 size_t offset, size_t size,
65800 enum dma_data_direction dir)
65801 {
65802 - struct dma_map_ops *ops = get_dma_ops(dev);
65803 + const struct dma_map_ops *ops = get_dma_ops(dev);
65804 dma_addr_t addr;
65805
65806 kmemcheck_mark_initialized(page_address(page) + offset, size);
65807 @@ -85,7 +85,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
65808 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
65809 size_t size, enum dma_data_direction dir)
65810 {
65811 - struct dma_map_ops *ops = get_dma_ops(dev);
65812 + const struct dma_map_ops *ops = get_dma_ops(dev);
65813
65814 BUG_ON(!valid_dma_direction(dir));
65815 if (ops->unmap_page)
65816 @@ -97,7 +97,7 @@ static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
65817 size_t size,
65818 enum dma_data_direction dir)
65819 {
65820 - struct dma_map_ops *ops = get_dma_ops(dev);
65821 + const struct dma_map_ops *ops = get_dma_ops(dev);
65822
65823 BUG_ON(!valid_dma_direction(dir));
65824 if (ops->sync_single_for_cpu)
65825 @@ -109,7 +109,7 @@ static inline void dma_sync_single_for_device(struct device *dev,
65826 dma_addr_t addr, size_t size,
65827 enum dma_data_direction dir)
65828 {
65829 - struct dma_map_ops *ops = get_dma_ops(dev);
65830 + const struct dma_map_ops *ops = get_dma_ops(dev);
65831
65832 BUG_ON(!valid_dma_direction(dir));
65833 if (ops->sync_single_for_device)
65834 @@ -123,7 +123,7 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
65835 size_t size,
65836 enum dma_data_direction dir)
65837 {
65838 - struct dma_map_ops *ops = get_dma_ops(dev);
65839 + const struct dma_map_ops *ops = get_dma_ops(dev);
65840
65841 BUG_ON(!valid_dma_direction(dir));
65842 if (ops->sync_single_range_for_cpu) {
65843 @@ -140,7 +140,7 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
65844 size_t size,
65845 enum dma_data_direction dir)
65846 {
65847 - struct dma_map_ops *ops = get_dma_ops(dev);
65848 + const struct dma_map_ops *ops = get_dma_ops(dev);
65849
65850 BUG_ON(!valid_dma_direction(dir));
65851 if (ops->sync_single_range_for_device) {
65852 @@ -155,7 +155,7 @@ static inline void
65853 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
65854 int nelems, enum dma_data_direction dir)
65855 {
65856 - struct dma_map_ops *ops = get_dma_ops(dev);
65857 + const struct dma_map_ops *ops = get_dma_ops(dev);
65858
65859 BUG_ON(!valid_dma_direction(dir));
65860 if (ops->sync_sg_for_cpu)
65861 @@ -167,7 +167,7 @@ static inline void
65862 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
65863 int nelems, enum dma_data_direction dir)
65864 {
65865 - struct dma_map_ops *ops = get_dma_ops(dev);
65866 + const struct dma_map_ops *ops = get_dma_ops(dev);
65867
65868 BUG_ON(!valid_dma_direction(dir));
65869 if (ops->sync_sg_for_device)
65870 diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
65871 index 0d68a1e..b74a761 100644
65872 --- a/include/asm-generic/emergency-restart.h
65873 +++ b/include/asm-generic/emergency-restart.h
65874 @@ -1,7 +1,7 @@
65875 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
65876 #define _ASM_GENERIC_EMERGENCY_RESTART_H
65877
65878 -static inline void machine_emergency_restart(void)
65879 +static inline __noreturn void machine_emergency_restart(void)
65880 {
65881 machine_restart(NULL);
65882 }
65883 diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
65884 index 3c2344f..4590a7d 100644
65885 --- a/include/asm-generic/futex.h
65886 +++ b/include/asm-generic/futex.h
65887 @@ -6,7 +6,7 @@
65888 #include <asm/errno.h>
65889
65890 static inline int
65891 -futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
65892 +futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
65893 {
65894 int op = (encoded_op >> 28) & 7;
65895 int cmp = (encoded_op >> 24) & 15;
65896 @@ -48,7 +48,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
65897 }
65898
65899 static inline int
65900 -futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
65901 +futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval, int newval)
65902 {
65903 return -ENOSYS;
65904 }
65905 diff --git a/include/asm-generic/int-l64.h b/include/asm-generic/int-l64.h
65906 index 1ca3efc..e3dc852 100644
65907 --- a/include/asm-generic/int-l64.h
65908 +++ b/include/asm-generic/int-l64.h
65909 @@ -46,6 +46,8 @@ typedef unsigned int u32;
65910 typedef signed long s64;
65911 typedef unsigned long u64;
65912
65913 +typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
65914 +
65915 #define S8_C(x) x
65916 #define U8_C(x) x ## U
65917 #define S16_C(x) x
65918 diff --git a/include/asm-generic/int-ll64.h b/include/asm-generic/int-ll64.h
65919 index f394147..b6152b9 100644
65920 --- a/include/asm-generic/int-ll64.h
65921 +++ b/include/asm-generic/int-ll64.h
65922 @@ -51,6 +51,8 @@ typedef unsigned int u32;
65923 typedef signed long long s64;
65924 typedef unsigned long long u64;
65925
65926 +typedef unsigned long long intoverflow_t;
65927 +
65928 #define S8_C(x) x
65929 #define U8_C(x) x ## U
65930 #define S16_C(x) x
65931 diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
65932 index e5f234a..cdb16b3 100644
65933 --- a/include/asm-generic/kmap_types.h
65934 +++ b/include/asm-generic/kmap_types.h
65935 @@ -28,7 +28,8 @@ KMAP_D(15) KM_UML_USERCOPY,
65936 KMAP_D(16) KM_IRQ_PTE,
65937 KMAP_D(17) KM_NMI,
65938 KMAP_D(18) KM_NMI_PTE,
65939 -KMAP_D(19) KM_TYPE_NR
65940 +KMAP_D(19) KM_CLEARPAGE,
65941 +KMAP_D(20) KM_TYPE_NR
65942 };
65943
65944 #undef KMAP_D
65945 diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
65946 index 725612b..9cc513a 100644
65947 --- a/include/asm-generic/pgtable-nopmd.h
65948 +++ b/include/asm-generic/pgtable-nopmd.h
65949 @@ -1,14 +1,19 @@
65950 #ifndef _PGTABLE_NOPMD_H
65951 #define _PGTABLE_NOPMD_H
65952
65953 -#ifndef __ASSEMBLY__
65954 -
65955 #include <asm-generic/pgtable-nopud.h>
65956
65957 -struct mm_struct;
65958 -
65959 #define __PAGETABLE_PMD_FOLDED
65960
65961 +#define PMD_SHIFT PUD_SHIFT
65962 +#define PTRS_PER_PMD 1
65963 +#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
65964 +#define PMD_MASK (~(PMD_SIZE-1))
65965 +
65966 +#ifndef __ASSEMBLY__
65967 +
65968 +struct mm_struct;
65969 +
65970 /*
65971 * Having the pmd type consist of a pud gets the size right, and allows
65972 * us to conceptually access the pud entry that this pmd is folded into
65973 @@ -16,11 +21,6 @@ struct mm_struct;
65974 */
65975 typedef struct { pud_t pud; } pmd_t;
65976
65977 -#define PMD_SHIFT PUD_SHIFT
65978 -#define PTRS_PER_PMD 1
65979 -#define PMD_SIZE (1UL << PMD_SHIFT)
65980 -#define PMD_MASK (~(PMD_SIZE-1))
65981 -
65982 /*
65983 * The "pud_xxx()" functions here are trivial for a folded two-level
65984 * setup: the pmd is never bad, and a pmd always exists (as it's folded
65985 diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
65986 index 810431d..ccc3638 100644
65987 --- a/include/asm-generic/pgtable-nopud.h
65988 +++ b/include/asm-generic/pgtable-nopud.h
65989 @@ -1,10 +1,15 @@
65990 #ifndef _PGTABLE_NOPUD_H
65991 #define _PGTABLE_NOPUD_H
65992
65993 -#ifndef __ASSEMBLY__
65994 -
65995 #define __PAGETABLE_PUD_FOLDED
65996
65997 +#define PUD_SHIFT PGDIR_SHIFT
65998 +#define PTRS_PER_PUD 1
65999 +#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
66000 +#define PUD_MASK (~(PUD_SIZE-1))
66001 +
66002 +#ifndef __ASSEMBLY__
66003 +
66004 /*
66005 * Having the pud type consist of a pgd gets the size right, and allows
66006 * us to conceptually access the pgd entry that this pud is folded into
66007 @@ -12,11 +17,6 @@
66008 */
66009 typedef struct { pgd_t pgd; } pud_t;
66010
66011 -#define PUD_SHIFT PGDIR_SHIFT
66012 -#define PTRS_PER_PUD 1
66013 -#define PUD_SIZE (1UL << PUD_SHIFT)
66014 -#define PUD_MASK (~(PUD_SIZE-1))
66015 -
66016 /*
66017 * The "pgd_xxx()" functions here are trivial for a folded two-level
66018 * setup: the pud is never bad, and a pud always exists (as it's folded
66019 diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
66020 index e2bd73e..fea8ed3 100644
66021 --- a/include/asm-generic/pgtable.h
66022 +++ b/include/asm-generic/pgtable.h
66023 @@ -344,6 +344,14 @@ extern void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
66024 unsigned long size);
66025 #endif
66026
66027 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
66028 +static inline unsigned long pax_open_kernel(void) { return 0; }
66029 +#endif
66030 +
66031 +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
66032 +static inline unsigned long pax_close_kernel(void) { return 0; }
66033 +#endif
66034 +
66035 #endif /* !__ASSEMBLY__ */
66036
66037 #endif /* _ASM_GENERIC_PGTABLE_H */
66038 diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
66039 index b6e818f..21aa58a 100644
66040 --- a/include/asm-generic/vmlinux.lds.h
66041 +++ b/include/asm-generic/vmlinux.lds.h
66042 @@ -199,6 +199,7 @@
66043 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
66044 VMLINUX_SYMBOL(__start_rodata) = .; \
66045 *(.rodata) *(.rodata.*) \
66046 + *(.data.read_only) \
66047 *(__vermagic) /* Kernel version magic */ \
66048 *(__markers_strings) /* Markers: strings */ \
66049 *(__tracepoints_strings)/* Tracepoints: strings */ \
66050 @@ -656,22 +657,24 @@
66051 * section in the linker script will go there too. @phdr should have
66052 * a leading colon.
66053 *
66054 - * Note that this macros defines __per_cpu_load as an absolute symbol.
66055 + * Note that this macros defines per_cpu_load as an absolute symbol.
66056 * If there is no need to put the percpu section at a predetermined
66057 * address, use PERCPU().
66058 */
66059 #define PERCPU_VADDR(vaddr, phdr) \
66060 - VMLINUX_SYMBOL(__per_cpu_load) = .; \
66061 - .data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
66062 + per_cpu_load = .; \
66063 + .data.percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
66064 - LOAD_OFFSET) { \
66065 + VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
66066 VMLINUX_SYMBOL(__per_cpu_start) = .; \
66067 *(.data.percpu.first) \
66068 - *(.data.percpu.page_aligned) \
66069 *(.data.percpu) \
66070 + . = ALIGN(PAGE_SIZE); \
66071 + *(.data.percpu.page_aligned) \
66072 *(.data.percpu.shared_aligned) \
66073 VMLINUX_SYMBOL(__per_cpu_end) = .; \
66074 } phdr \
66075 - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
66076 + . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data.percpu);
66077
66078 /**
66079 * PERCPU - define output section for percpu area, simple version
66080 diff --git a/include/drm/drmP.h b/include/drm/drmP.h
66081 index ebab6a6..351dba1 100644
66082 --- a/include/drm/drmP.h
66083 +++ b/include/drm/drmP.h
66084 @@ -71,6 +71,7 @@
66085 #include <linux/workqueue.h>
66086 #include <linux/poll.h>
66087 #include <asm/pgalloc.h>
66088 +#include <asm/local.h>
66089 #include "drm.h"
66090
66091 #include <linux/idr.h>
66092 @@ -814,7 +815,7 @@ struct drm_driver {
66093 void (*vgaarb_irq)(struct drm_device *dev, bool state);
66094
66095 /* Driver private ops for this object */
66096 - struct vm_operations_struct *gem_vm_ops;
66097 + const struct vm_operations_struct *gem_vm_ops;
66098
66099 int major;
66100 int minor;
66101 @@ -917,7 +918,7 @@ struct drm_device {
66102
66103 /** \name Usage Counters */
66104 /*@{ */
66105 - int open_count; /**< Outstanding files open */
66106 + local_t open_count; /**< Outstanding files open */
66107 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
66108 atomic_t vma_count; /**< Outstanding vma areas open */
66109 int buf_use; /**< Buffers in use -- cannot alloc */
66110 @@ -928,7 +929,7 @@ struct drm_device {
66111 /*@{ */
66112 unsigned long counters;
66113 enum drm_stat_type types[15];
66114 - atomic_t counts[15];
66115 + atomic_unchecked_t counts[15];
66116 /*@} */
66117
66118 struct list_head filelist;
66119 @@ -1016,7 +1017,7 @@ struct drm_device {
66120 struct pci_controller *hose;
66121 #endif
66122 struct drm_sg_mem *sg; /**< Scatter gather memory */
66123 - unsigned int num_crtcs; /**< Number of CRTCs on this device */
66124 + unsigned int num_crtcs; /**< Number of CRTCs on this device */
66125 void *dev_private; /**< device private data */
66126 void *mm_private;
66127 struct address_space *dev_mapping;
66128 @@ -1042,11 +1043,11 @@ struct drm_device {
66129 spinlock_t object_name_lock;
66130 struct idr object_name_idr;
66131 atomic_t object_count;
66132 - atomic_t object_memory;
66133 + atomic_unchecked_t object_memory;
66134 atomic_t pin_count;
66135 - atomic_t pin_memory;
66136 + atomic_unchecked_t pin_memory;
66137 atomic_t gtt_count;
66138 - atomic_t gtt_memory;
66139 + atomic_unchecked_t gtt_memory;
66140 uint32_t gtt_total;
66141 uint32_t invalidate_domains; /* domains pending invalidation */
66142 uint32_t flush_domains; /* domains pending flush */
66143 diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
66144 index b29e201..3413cc9 100644
66145 --- a/include/drm/drm_crtc_helper.h
66146 +++ b/include/drm/drm_crtc_helper.h
66147 @@ -64,7 +64,7 @@ struct drm_crtc_helper_funcs {
66148
66149 /* reload the current crtc LUT */
66150 void (*load_lut)(struct drm_crtc *crtc);
66151 -};
66152 +} __no_const;
66153
66154 struct drm_encoder_helper_funcs {
66155 void (*dpms)(struct drm_encoder *encoder, int mode);
66156 @@ -85,7 +85,7 @@ struct drm_encoder_helper_funcs {
66157 struct drm_connector *connector);
66158 /* disable encoder when not in use - more explicit than dpms off */
66159 void (*disable)(struct drm_encoder *encoder);
66160 -};
66161 +} __no_const;
66162
66163 struct drm_connector_helper_funcs {
66164 int (*get_modes)(struct drm_connector *connector);
66165 diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
66166 index b199170..6f9e64c 100644
66167 --- a/include/drm/ttm/ttm_memory.h
66168 +++ b/include/drm/ttm/ttm_memory.h
66169 @@ -47,7 +47,7 @@
66170
66171 struct ttm_mem_shrink {
66172 int (*do_shrink) (struct ttm_mem_shrink *);
66173 -};
66174 +} __no_const;
66175
66176 /**
66177 * struct ttm_mem_global - Global memory accounting structure.
66178 diff --git a/include/linux/a.out.h b/include/linux/a.out.h
66179 index e86dfca..40cc55f 100644
66180 --- a/include/linux/a.out.h
66181 +++ b/include/linux/a.out.h
66182 @@ -39,6 +39,14 @@ enum machine_type {
66183 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
66184 };
66185
66186 +/* Constants for the N_FLAGS field */
66187 +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
66188 +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
66189 +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
66190 +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
66191 +/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
66192 +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
66193 +
66194 #if !defined (N_MAGIC)
66195 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
66196 #endif
66197 diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
66198 index 817b237..62c10bc 100644
66199 --- a/include/linux/atmdev.h
66200 +++ b/include/linux/atmdev.h
66201 @@ -237,7 +237,7 @@ struct compat_atm_iobuf {
66202 #endif
66203
66204 struct k_atm_aal_stats {
66205 -#define __HANDLE_ITEM(i) atomic_t i
66206 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
66207 __AAL_STAT_ITEMS
66208 #undef __HANDLE_ITEM
66209 };
66210 diff --git a/include/linux/backlight.h b/include/linux/backlight.h
66211 index 0f5f578..8c4f884 100644
66212 --- a/include/linux/backlight.h
66213 +++ b/include/linux/backlight.h
66214 @@ -36,18 +36,18 @@ struct backlight_device;
66215 struct fb_info;
66216
66217 struct backlight_ops {
66218 - unsigned int options;
66219 + const unsigned int options;
66220
66221 #define BL_CORE_SUSPENDRESUME (1 << 0)
66222
66223 /* Notify the backlight driver some property has changed */
66224 - int (*update_status)(struct backlight_device *);
66225 + int (* const update_status)(struct backlight_device *);
66226 /* Return the current backlight brightness (accounting for power,
66227 fb_blank etc.) */
66228 - int (*get_brightness)(struct backlight_device *);
66229 + int (* const get_brightness)(struct backlight_device *);
66230 /* Check if given framebuffer device is the one bound to this backlight;
66231 return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */
66232 - int (*check_fb)(struct fb_info *);
66233 + int (* const check_fb)(struct fb_info *);
66234 };
66235
66236 /* This structure defines all the properties of a backlight */
66237 @@ -86,7 +86,7 @@ struct backlight_device {
66238 registered this device has been unloaded, and if class_get_devdata()
66239 points to something in the body of that driver, it is also invalid. */
66240 struct mutex ops_lock;
66241 - struct backlight_ops *ops;
66242 + const struct backlight_ops *ops;
66243
66244 /* The framebuffer notifier block */
66245 struct notifier_block fb_notif;
66246 @@ -103,7 +103,7 @@ static inline void backlight_update_status(struct backlight_device *bd)
66247 }
66248
66249 extern struct backlight_device *backlight_device_register(const char *name,
66250 - struct device *dev, void *devdata, struct backlight_ops *ops);
66251 + struct device *dev, void *devdata, const struct backlight_ops *ops);
66252 extern void backlight_device_unregister(struct backlight_device *bd);
66253 extern void backlight_force_update(struct backlight_device *bd,
66254 enum backlight_update_reason reason);
66255 diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
66256 index a3d802e..93a2ef4 100644
66257 --- a/include/linux/binfmts.h
66258 +++ b/include/linux/binfmts.h
66259 @@ -18,7 +18,7 @@ struct pt_regs;
66260 #define BINPRM_BUF_SIZE 128
66261
66262 #ifdef __KERNEL__
66263 -#include <linux/list.h>
66264 +#include <linux/sched.h>
66265
66266 #define CORENAME_MAX_SIZE 128
66267
66268 @@ -58,6 +58,7 @@ struct linux_binprm{
66269 unsigned interp_flags;
66270 unsigned interp_data;
66271 unsigned long loader, exec;
66272 + char tcomm[TASK_COMM_LEN];
66273 };
66274
66275 extern void acct_arg_size(struct linux_binprm *bprm, unsigned long pages);
66276 @@ -83,6 +84,7 @@ struct linux_binfmt {
66277 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
66278 int (*load_shlib)(struct file *);
66279 int (*core_dump)(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
66280 + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
66281 unsigned long min_coredump; /* minimal dump size */
66282 int hasvdso;
66283 };
66284 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
66285 index 5eb6cb0..a2906d2 100644
66286 --- a/include/linux/blkdev.h
66287 +++ b/include/linux/blkdev.h
66288 @@ -1281,7 +1281,7 @@ struct block_device_operations {
66289 int (*revalidate_disk) (struct gendisk *);
66290 int (*getgeo)(struct block_device *, struct hd_geometry *);
66291 struct module *owner;
66292 -};
66293 +} __do_const;
66294
66295 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
66296 unsigned long);
66297 diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
66298 index 3b73b99..629d21b 100644
66299 --- a/include/linux/blktrace_api.h
66300 +++ b/include/linux/blktrace_api.h
66301 @@ -160,7 +160,7 @@ struct blk_trace {
66302 struct dentry *dir;
66303 struct dentry *dropped_file;
66304 struct dentry *msg_file;
66305 - atomic_t dropped;
66306 + atomic_unchecked_t dropped;
66307 };
66308
66309 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
66310 diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
66311 index 83195fb..0b0f77d 100644
66312 --- a/include/linux/byteorder/little_endian.h
66313 +++ b/include/linux/byteorder/little_endian.h
66314 @@ -42,51 +42,51 @@
66315
66316 static inline __le64 __cpu_to_le64p(const __u64 *p)
66317 {
66318 - return (__force __le64)*p;
66319 + return (__force const __le64)*p;
66320 }
66321 static inline __u64 __le64_to_cpup(const __le64 *p)
66322 {
66323 - return (__force __u64)*p;
66324 + return (__force const __u64)*p;
66325 }
66326 static inline __le32 __cpu_to_le32p(const __u32 *p)
66327 {
66328 - return (__force __le32)*p;
66329 + return (__force const __le32)*p;
66330 }
66331 static inline __u32 __le32_to_cpup(const __le32 *p)
66332 {
66333 - return (__force __u32)*p;
66334 + return (__force const __u32)*p;
66335 }
66336 static inline __le16 __cpu_to_le16p(const __u16 *p)
66337 {
66338 - return (__force __le16)*p;
66339 + return (__force const __le16)*p;
66340 }
66341 static inline __u16 __le16_to_cpup(const __le16 *p)
66342 {
66343 - return (__force __u16)*p;
66344 + return (__force const __u16)*p;
66345 }
66346 static inline __be64 __cpu_to_be64p(const __u64 *p)
66347 {
66348 - return (__force __be64)__swab64p(p);
66349 + return (__force const __be64)__swab64p(p);
66350 }
66351 static inline __u64 __be64_to_cpup(const __be64 *p)
66352 {
66353 - return __swab64p((__u64 *)p);
66354 + return __swab64p((const __u64 *)p);
66355 }
66356 static inline __be32 __cpu_to_be32p(const __u32 *p)
66357 {
66358 - return (__force __be32)__swab32p(p);
66359 + return (__force const __be32)__swab32p(p);
66360 }
66361 static inline __u32 __be32_to_cpup(const __be32 *p)
66362 {
66363 - return __swab32p((__u32 *)p);
66364 + return __swab32p((const __u32 *)p);
66365 }
66366 static inline __be16 __cpu_to_be16p(const __u16 *p)
66367 {
66368 - return (__force __be16)__swab16p(p);
66369 + return (__force const __be16)__swab16p(p);
66370 }
66371 static inline __u16 __be16_to_cpup(const __be16 *p)
66372 {
66373 - return __swab16p((__u16 *)p);
66374 + return __swab16p((const __u16 *)p);
66375 }
66376 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
66377 #define __le64_to_cpus(x) do { (void)(x); } while (0)
66378 diff --git a/include/linux/cache.h b/include/linux/cache.h
66379 index 97e2488..e7576b9 100644
66380 --- a/include/linux/cache.h
66381 +++ b/include/linux/cache.h
66382 @@ -16,6 +16,10 @@
66383 #define __read_mostly
66384 #endif
66385
66386 +#ifndef __read_only
66387 +#define __read_only __read_mostly
66388 +#endif
66389 +
66390 #ifndef ____cacheline_aligned
66391 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
66392 #endif
66393 diff --git a/include/linux/capability.h b/include/linux/capability.h
66394 index c8f2a5f7..1618a5c 100644
66395 --- a/include/linux/capability.h
66396 +++ b/include/linux/capability.h
66397 @@ -563,6 +563,7 @@ extern const kernel_cap_t __cap_init_eff_set;
66398 (security_real_capable_noaudit((t), (cap)) == 0)
66399
66400 extern int capable(int cap);
66401 +int capable_nolog(int cap);
66402
66403 /* audit system wants to get cap info from files as well */
66404 struct dentry;
66405 diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
66406 index 450fa59..86019fb 100644
66407 --- a/include/linux/compiler-gcc4.h
66408 +++ b/include/linux/compiler-gcc4.h
66409 @@ -36,4 +36,16 @@
66410 the kernel context */
66411 #define __cold __attribute__((__cold__))
66412
66413 +#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
66414 +#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
66415 +#define __bos0(ptr) __bos((ptr), 0)
66416 +#define __bos1(ptr) __bos((ptr), 1)
66417 +
66418 +#if __GNUC_MINOR__ >= 5
66419 +#ifdef CONSTIFY_PLUGIN
66420 +#define __no_const __attribute__((no_const))
66421 +#define __do_const __attribute__((do_const))
66422 +#endif
66423 +#endif
66424 +
66425 #endif
66426 diff --git a/include/linux/compiler.h b/include/linux/compiler.h
66427 index 04fb513..fd6477b 100644
66428 --- a/include/linux/compiler.h
66429 +++ b/include/linux/compiler.h
66430 @@ -5,11 +5,14 @@
66431
66432 #ifdef __CHECKER__
66433 # define __user __attribute__((noderef, address_space(1)))
66434 +# define __force_user __force __user
66435 # define __kernel /* default address space */
66436 +# define __force_kernel __force __kernel
66437 # define __safe __attribute__((safe))
66438 # define __force __attribute__((force))
66439 # define __nocast __attribute__((nocast))
66440 # define __iomem __attribute__((noderef, address_space(2)))
66441 +# define __force_iomem __force __iomem
66442 # define __acquires(x) __attribute__((context(x,0,1)))
66443 # define __releases(x) __attribute__((context(x,1,0)))
66444 # define __acquire(x) __context__(x,1)
66445 @@ -17,13 +20,34 @@
66446 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
66447 extern void __chk_user_ptr(const volatile void __user *);
66448 extern void __chk_io_ptr(const volatile void __iomem *);
66449 +#elif defined(CHECKER_PLUGIN)
66450 +//# define __user
66451 +//# define __force_user
66452 +//# define __kernel
66453 +//# define __force_kernel
66454 +# define __safe
66455 +# define __force
66456 +# define __nocast
66457 +# define __iomem
66458 +# define __force_iomem
66459 +# define __chk_user_ptr(x) (void)0
66460 +# define __chk_io_ptr(x) (void)0
66461 +# define __builtin_warning(x, y...) (1)
66462 +# define __acquires(x)
66463 +# define __releases(x)
66464 +# define __acquire(x) (void)0
66465 +# define __release(x) (void)0
66466 +# define __cond_lock(x,c) (c)
66467 #else
66468 # define __user
66469 +# define __force_user
66470 # define __kernel
66471 +# define __force_kernel
66472 # define __safe
66473 # define __force
66474 # define __nocast
66475 # define __iomem
66476 +# define __force_iomem
66477 # define __chk_user_ptr(x) (void)0
66478 # define __chk_io_ptr(x) (void)0
66479 # define __builtin_warning(x, y...) (1)
66480 @@ -247,6 +271,14 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
66481 # define __attribute_const__ /* unimplemented */
66482 #endif
66483
66484 +#ifndef __no_const
66485 +# define __no_const
66486 +#endif
66487 +
66488 +#ifndef __do_const
66489 +# define __do_const
66490 +#endif
66491 +
66492 /*
66493 * Tell gcc if a function is cold. The compiler will assume any path
66494 * directly leading to the call is unlikely.
66495 @@ -256,6 +288,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
66496 #define __cold
66497 #endif
66498
66499 +#ifndef __alloc_size
66500 +#define __alloc_size(...)
66501 +#endif
66502 +
66503 +#ifndef __bos
66504 +#define __bos(ptr, arg)
66505 +#endif
66506 +
66507 +#ifndef __bos0
66508 +#define __bos0(ptr)
66509 +#endif
66510 +
66511 +#ifndef __bos1
66512 +#define __bos1(ptr)
66513 +#endif
66514 +
66515 /* Simple shorthand for a section definition */
66516 #ifndef __section
66517 # define __section(S) __attribute__ ((__section__(#S)))
66518 @@ -278,6 +326,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
66519 * use is to mediate communication between process-level code and irq/NMI
66520 * handlers, all running on the same CPU.
66521 */
66522 -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
66523 +#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
66524 +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
66525
66526 #endif /* __LINUX_COMPILER_H */
66527 diff --git a/include/linux/crypto.h b/include/linux/crypto.h
66528 index fd92988..a3164bd 100644
66529 --- a/include/linux/crypto.h
66530 +++ b/include/linux/crypto.h
66531 @@ -394,7 +394,7 @@ struct cipher_tfm {
66532 const u8 *key, unsigned int keylen);
66533 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
66534 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
66535 -};
66536 +} __no_const;
66537
66538 struct hash_tfm {
66539 int (*init)(struct hash_desc *desc);
66540 @@ -415,13 +415,13 @@ struct compress_tfm {
66541 int (*cot_decompress)(struct crypto_tfm *tfm,
66542 const u8 *src, unsigned int slen,
66543 u8 *dst, unsigned int *dlen);
66544 -};
66545 +} __no_const;
66546
66547 struct rng_tfm {
66548 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
66549 unsigned int dlen);
66550 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
66551 -};
66552 +} __no_const;
66553
66554 #define crt_ablkcipher crt_u.ablkcipher
66555 #define crt_aead crt_u.aead
66556 diff --git a/include/linux/dcache.h b/include/linux/dcache.h
66557 index 30b93b2..cd7a8db 100644
66558 --- a/include/linux/dcache.h
66559 +++ b/include/linux/dcache.h
66560 @@ -119,6 +119,8 @@ struct dentry {
66561 unsigned char d_iname[DNAME_INLINE_LEN_MIN]; /* small names */
66562 };
66563
66564 +#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
66565 +
66566 /*
66567 * dentry->d_lock spinlock nesting subclasses:
66568 *
66569 diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
66570 index 3e9bd6a..f4e1aa0 100644
66571 --- a/include/linux/decompress/mm.h
66572 +++ b/include/linux/decompress/mm.h
66573 @@ -78,7 +78,7 @@ static void free(void *where)
66574 * warnings when not needed (indeed large_malloc / large_free are not
66575 * needed by inflate */
66576
66577 -#define malloc(a) kmalloc(a, GFP_KERNEL)
66578 +#define malloc(a) kmalloc((a), GFP_KERNEL)
66579 #define free(a) kfree(a)
66580
66581 #define large_malloc(a) vmalloc(a)
66582 diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
66583 index 91b7618..92a93d32 100644
66584 --- a/include/linux/dma-mapping.h
66585 +++ b/include/linux/dma-mapping.h
66586 @@ -16,51 +16,51 @@ enum dma_data_direction {
66587 };
66588
66589 struct dma_map_ops {
66590 - void* (*alloc_coherent)(struct device *dev, size_t size,
66591 + void* (* const alloc_coherent)(struct device *dev, size_t size,
66592 dma_addr_t *dma_handle, gfp_t gfp);
66593 - void (*free_coherent)(struct device *dev, size_t size,
66594 + void (* const free_coherent)(struct device *dev, size_t size,
66595 void *vaddr, dma_addr_t dma_handle);
66596 - dma_addr_t (*map_page)(struct device *dev, struct page *page,
66597 + dma_addr_t (* const map_page)(struct device *dev, struct page *page,
66598 unsigned long offset, size_t size,
66599 enum dma_data_direction dir,
66600 struct dma_attrs *attrs);
66601 - void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
66602 + void (* const unmap_page)(struct device *dev, dma_addr_t dma_handle,
66603 size_t size, enum dma_data_direction dir,
66604 struct dma_attrs *attrs);
66605 - int (*map_sg)(struct device *dev, struct scatterlist *sg,
66606 + int (* const map_sg)(struct device *dev, struct scatterlist *sg,
66607 int nents, enum dma_data_direction dir,
66608 struct dma_attrs *attrs);
66609 - void (*unmap_sg)(struct device *dev,
66610 + void (* const unmap_sg)(struct device *dev,
66611 struct scatterlist *sg, int nents,
66612 enum dma_data_direction dir,
66613 struct dma_attrs *attrs);
66614 - void (*sync_single_for_cpu)(struct device *dev,
66615 + void (* const sync_single_for_cpu)(struct device *dev,
66616 dma_addr_t dma_handle, size_t size,
66617 enum dma_data_direction dir);
66618 - void (*sync_single_for_device)(struct device *dev,
66619 + void (* const sync_single_for_device)(struct device *dev,
66620 dma_addr_t dma_handle, size_t size,
66621 enum dma_data_direction dir);
66622 - void (*sync_single_range_for_cpu)(struct device *dev,
66623 + void (* const sync_single_range_for_cpu)(struct device *dev,
66624 dma_addr_t dma_handle,
66625 unsigned long offset,
66626 size_t size,
66627 enum dma_data_direction dir);
66628 - void (*sync_single_range_for_device)(struct device *dev,
66629 + void (* const sync_single_range_for_device)(struct device *dev,
66630 dma_addr_t dma_handle,
66631 unsigned long offset,
66632 size_t size,
66633 enum dma_data_direction dir);
66634 - void (*sync_sg_for_cpu)(struct device *dev,
66635 + void (* const sync_sg_for_cpu)(struct device *dev,
66636 struct scatterlist *sg, int nents,
66637 enum dma_data_direction dir);
66638 - void (*sync_sg_for_device)(struct device *dev,
66639 + void (* const sync_sg_for_device)(struct device *dev,
66640 struct scatterlist *sg, int nents,
66641 enum dma_data_direction dir);
66642 - int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
66643 - int (*dma_supported)(struct device *dev, u64 mask);
66644 + int (* const mapping_error)(struct device *dev, dma_addr_t dma_addr);
66645 + int (* const dma_supported)(struct device *dev, u64 mask);
66646 int (*set_dma_mask)(struct device *dev, u64 mask);
66647 int is_phys;
66648 -};
66649 +} __do_const;
66650
66651 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
66652
66653 diff --git a/include/linux/dst.h b/include/linux/dst.h
66654 index e26fed8..b976d9f 100644
66655 --- a/include/linux/dst.h
66656 +++ b/include/linux/dst.h
66657 @@ -380,7 +380,7 @@ struct dst_node
66658 struct thread_pool *pool;
66659
66660 /* Transaction IDs live here */
66661 - atomic_long_t gen;
66662 + atomic_long_unchecked_t gen;
66663
66664 /*
66665 * How frequently and how many times transaction
66666 diff --git a/include/linux/elf.h b/include/linux/elf.h
66667 index 90a4ed0..d652617 100644
66668 --- a/include/linux/elf.h
66669 +++ b/include/linux/elf.h
66670 @@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
66671 #define PT_GNU_EH_FRAME 0x6474e550
66672
66673 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
66674 +#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
66675 +
66676 +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
66677 +
66678 +/* Constants for the e_flags field */
66679 +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
66680 +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
66681 +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
66682 +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
66683 +/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
66684 +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
66685
66686 /* These constants define the different elf file types */
66687 #define ET_NONE 0
66688 @@ -84,6 +95,8 @@ typedef __s64 Elf64_Sxword;
66689 #define DT_DEBUG 21
66690 #define DT_TEXTREL 22
66691 #define DT_JMPREL 23
66692 +#define DT_FLAGS 30
66693 + #define DF_TEXTREL 0x00000004
66694 #define DT_ENCODING 32
66695 #define OLD_DT_LOOS 0x60000000
66696 #define DT_LOOS 0x6000000d
66697 @@ -230,6 +243,19 @@ typedef struct elf64_hdr {
66698 #define PF_W 0x2
66699 #define PF_X 0x1
66700
66701 +#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
66702 +#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
66703 +#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
66704 +#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
66705 +#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
66706 +#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
66707 +/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
66708 +/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
66709 +#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
66710 +#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
66711 +#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
66712 +#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
66713 +
66714 typedef struct elf32_phdr{
66715 Elf32_Word p_type;
66716 Elf32_Off p_offset;
66717 @@ -322,6 +348,8 @@ typedef struct elf64_shdr {
66718 #define EI_OSABI 7
66719 #define EI_PAD 8
66720
66721 +#define EI_PAX 14
66722 +
66723 #define ELFMAG0 0x7f /* EI_MAG */
66724 #define ELFMAG1 'E'
66725 #define ELFMAG2 'L'
66726 @@ -386,6 +414,7 @@ extern Elf32_Dyn _DYNAMIC [];
66727 #define elf_phdr elf32_phdr
66728 #define elf_note elf32_note
66729 #define elf_addr_t Elf32_Off
66730 +#define elf_dyn Elf32_Dyn
66731
66732 #else
66733
66734 @@ -394,6 +423,7 @@ extern Elf64_Dyn _DYNAMIC [];
66735 #define elf_phdr elf64_phdr
66736 #define elf_note elf64_note
66737 #define elf_addr_t Elf64_Off
66738 +#define elf_dyn Elf64_Dyn
66739
66740 #endif
66741
66742 diff --git a/include/linux/fs.h b/include/linux/fs.h
66743 index 1b9a47a..6fe2934 100644
66744 --- a/include/linux/fs.h
66745 +++ b/include/linux/fs.h
66746 @@ -568,41 +568,41 @@ typedef int (*read_actor_t)(read_descriptor_t *, struct page *,
66747 unsigned long, unsigned long);
66748
66749 struct address_space_operations {
66750 - int (*writepage)(struct page *page, struct writeback_control *wbc);
66751 - int (*readpage)(struct file *, struct page *);
66752 - void (*sync_page)(struct page *);
66753 + int (* const writepage)(struct page *page, struct writeback_control *wbc);
66754 + int (* const readpage)(struct file *, struct page *);
66755 + void (* const sync_page)(struct page *);
66756
66757 /* Write back some dirty pages from this mapping. */
66758 - int (*writepages)(struct address_space *, struct writeback_control *);
66759 + int (* const writepages)(struct address_space *, struct writeback_control *);
66760
66761 /* Set a page dirty. Return true if this dirtied it */
66762 - int (*set_page_dirty)(struct page *page);
66763 + int (* const set_page_dirty)(struct page *page);
66764
66765 - int (*readpages)(struct file *filp, struct address_space *mapping,
66766 + int (* const readpages)(struct file *filp, struct address_space *mapping,
66767 struct list_head *pages, unsigned nr_pages);
66768
66769 - int (*write_begin)(struct file *, struct address_space *mapping,
66770 + int (* const write_begin)(struct file *, struct address_space *mapping,
66771 loff_t pos, unsigned len, unsigned flags,
66772 struct page **pagep, void **fsdata);
66773 - int (*write_end)(struct file *, struct address_space *mapping,
66774 + int (* const write_end)(struct file *, struct address_space *mapping,
66775 loff_t pos, unsigned len, unsigned copied,
66776 struct page *page, void *fsdata);
66777
66778 /* Unfortunately this kludge is needed for FIBMAP. Don't use it */
66779 - sector_t (*bmap)(struct address_space *, sector_t);
66780 - void (*invalidatepage) (struct page *, unsigned long);
66781 - int (*releasepage) (struct page *, gfp_t);
66782 - ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
66783 + sector_t (* const bmap)(struct address_space *, sector_t);
66784 + void (* const invalidatepage) (struct page *, unsigned long);
66785 + int (* const releasepage) (struct page *, gfp_t);
66786 + ssize_t (* const direct_IO)(int, struct kiocb *, const struct iovec *iov,
66787 loff_t offset, unsigned long nr_segs);
66788 - int (*get_xip_mem)(struct address_space *, pgoff_t, int,
66789 + int (* const get_xip_mem)(struct address_space *, pgoff_t, int,
66790 void **, unsigned long *);
66791 /* migrate the contents of a page to the specified target */
66792 - int (*migratepage) (struct address_space *,
66793 + int (* const migratepage) (struct address_space *,
66794 struct page *, struct page *);
66795 - int (*launder_page) (struct page *);
66796 - int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
66797 + int (* const launder_page) (struct page *);
66798 + int (* const is_partially_uptodate) (struct page *, read_descriptor_t *,
66799 unsigned long);
66800 - int (*error_remove_page)(struct address_space *, struct page *);
66801 + int (* const error_remove_page)(struct address_space *, struct page *);
66802 };
66803
66804 /*
66805 @@ -1031,19 +1031,19 @@ static inline int file_check_writeable(struct file *filp)
66806 typedef struct files_struct *fl_owner_t;
66807
66808 struct file_lock_operations {
66809 - void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
66810 - void (*fl_release_private)(struct file_lock *);
66811 + void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
66812 + void (* const fl_release_private)(struct file_lock *);
66813 };
66814
66815 struct lock_manager_operations {
66816 - int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
66817 - void (*fl_notify)(struct file_lock *); /* unblock callback */
66818 - int (*fl_grant)(struct file_lock *, struct file_lock *, int);
66819 - void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
66820 - void (*fl_release_private)(struct file_lock *);
66821 - void (*fl_break)(struct file_lock *);
66822 - int (*fl_mylease)(struct file_lock *, struct file_lock *);
66823 - int (*fl_change)(struct file_lock **, int);
66824 + int (* const fl_compare_owner)(struct file_lock *, struct file_lock *);
66825 + void (* const fl_notify)(struct file_lock *); /* unblock callback */
66826 + int (* const fl_grant)(struct file_lock *, struct file_lock *, int);
66827 + void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
66828 + void (* const fl_release_private)(struct file_lock *);
66829 + void (* const fl_break)(struct file_lock *);
66830 + int (* const fl_mylease)(struct file_lock *, struct file_lock *);
66831 + int (* const fl_change)(struct file_lock **, int);
66832 };
66833
66834 struct lock_manager {
66835 @@ -1442,7 +1442,7 @@ struct fiemap_extent_info {
66836 unsigned int fi_flags; /* Flags as passed from user */
66837 unsigned int fi_extents_mapped; /* Number of mapped extents */
66838 unsigned int fi_extents_max; /* Size of fiemap_extent array */
66839 - struct fiemap_extent *fi_extents_start; /* Start of fiemap_extent
66840 + struct fiemap_extent __user *fi_extents_start; /* Start of fiemap_extent
66841 * array */
66842 };
66843 int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
66844 @@ -1512,7 +1512,8 @@ struct file_operations {
66845 ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int);
66846 ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
66847 int (*setlease)(struct file *, long, struct file_lock **);
66848 -};
66849 +} __do_const;
66850 +typedef struct file_operations __no_const file_operations_no_const;
66851
66852 struct inode_operations {
66853 int (*create) (struct inode *,struct dentry *,int, struct nameidata *);
66854 @@ -1559,30 +1560,30 @@ extern ssize_t vfs_writev(struct file *, const struct iovec __user *,
66855 unsigned long, loff_t *);
66856
66857 struct super_operations {
66858 - struct inode *(*alloc_inode)(struct super_block *sb);
66859 - void (*destroy_inode)(struct inode *);
66860 + struct inode *(* const alloc_inode)(struct super_block *sb);
66861 + void (* const destroy_inode)(struct inode *);
66862
66863 - void (*dirty_inode) (struct inode *);
66864 - int (*write_inode) (struct inode *, int);
66865 - void (*drop_inode) (struct inode *);
66866 - void (*delete_inode) (struct inode *);
66867 - void (*put_super) (struct super_block *);
66868 - void (*write_super) (struct super_block *);
66869 - int (*sync_fs)(struct super_block *sb, int wait);
66870 - int (*freeze_fs) (struct super_block *);
66871 - int (*unfreeze_fs) (struct super_block *);
66872 - int (*statfs) (struct dentry *, struct kstatfs *);
66873 - int (*remount_fs) (struct super_block *, int *, char *);
66874 - void (*clear_inode) (struct inode *);
66875 - void (*umount_begin) (struct super_block *);
66876 + void (* const dirty_inode) (struct inode *);
66877 + int (* const write_inode) (struct inode *, int);
66878 + void (* const drop_inode) (struct inode *);
66879 + void (* const delete_inode) (struct inode *);
66880 + void (* const put_super) (struct super_block *);
66881 + void (* const write_super) (struct super_block *);
66882 + int (* const sync_fs)(struct super_block *sb, int wait);
66883 + int (* const freeze_fs) (struct super_block *);
66884 + int (* const unfreeze_fs) (struct super_block *);
66885 + int (* const statfs) (struct dentry *, struct kstatfs *);
66886 + int (* const remount_fs) (struct super_block *, int *, char *);
66887 + void (* const clear_inode) (struct inode *);
66888 + void (* const umount_begin) (struct super_block *);
66889
66890 - int (*show_options)(struct seq_file *, struct vfsmount *);
66891 - int (*show_stats)(struct seq_file *, struct vfsmount *);
66892 + int (* const show_options)(struct seq_file *, struct vfsmount *);
66893 + int (* const show_stats)(struct seq_file *, struct vfsmount *);
66894 #ifdef CONFIG_QUOTA
66895 - ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
66896 - ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
66897 + ssize_t (* const quota_read)(struct super_block *, int, char *, size_t, loff_t);
66898 + ssize_t (* const quota_write)(struct super_block *, int, const char *, size_t, loff_t);
66899 #endif
66900 - int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
66901 + int (* const bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
66902 };
66903
66904 /*
66905 diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
66906 index 78a05bf..2a7d3e1 100644
66907 --- a/include/linux/fs_struct.h
66908 +++ b/include/linux/fs_struct.h
66909 @@ -4,7 +4,7 @@
66910 #include <linux/path.h>
66911
66912 struct fs_struct {
66913 - int users;
66914 + atomic_t users;
66915 rwlock_t lock;
66916 int umask;
66917 int in_exec;
66918 diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
66919 index 7be0c6f..2f63a2b 100644
66920 --- a/include/linux/fscache-cache.h
66921 +++ b/include/linux/fscache-cache.h
66922 @@ -116,7 +116,7 @@ struct fscache_operation {
66923 #endif
66924 };
66925
66926 -extern atomic_t fscache_op_debug_id;
66927 +extern atomic_unchecked_t fscache_op_debug_id;
66928 extern const struct slow_work_ops fscache_op_slow_work_ops;
66929
66930 extern void fscache_enqueue_operation(struct fscache_operation *);
66931 @@ -134,7 +134,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
66932 fscache_operation_release_t release)
66933 {
66934 atomic_set(&op->usage, 1);
66935 - op->debug_id = atomic_inc_return(&fscache_op_debug_id);
66936 + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
66937 op->release = release;
66938 INIT_LIST_HEAD(&op->pend_link);
66939 fscache_set_op_state(op, "Init");
66940 diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
66941 index 4d6f47b..00bcedb 100644
66942 --- a/include/linux/fsnotify_backend.h
66943 +++ b/include/linux/fsnotify_backend.h
66944 @@ -86,6 +86,7 @@ struct fsnotify_ops {
66945 void (*freeing_mark)(struct fsnotify_mark_entry *entry, struct fsnotify_group *group);
66946 void (*free_event_priv)(struct fsnotify_event_private_data *priv);
66947 };
66948 +typedef struct fsnotify_ops __no_const fsnotify_ops_no_const;
66949
66950 /*
66951 * A group is a "thing" that wants to receive notification about filesystem
66952 diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
66953 index 4ec5e67..42f1eb9 100644
66954 --- a/include/linux/ftrace_event.h
66955 +++ b/include/linux/ftrace_event.h
66956 @@ -163,7 +163,7 @@ extern int trace_define_field(struct ftrace_event_call *call,
66957 int filter_type);
66958 extern int trace_define_common_fields(struct ftrace_event_call *call);
66959
66960 -#define is_signed_type(type) (((type)(-1)) < 0)
66961 +#define is_signed_type(type) (((type)(-1)) < (type)1)
66962
66963 int trace_set_clr_event(const char *system, const char *event, int set);
66964
66965 diff --git a/include/linux/genhd.h b/include/linux/genhd.h
66966 index 297df45..b6a74ff 100644
66967 --- a/include/linux/genhd.h
66968 +++ b/include/linux/genhd.h
66969 @@ -161,7 +161,7 @@ struct gendisk {
66970
66971 struct timer_rand_state *random;
66972
66973 - atomic_t sync_io; /* RAID */
66974 + atomic_unchecked_t sync_io; /* RAID */
66975 struct work_struct async_notify;
66976 #ifdef CONFIG_BLK_DEV_INTEGRITY
66977 struct blk_integrity *integrity;
66978 diff --git a/include/linux/gracl.h b/include/linux/gracl.h
66979 new file mode 100644
66980 index 0000000..af663cf
66981 --- /dev/null
66982 +++ b/include/linux/gracl.h
66983 @@ -0,0 +1,319 @@
66984 +#ifndef GR_ACL_H
66985 +#define GR_ACL_H
66986 +
66987 +#include <linux/grdefs.h>
66988 +#include <linux/resource.h>
66989 +#include <linux/capability.h>
66990 +#include <linux/dcache.h>
66991 +#include <asm/resource.h>
66992 +
66993 +/* Major status information */
66994 +
66995 +#define GR_VERSION "grsecurity 2.9"
66996 +#define GRSECURITY_VERSION 0x2900
66997 +
66998 +enum {
66999 + GR_SHUTDOWN = 0,
67000 + GR_ENABLE = 1,
67001 + GR_SPROLE = 2,
67002 + GR_RELOAD = 3,
67003 + GR_SEGVMOD = 4,
67004 + GR_STATUS = 5,
67005 + GR_UNSPROLE = 6,
67006 + GR_PASSSET = 7,
67007 + GR_SPROLEPAM = 8,
67008 +};
67009 +
67010 +/* Password setup definitions
67011 + * kernel/grhash.c */
67012 +enum {
67013 + GR_PW_LEN = 128,
67014 + GR_SALT_LEN = 16,
67015 + GR_SHA_LEN = 32,
67016 +};
67017 +
67018 +enum {
67019 + GR_SPROLE_LEN = 64,
67020 +};
67021 +
67022 +enum {
67023 + GR_NO_GLOB = 0,
67024 + GR_REG_GLOB,
67025 + GR_CREATE_GLOB
67026 +};
67027 +
67028 +#define GR_NLIMITS 32
67029 +
67030 +/* Begin Data Structures */
67031 +
67032 +struct sprole_pw {
67033 + unsigned char *rolename;
67034 + unsigned char salt[GR_SALT_LEN];
67035 + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
67036 +};
67037 +
67038 +struct name_entry {
67039 + __u32 key;
67040 + ino_t inode;
67041 + dev_t device;
67042 + char *name;
67043 + __u16 len;
67044 + __u8 deleted;
67045 + struct name_entry *prev;
67046 + struct name_entry *next;
67047 +};
67048 +
67049 +struct inodev_entry {
67050 + struct name_entry *nentry;
67051 + struct inodev_entry *prev;
67052 + struct inodev_entry *next;
67053 +};
67054 +
67055 +struct acl_role_db {
67056 + struct acl_role_label **r_hash;
67057 + __u32 r_size;
67058 +};
67059 +
67060 +struct inodev_db {
67061 + struct inodev_entry **i_hash;
67062 + __u32 i_size;
67063 +};
67064 +
67065 +struct name_db {
67066 + struct name_entry **n_hash;
67067 + __u32 n_size;
67068 +};
67069 +
67070 +struct crash_uid {
67071 + uid_t uid;
67072 + unsigned long expires;
67073 +};
67074 +
67075 +struct gr_hash_struct {
67076 + void **table;
67077 + void **nametable;
67078 + void *first;
67079 + __u32 table_size;
67080 + __u32 used_size;
67081 + int type;
67082 +};
67083 +
67084 +/* Userspace Grsecurity ACL data structures */
67085 +
67086 +struct acl_subject_label {
67087 + char *filename;
67088 + ino_t inode;
67089 + dev_t device;
67090 + __u32 mode;
67091 + kernel_cap_t cap_mask;
67092 + kernel_cap_t cap_lower;
67093 + kernel_cap_t cap_invert_audit;
67094 +
67095 + struct rlimit res[GR_NLIMITS];
67096 + __u32 resmask;
67097 +
67098 + __u8 user_trans_type;
67099 + __u8 group_trans_type;
67100 + uid_t *user_transitions;
67101 + gid_t *group_transitions;
67102 + __u16 user_trans_num;
67103 + __u16 group_trans_num;
67104 +
67105 + __u32 sock_families[2];
67106 + __u32 ip_proto[8];
67107 + __u32 ip_type;
67108 + struct acl_ip_label **ips;
67109 + __u32 ip_num;
67110 + __u32 inaddr_any_override;
67111 +
67112 + __u32 crashes;
67113 + unsigned long expires;
67114 +
67115 + struct acl_subject_label *parent_subject;
67116 + struct gr_hash_struct *hash;
67117 + struct acl_subject_label *prev;
67118 + struct acl_subject_label *next;
67119 +
67120 + struct acl_object_label **obj_hash;
67121 + __u32 obj_hash_size;
67122 + __u16 pax_flags;
67123 +};
67124 +
67125 +struct role_allowed_ip {
67126 + __u32 addr;
67127 + __u32 netmask;
67128 +
67129 + struct role_allowed_ip *prev;
67130 + struct role_allowed_ip *next;
67131 +};
67132 +
67133 +struct role_transition {
67134 + char *rolename;
67135 +
67136 + struct role_transition *prev;
67137 + struct role_transition *next;
67138 +};
67139 +
67140 +struct acl_role_label {
67141 + char *rolename;
67142 + uid_t uidgid;
67143 + __u16 roletype;
67144 +
67145 + __u16 auth_attempts;
67146 + unsigned long expires;
67147 +
67148 + struct acl_subject_label *root_label;
67149 + struct gr_hash_struct *hash;
67150 +
67151 + struct acl_role_label *prev;
67152 + struct acl_role_label *next;
67153 +
67154 + struct role_transition *transitions;
67155 + struct role_allowed_ip *allowed_ips;
67156 + uid_t *domain_children;
67157 + __u16 domain_child_num;
67158 +
67159 + mode_t umask;
67160 +
67161 + struct acl_subject_label **subj_hash;
67162 + __u32 subj_hash_size;
67163 +};
67164 +
67165 +struct user_acl_role_db {
67166 + struct acl_role_label **r_table;
67167 + __u32 num_pointers; /* Number of allocations to track */
67168 + __u32 num_roles; /* Number of roles */
67169 + __u32 num_domain_children; /* Number of domain children */
67170 + __u32 num_subjects; /* Number of subjects */
67171 + __u32 num_objects; /* Number of objects */
67172 +};
67173 +
67174 +struct acl_object_label {
67175 + char *filename;
67176 + ino_t inode;
67177 + dev_t device;
67178 + __u32 mode;
67179 +
67180 + struct acl_subject_label *nested;
67181 + struct acl_object_label *globbed;
67182 +
67183 + /* next two structures not used */
67184 +
67185 + struct acl_object_label *prev;
67186 + struct acl_object_label *next;
67187 +};
67188 +
67189 +struct acl_ip_label {
67190 + char *iface;
67191 + __u32 addr;
67192 + __u32 netmask;
67193 + __u16 low, high;
67194 + __u8 mode;
67195 + __u32 type;
67196 + __u32 proto[8];
67197 +
67198 + /* next two structures not used */
67199 +
67200 + struct acl_ip_label *prev;
67201 + struct acl_ip_label *next;
67202 +};
67203 +
67204 +struct gr_arg {
67205 + struct user_acl_role_db role_db;
67206 + unsigned char pw[GR_PW_LEN];
67207 + unsigned char salt[GR_SALT_LEN];
67208 + unsigned char sum[GR_SHA_LEN];
67209 + unsigned char sp_role[GR_SPROLE_LEN];
67210 + struct sprole_pw *sprole_pws;
67211 + dev_t segv_device;
67212 + ino_t segv_inode;
67213 + uid_t segv_uid;
67214 + __u16 num_sprole_pws;
67215 + __u16 mode;
67216 +};
67217 +
67218 +struct gr_arg_wrapper {
67219 + struct gr_arg *arg;
67220 + __u32 version;
67221 + __u32 size;
67222 +};
67223 +
67224 +struct subject_map {
67225 + struct acl_subject_label *user;
67226 + struct acl_subject_label *kernel;
67227 + struct subject_map *prev;
67228 + struct subject_map *next;
67229 +};
67230 +
67231 +struct acl_subj_map_db {
67232 + struct subject_map **s_hash;
67233 + __u32 s_size;
67234 +};
67235 +
67236 +/* End Data Structures Section */
67237 +
67238 +/* Hash functions generated by empirical testing by Brad Spengler
67239 + Makes good use of the low bits of the inode. Generally 0-1 times
67240 + in loop for successful match. 0-3 for unsuccessful match.
67241 + Shift/add algorithm with modulus of table size and an XOR*/
67242 +
67243 +static __inline__ unsigned int
67244 +rhash(const uid_t uid, const __u16 type, const unsigned int sz)
67245 +{
67246 + return ((((uid + type) << (16 + type)) ^ uid) % sz);
67247 +}
67248 +
67249 + static __inline__ unsigned int
67250 +shash(const struct acl_subject_label *userp, const unsigned int sz)
67251 +{
67252 + return ((const unsigned long)userp % sz);
67253 +}
67254 +
67255 +static __inline__ unsigned int
67256 +fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
67257 +{
67258 + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
67259 +}
67260 +
67261 +static __inline__ unsigned int
67262 +nhash(const char *name, const __u16 len, const unsigned int sz)
67263 +{
67264 + return full_name_hash((const unsigned char *)name, len) % sz;
67265 +}
67266 +
67267 +#define FOR_EACH_ROLE_START(role) \
67268 + role = role_list; \
67269 + while (role) {
67270 +
67271 +#define FOR_EACH_ROLE_END(role) \
67272 + role = role->prev; \
67273 + }
67274 +
67275 +#define FOR_EACH_SUBJECT_START(role,subj,iter) \
67276 + subj = NULL; \
67277 + iter = 0; \
67278 + while (iter < role->subj_hash_size) { \
67279 + if (subj == NULL) \
67280 + subj = role->subj_hash[iter]; \
67281 + if (subj == NULL) { \
67282 + iter++; \
67283 + continue; \
67284 + }
67285 +
67286 +#define FOR_EACH_SUBJECT_END(subj,iter) \
67287 + subj = subj->next; \
67288 + if (subj == NULL) \
67289 + iter++; \
67290 + }
67291 +
67292 +
67293 +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
67294 + subj = role->hash->first; \
67295 + while (subj != NULL) {
67296 +
67297 +#define FOR_EACH_NESTED_SUBJECT_END(subj) \
67298 + subj = subj->next; \
67299 + }
67300 +
67301 +#endif
67302 +
67303 diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
67304 new file mode 100644
67305 index 0000000..323ecf2
67306 --- /dev/null
67307 +++ b/include/linux/gralloc.h
67308 @@ -0,0 +1,9 @@
67309 +#ifndef __GRALLOC_H
67310 +#define __GRALLOC_H
67311 +
67312 +void acl_free_all(void);
67313 +int acl_alloc_stack_init(unsigned long size);
67314 +void *acl_alloc(unsigned long len);
67315 +void *acl_alloc_num(unsigned long num, unsigned long len);
67316 +
67317 +#endif
67318 diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
67319 new file mode 100644
67320 index 0000000..70d6cd5
67321 --- /dev/null
67322 +++ b/include/linux/grdefs.h
67323 @@ -0,0 +1,140 @@
67324 +#ifndef GRDEFS_H
67325 +#define GRDEFS_H
67326 +
67327 +/* Begin grsecurity status declarations */
67328 +
67329 +enum {
67330 + GR_READY = 0x01,
67331 + GR_STATUS_INIT = 0x00 // disabled state
67332 +};
67333 +
67334 +/* Begin ACL declarations */
67335 +
67336 +/* Role flags */
67337 +
67338 +enum {
67339 + GR_ROLE_USER = 0x0001,
67340 + GR_ROLE_GROUP = 0x0002,
67341 + GR_ROLE_DEFAULT = 0x0004,
67342 + GR_ROLE_SPECIAL = 0x0008,
67343 + GR_ROLE_AUTH = 0x0010,
67344 + GR_ROLE_NOPW = 0x0020,
67345 + GR_ROLE_GOD = 0x0040,
67346 + GR_ROLE_LEARN = 0x0080,
67347 + GR_ROLE_TPE = 0x0100,
67348 + GR_ROLE_DOMAIN = 0x0200,
67349 + GR_ROLE_PAM = 0x0400,
67350 + GR_ROLE_PERSIST = 0x800
67351 +};
67352 +
67353 +/* ACL Subject and Object mode flags */
67354 +enum {
67355 + GR_DELETED = 0x80000000
67356 +};
67357 +
67358 +/* ACL Object-only mode flags */
67359 +enum {
67360 + GR_READ = 0x00000001,
67361 + GR_APPEND = 0x00000002,
67362 + GR_WRITE = 0x00000004,
67363 + GR_EXEC = 0x00000008,
67364 + GR_FIND = 0x00000010,
67365 + GR_INHERIT = 0x00000020,
67366 + GR_SETID = 0x00000040,
67367 + GR_CREATE = 0x00000080,
67368 + GR_DELETE = 0x00000100,
67369 + GR_LINK = 0x00000200,
67370 + GR_AUDIT_READ = 0x00000400,
67371 + GR_AUDIT_APPEND = 0x00000800,
67372 + GR_AUDIT_WRITE = 0x00001000,
67373 + GR_AUDIT_EXEC = 0x00002000,
67374 + GR_AUDIT_FIND = 0x00004000,
67375 + GR_AUDIT_INHERIT= 0x00008000,
67376 + GR_AUDIT_SETID = 0x00010000,
67377 + GR_AUDIT_CREATE = 0x00020000,
67378 + GR_AUDIT_DELETE = 0x00040000,
67379 + GR_AUDIT_LINK = 0x00080000,
67380 + GR_PTRACERD = 0x00100000,
67381 + GR_NOPTRACE = 0x00200000,
67382 + GR_SUPPRESS = 0x00400000,
67383 + GR_NOLEARN = 0x00800000,
67384 + GR_INIT_TRANSFER= 0x01000000
67385 +};
67386 +
67387 +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
67388 + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
67389 + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
67390 +
67391 +/* ACL subject-only mode flags */
67392 +enum {
67393 + GR_KILL = 0x00000001,
67394 + GR_VIEW = 0x00000002,
67395 + GR_PROTECTED = 0x00000004,
67396 + GR_LEARN = 0x00000008,
67397 + GR_OVERRIDE = 0x00000010,
67398 + /* just a placeholder, this mode is only used in userspace */
67399 + GR_DUMMY = 0x00000020,
67400 + GR_PROTSHM = 0x00000040,
67401 + GR_KILLPROC = 0x00000080,
67402 + GR_KILLIPPROC = 0x00000100,
67403 + /* just a placeholder, this mode is only used in userspace */
67404 + GR_NOTROJAN = 0x00000200,
67405 + GR_PROTPROCFD = 0x00000400,
67406 + GR_PROCACCT = 0x00000800,
67407 + GR_RELAXPTRACE = 0x00001000,
67408 + GR_NESTED = 0x00002000,
67409 + GR_INHERITLEARN = 0x00004000,
67410 + GR_PROCFIND = 0x00008000,
67411 + GR_POVERRIDE = 0x00010000,
67412 + GR_KERNELAUTH = 0x00020000,
67413 + GR_ATSECURE = 0x00040000,
67414 + GR_SHMEXEC = 0x00080000
67415 +};
67416 +
67417 +enum {
67418 + GR_PAX_ENABLE_SEGMEXEC = 0x0001,
67419 + GR_PAX_ENABLE_PAGEEXEC = 0x0002,
67420 + GR_PAX_ENABLE_MPROTECT = 0x0004,
67421 + GR_PAX_ENABLE_RANDMMAP = 0x0008,
67422 + GR_PAX_ENABLE_EMUTRAMP = 0x0010,
67423 + GR_PAX_DISABLE_SEGMEXEC = 0x0100,
67424 + GR_PAX_DISABLE_PAGEEXEC = 0x0200,
67425 + GR_PAX_DISABLE_MPROTECT = 0x0400,
67426 + GR_PAX_DISABLE_RANDMMAP = 0x0800,
67427 + GR_PAX_DISABLE_EMUTRAMP = 0x1000,
67428 +};
67429 +
67430 +enum {
67431 + GR_ID_USER = 0x01,
67432 + GR_ID_GROUP = 0x02,
67433 +};
67434 +
67435 +enum {
67436 + GR_ID_ALLOW = 0x01,
67437 + GR_ID_DENY = 0x02,
67438 +};
67439 +
67440 +#define GR_CRASH_RES 31
67441 +#define GR_UIDTABLE_MAX 500
67442 +
67443 +/* begin resource learning section */
67444 +enum {
67445 + GR_RLIM_CPU_BUMP = 60,
67446 + GR_RLIM_FSIZE_BUMP = 50000,
67447 + GR_RLIM_DATA_BUMP = 10000,
67448 + GR_RLIM_STACK_BUMP = 1000,
67449 + GR_RLIM_CORE_BUMP = 10000,
67450 + GR_RLIM_RSS_BUMP = 500000,
67451 + GR_RLIM_NPROC_BUMP = 1,
67452 + GR_RLIM_NOFILE_BUMP = 5,
67453 + GR_RLIM_MEMLOCK_BUMP = 50000,
67454 + GR_RLIM_AS_BUMP = 500000,
67455 + GR_RLIM_LOCKS_BUMP = 2,
67456 + GR_RLIM_SIGPENDING_BUMP = 5,
67457 + GR_RLIM_MSGQUEUE_BUMP = 10000,
67458 + GR_RLIM_NICE_BUMP = 1,
67459 + GR_RLIM_RTPRIO_BUMP = 1,
67460 + GR_RLIM_RTTIME_BUMP = 1000000
67461 +};
67462 +
67463 +#endif
67464 diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
67465 new file mode 100644
67466 index 0000000..3826b91
67467 --- /dev/null
67468 +++ b/include/linux/grinternal.h
67469 @@ -0,0 +1,219 @@
67470 +#ifndef __GRINTERNAL_H
67471 +#define __GRINTERNAL_H
67472 +
67473 +#ifdef CONFIG_GRKERNSEC
67474 +
67475 +#include <linux/fs.h>
67476 +#include <linux/mnt_namespace.h>
67477 +#include <linux/nsproxy.h>
67478 +#include <linux/gracl.h>
67479 +#include <linux/grdefs.h>
67480 +#include <linux/grmsg.h>
67481 +
67482 +void gr_add_learn_entry(const char *fmt, ...)
67483 + __attribute__ ((format (printf, 1, 2)));
67484 +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
67485 + const struct vfsmount *mnt);
67486 +__u32 gr_check_create(const struct dentry *new_dentry,
67487 + const struct dentry *parent,
67488 + const struct vfsmount *mnt, const __u32 mode);
67489 +int gr_check_protected_task(const struct task_struct *task);
67490 +__u32 to_gr_audit(const __u32 reqmode);
67491 +int gr_set_acls(const int type);
67492 +int gr_apply_subject_to_task(struct task_struct *task);
67493 +int gr_acl_is_enabled(void);
67494 +char gr_roletype_to_char(void);
67495 +
67496 +void gr_handle_alertkill(struct task_struct *task);
67497 +char *gr_to_filename(const struct dentry *dentry,
67498 + const struct vfsmount *mnt);
67499 +char *gr_to_filename1(const struct dentry *dentry,
67500 + const struct vfsmount *mnt);
67501 +char *gr_to_filename2(const struct dentry *dentry,
67502 + const struct vfsmount *mnt);
67503 +char *gr_to_filename3(const struct dentry *dentry,
67504 + const struct vfsmount *mnt);
67505 +
67506 +extern int grsec_enable_ptrace_readexec;
67507 +extern int grsec_enable_harden_ptrace;
67508 +extern int grsec_enable_link;
67509 +extern int grsec_enable_fifo;
67510 +extern int grsec_enable_shm;
67511 +extern int grsec_enable_execlog;
67512 +extern int grsec_enable_signal;
67513 +extern int grsec_enable_audit_ptrace;
67514 +extern int grsec_enable_forkfail;
67515 +extern int grsec_enable_time;
67516 +extern int grsec_enable_rofs;
67517 +extern int grsec_enable_chroot_shmat;
67518 +extern int grsec_enable_chroot_mount;
67519 +extern int grsec_enable_chroot_double;
67520 +extern int grsec_enable_chroot_pivot;
67521 +extern int grsec_enable_chroot_chdir;
67522 +extern int grsec_enable_chroot_chmod;
67523 +extern int grsec_enable_chroot_mknod;
67524 +extern int grsec_enable_chroot_fchdir;
67525 +extern int grsec_enable_chroot_nice;
67526 +extern int grsec_enable_chroot_execlog;
67527 +extern int grsec_enable_chroot_caps;
67528 +extern int grsec_enable_chroot_sysctl;
67529 +extern int grsec_enable_chroot_unix;
67530 +extern int grsec_enable_tpe;
67531 +extern int grsec_tpe_gid;
67532 +extern int grsec_enable_tpe_all;
67533 +extern int grsec_enable_tpe_invert;
67534 +extern int grsec_enable_socket_all;
67535 +extern int grsec_socket_all_gid;
67536 +extern int grsec_enable_socket_client;
67537 +extern int grsec_socket_client_gid;
67538 +extern int grsec_enable_socket_server;
67539 +extern int grsec_socket_server_gid;
67540 +extern int grsec_audit_gid;
67541 +extern int grsec_enable_group;
67542 +extern int grsec_enable_audit_textrel;
67543 +extern int grsec_enable_log_rwxmaps;
67544 +extern int grsec_enable_mount;
67545 +extern int grsec_enable_chdir;
67546 +extern int grsec_resource_logging;
67547 +extern int grsec_enable_blackhole;
67548 +extern int grsec_lastack_retries;
67549 +extern int grsec_enable_brute;
67550 +extern int grsec_lock;
67551 +
67552 +extern spinlock_t grsec_alert_lock;
67553 +extern unsigned long grsec_alert_wtime;
67554 +extern unsigned long grsec_alert_fyet;
67555 +
67556 +extern spinlock_t grsec_audit_lock;
67557 +
67558 +extern rwlock_t grsec_exec_file_lock;
67559 +
67560 +#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
67561 + gr_to_filename2((tsk)->exec_file->f_path.dentry, \
67562 + (tsk)->exec_file->f_vfsmnt) : "/")
67563 +
67564 +#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
67565 + gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
67566 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
67567 +
67568 +#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
67569 + gr_to_filename((tsk)->exec_file->f_path.dentry, \
67570 + (tsk)->exec_file->f_vfsmnt) : "/")
67571 +
67572 +#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
67573 + gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
67574 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
67575 +
67576 +#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
67577 +
67578 +#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
67579 +
67580 +#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
67581 + (task)->pid, (cred)->uid, \
67582 + (cred)->euid, (cred)->gid, (cred)->egid, \
67583 + gr_parent_task_fullpath(task), \
67584 + (task)->real_parent->comm, (task)->real_parent->pid, \
67585 + (pcred)->uid, (pcred)->euid, \
67586 + (pcred)->gid, (pcred)->egid
67587 +
67588 +#define GR_CHROOT_CAPS {{ \
67589 + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
67590 + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
67591 + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
67592 + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
67593 + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
67594 + CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
67595 + CAP_TO_MASK(CAP_MAC_ADMIN) }}
67596 +
67597 +#define security_learn(normal_msg,args...) \
67598 +({ \
67599 + read_lock(&grsec_exec_file_lock); \
67600 + gr_add_learn_entry(normal_msg "\n", ## args); \
67601 + read_unlock(&grsec_exec_file_lock); \
67602 +})
67603 +
67604 +enum {
67605 + GR_DO_AUDIT,
67606 + GR_DONT_AUDIT,
67607 + GR_DONT_AUDIT_GOOD
67608 +};
67609 +
67610 +enum {
67611 + GR_TTYSNIFF,
67612 + GR_RBAC,
67613 + GR_RBAC_STR,
67614 + GR_STR_RBAC,
67615 + GR_RBAC_MODE2,
67616 + GR_RBAC_MODE3,
67617 + GR_FILENAME,
67618 + GR_SYSCTL_HIDDEN,
67619 + GR_NOARGS,
67620 + GR_ONE_INT,
67621 + GR_ONE_INT_TWO_STR,
67622 + GR_ONE_STR,
67623 + GR_STR_INT,
67624 + GR_TWO_STR_INT,
67625 + GR_TWO_INT,
67626 + GR_TWO_U64,
67627 + GR_THREE_INT,
67628 + GR_FIVE_INT_TWO_STR,
67629 + GR_TWO_STR,
67630 + GR_THREE_STR,
67631 + GR_FOUR_STR,
67632 + GR_STR_FILENAME,
67633 + GR_FILENAME_STR,
67634 + GR_FILENAME_TWO_INT,
67635 + GR_FILENAME_TWO_INT_STR,
67636 + GR_TEXTREL,
67637 + GR_PTRACE,
67638 + GR_RESOURCE,
67639 + GR_CAP,
67640 + GR_SIG,
67641 + GR_SIG2,
67642 + GR_CRASH1,
67643 + GR_CRASH2,
67644 + GR_PSACCT,
67645 + GR_RWXMAP
67646 +};
67647 +
67648 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
67649 +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
67650 +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
67651 +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
67652 +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
67653 +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
67654 +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
67655 +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
67656 +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
67657 +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
67658 +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
67659 +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
67660 +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
67661 +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
67662 +#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
67663 +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
67664 +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
67665 +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
67666 +#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
67667 +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
67668 +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
67669 +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
67670 +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
67671 +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
67672 +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
67673 +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
67674 +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
67675 +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
67676 +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
67677 +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
67678 +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
67679 +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
67680 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
67681 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
67682 +#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
67683 +
67684 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
67685 +
67686 +#endif
67687 +
67688 +#endif
67689 diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
67690 new file mode 100644
67691 index 0000000..f885406
67692 --- /dev/null
67693 +++ b/include/linux/grmsg.h
67694 @@ -0,0 +1,109 @@
67695 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
67696 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
67697 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
67698 +#define GR_STOPMOD_MSG "denied modification of module state by "
67699 +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
67700 +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
67701 +#define GR_IOPERM_MSG "denied use of ioperm() by "
67702 +#define GR_IOPL_MSG "denied use of iopl() by "
67703 +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
67704 +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
67705 +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
67706 +#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
67707 +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
67708 +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
67709 +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
67710 +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
67711 +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
67712 +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
67713 +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
67714 +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
67715 +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
67716 +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
67717 +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
67718 +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
67719 +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
67720 +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
67721 +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
67722 +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
67723 +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
67724 +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
67725 +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
67726 +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
67727 +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
67728 +#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
67729 +#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
67730 +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
67731 +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
67732 +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
67733 +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
67734 +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
67735 +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
67736 +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
67737 +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
67738 +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
67739 +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
67740 +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
67741 +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
67742 +#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
67743 +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
67744 +#define GR_INITF_ACL_MSG "init_variables() failed %s by "
67745 +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
67746 +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
67747 +#define GR_SHUTS_ACL_MSG "shutdown auth success for "
67748 +#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
67749 +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
67750 +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
67751 +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
67752 +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
67753 +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
67754 +#define GR_ENABLEF_ACL_MSG "unable to load %s for "
67755 +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
67756 +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
67757 +#define GR_RELOADF_ACL_MSG "failed reload of %s for "
67758 +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
67759 +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
67760 +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
67761 +#define GR_SPROLEF_ACL_MSG "special role %s failure for "
67762 +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
67763 +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
67764 +#define GR_INVMODE_ACL_MSG "invalid mode %d by "
67765 +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
67766 +#define GR_FAILFORK_MSG "failed fork with errno %s by "
67767 +#define GR_NICE_CHROOT_MSG "denied priority change by "
67768 +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
67769 +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
67770 +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
67771 +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
67772 +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
67773 +#define GR_TIME_MSG "time set by "
67774 +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
67775 +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
67776 +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
67777 +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
67778 +#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
67779 +#define GR_BIND_MSG "denied bind() by "
67780 +#define GR_CONNECT_MSG "denied connect() by "
67781 +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
67782 +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
67783 +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
67784 +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
67785 +#define GR_CAP_ACL_MSG "use of %s denied for "
67786 +#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
67787 +#define GR_CAP_ACL_MSG2 "use of %s permitted for "
67788 +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
67789 +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
67790 +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
67791 +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
67792 +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
67793 +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
67794 +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
67795 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
67796 +#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
67797 +#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
67798 +#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
67799 +#define GR_VM86_MSG "denied use of vm86 by "
67800 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
67801 +#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
67802 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
67803 +#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
67804 diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
67805 new file mode 100644
67806 index 0000000..c1793ae
67807 --- /dev/null
67808 +++ b/include/linux/grsecurity.h
67809 @@ -0,0 +1,219 @@
67810 +#ifndef GR_SECURITY_H
67811 +#define GR_SECURITY_H
67812 +#include <linux/fs.h>
67813 +#include <linux/fs_struct.h>
67814 +#include <linux/binfmts.h>
67815 +#include <linux/gracl.h>
67816 +#include <linux/compat.h>
67817 +
67818 +/* notify of brain-dead configs */
67819 +#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67820 +#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
67821 +#endif
67822 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
67823 +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
67824 +#endif
67825 +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
67826 +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
67827 +#endif
67828 +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
67829 +#error "CONFIG_PAX enabled, but no PaX options are enabled."
67830 +#endif
67831 +
67832 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
67833 +void gr_handle_brute_check(void);
67834 +void gr_handle_kernel_exploit(void);
67835 +int gr_process_user_ban(void);
67836 +
67837 +char gr_roletype_to_char(void);
67838 +
67839 +int gr_acl_enable_at_secure(void);
67840 +
67841 +int gr_check_user_change(int real, int effective, int fs);
67842 +int gr_check_group_change(int real, int effective, int fs);
67843 +
67844 +void gr_del_task_from_ip_table(struct task_struct *p);
67845 +
67846 +int gr_pid_is_chrooted(struct task_struct *p);
67847 +int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
67848 +int gr_handle_chroot_nice(void);
67849 +int gr_handle_chroot_sysctl(const int op);
67850 +int gr_handle_chroot_setpriority(struct task_struct *p,
67851 + const int niceval);
67852 +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
67853 +int gr_handle_chroot_chroot(const struct dentry *dentry,
67854 + const struct vfsmount *mnt);
67855 +void gr_handle_chroot_chdir(struct path *path);
67856 +int gr_handle_chroot_chmod(const struct dentry *dentry,
67857 + const struct vfsmount *mnt, const int mode);
67858 +int gr_handle_chroot_mknod(const struct dentry *dentry,
67859 + const struct vfsmount *mnt, const int mode);
67860 +int gr_handle_chroot_mount(const struct dentry *dentry,
67861 + const struct vfsmount *mnt,
67862 + const char *dev_name);
67863 +int gr_handle_chroot_pivot(void);
67864 +int gr_handle_chroot_unix(const pid_t pid);
67865 +
67866 +int gr_handle_rawio(const struct inode *inode);
67867 +
67868 +void gr_handle_ioperm(void);
67869 +void gr_handle_iopl(void);
67870 +
67871 +umode_t gr_acl_umask(void);
67872 +
67873 +int gr_tpe_allow(const struct file *file);
67874 +
67875 +void gr_set_chroot_entries(struct task_struct *task, struct path *path);
67876 +void gr_clear_chroot_entries(struct task_struct *task);
67877 +
67878 +void gr_log_forkfail(const int retval);
67879 +void gr_log_timechange(void);
67880 +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
67881 +void gr_log_chdir(const struct dentry *dentry,
67882 + const struct vfsmount *mnt);
67883 +void gr_log_chroot_exec(const struct dentry *dentry,
67884 + const struct vfsmount *mnt);
67885 +void gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv);
67886 +#ifdef CONFIG_COMPAT
67887 +void gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv);
67888 +#endif
67889 +void gr_log_remount(const char *devname, const int retval);
67890 +void gr_log_unmount(const char *devname, const int retval);
67891 +void gr_log_mount(const char *from, const char *to, const int retval);
67892 +void gr_log_textrel(struct vm_area_struct *vma);
67893 +void gr_log_rwxmmap(struct file *file);
67894 +void gr_log_rwxmprotect(struct file *file);
67895 +
67896 +int gr_handle_follow_link(const struct inode *parent,
67897 + const struct inode *inode,
67898 + const struct dentry *dentry,
67899 + const struct vfsmount *mnt);
67900 +int gr_handle_fifo(const struct dentry *dentry,
67901 + const struct vfsmount *mnt,
67902 + const struct dentry *dir, const int flag,
67903 + const int acc_mode);
67904 +int gr_handle_hardlink(const struct dentry *dentry,
67905 + const struct vfsmount *mnt,
67906 + struct inode *inode,
67907 + const int mode, const char *to);
67908 +
67909 +int gr_is_capable(const int cap);
67910 +int gr_is_capable_nolog(const int cap);
67911 +void gr_learn_resource(const struct task_struct *task, const int limit,
67912 + const unsigned long wanted, const int gt);
67913 +void gr_copy_label(struct task_struct *tsk);
67914 +void gr_handle_crash(struct task_struct *task, const int sig);
67915 +int gr_handle_signal(const struct task_struct *p, const int sig);
67916 +int gr_check_crash_uid(const uid_t uid);
67917 +int gr_check_protected_task(const struct task_struct *task);
67918 +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
67919 +int gr_acl_handle_mmap(const struct file *file,
67920 + const unsigned long prot);
67921 +int gr_acl_handle_mprotect(const struct file *file,
67922 + const unsigned long prot);
67923 +int gr_check_hidden_task(const struct task_struct *tsk);
67924 +__u32 gr_acl_handle_truncate(const struct dentry *dentry,
67925 + const struct vfsmount *mnt);
67926 +__u32 gr_acl_handle_utime(const struct dentry *dentry,
67927 + const struct vfsmount *mnt);
67928 +__u32 gr_acl_handle_access(const struct dentry *dentry,
67929 + const struct vfsmount *mnt, const int fmode);
67930 +__u32 gr_acl_handle_chmod(const struct dentry *dentry,
67931 + const struct vfsmount *mnt, umode_t *mode);
67932 +__u32 gr_acl_handle_chown(const struct dentry *dentry,
67933 + const struct vfsmount *mnt);
67934 +__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
67935 + const struct vfsmount *mnt);
67936 +int gr_handle_ptrace(struct task_struct *task, const long request);
67937 +int gr_handle_proc_ptrace(struct task_struct *task);
67938 +__u32 gr_acl_handle_execve(const struct dentry *dentry,
67939 + const struct vfsmount *mnt);
67940 +int gr_check_crash_exec(const struct file *filp);
67941 +int gr_acl_is_enabled(void);
67942 +void gr_set_kernel_label(struct task_struct *task);
67943 +void gr_set_role_label(struct task_struct *task, const uid_t uid,
67944 + const gid_t gid);
67945 +int gr_set_proc_label(const struct dentry *dentry,
67946 + const struct vfsmount *mnt,
67947 + const int unsafe_flags);
67948 +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
67949 + const struct vfsmount *mnt);
67950 +__u32 gr_acl_handle_open(const struct dentry *dentry,
67951 + const struct vfsmount *mnt, int acc_mode);
67952 +__u32 gr_acl_handle_creat(const struct dentry *dentry,
67953 + const struct dentry *p_dentry,
67954 + const struct vfsmount *p_mnt,
67955 + int open_flags, int acc_mode, const int imode);
67956 +void gr_handle_create(const struct dentry *dentry,
67957 + const struct vfsmount *mnt);
67958 +void gr_handle_proc_create(const struct dentry *dentry,
67959 + const struct inode *inode);
67960 +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
67961 + const struct dentry *parent_dentry,
67962 + const struct vfsmount *parent_mnt,
67963 + const int mode);
67964 +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
67965 + const struct dentry *parent_dentry,
67966 + const struct vfsmount *parent_mnt);
67967 +__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
67968 + const struct vfsmount *mnt);
67969 +void gr_handle_delete(const ino_t ino, const dev_t dev);
67970 +__u32 gr_acl_handle_unlink(const struct dentry *dentry,
67971 + const struct vfsmount *mnt);
67972 +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
67973 + const struct dentry *parent_dentry,
67974 + const struct vfsmount *parent_mnt,
67975 + const char *from);
67976 +__u32 gr_acl_handle_link(const struct dentry *new_dentry,
67977 + const struct dentry *parent_dentry,
67978 + const struct vfsmount *parent_mnt,
67979 + const struct dentry *old_dentry,
67980 + const struct vfsmount *old_mnt, const char *to);
67981 +int gr_acl_handle_rename(struct dentry *new_dentry,
67982 + struct dentry *parent_dentry,
67983 + const struct vfsmount *parent_mnt,
67984 + struct dentry *old_dentry,
67985 + struct inode *old_parent_inode,
67986 + struct vfsmount *old_mnt, const char *newname);
67987 +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
67988 + struct dentry *old_dentry,
67989 + struct dentry *new_dentry,
67990 + struct vfsmount *mnt, const __u8 replace);
67991 +__u32 gr_check_link(const struct dentry *new_dentry,
67992 + const struct dentry *parent_dentry,
67993 + const struct vfsmount *parent_mnt,
67994 + const struct dentry *old_dentry,
67995 + const struct vfsmount *old_mnt);
67996 +int gr_acl_handle_filldir(const struct file *file, const char *name,
67997 + const unsigned int namelen, const ino_t ino);
67998 +
67999 +__u32 gr_acl_handle_unix(const struct dentry *dentry,
68000 + const struct vfsmount *mnt);
68001 +void gr_acl_handle_exit(void);
68002 +void gr_acl_handle_psacct(struct task_struct *task, const long code);
68003 +int gr_acl_handle_procpidmem(const struct task_struct *task);
68004 +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
68005 +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
68006 +void gr_audit_ptrace(struct task_struct *task);
68007 +dev_t gr_get_dev_from_dentry(struct dentry *dentry);
68008 +
68009 +int gr_ptrace_readexec(struct file *file, int unsafe_flags);
68010 +
68011 +#ifdef CONFIG_GRKERNSEC
68012 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
68013 +void gr_handle_vm86(void);
68014 +void gr_handle_mem_readwrite(u64 from, u64 to);
68015 +
68016 +void gr_log_badprocpid(const char *entry);
68017 +
68018 +extern int grsec_enable_dmesg;
68019 +extern int grsec_disable_privio;
68020 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
68021 +extern int grsec_enable_chroot_findtask;
68022 +#endif
68023 +#ifdef CONFIG_GRKERNSEC_SETXID
68024 +extern int grsec_enable_setxid;
68025 +#endif
68026 +#endif
68027 +
68028 +#endif
68029 diff --git a/include/linux/hdpu_features.h b/include/linux/hdpu_features.h
68030 index 6a87154..a3ce57b 100644
68031 --- a/include/linux/hdpu_features.h
68032 +++ b/include/linux/hdpu_features.h
68033 @@ -3,7 +3,7 @@
68034 struct cpustate_t {
68035 spinlock_t lock;
68036 int excl;
68037 - int open_count;
68038 + atomic_t open_count;
68039 unsigned char cached_val;
68040 int inited;
68041 unsigned long *set_addr;
68042 diff --git a/include/linux/highmem.h b/include/linux/highmem.h
68043 index 211ff44..00ab6d7 100644
68044 --- a/include/linux/highmem.h
68045 +++ b/include/linux/highmem.h
68046 @@ -137,6 +137,18 @@ static inline void clear_highpage(struct page *page)
68047 kunmap_atomic(kaddr, KM_USER0);
68048 }
68049
68050 +static inline void sanitize_highpage(struct page *page)
68051 +{
68052 + void *kaddr;
68053 + unsigned long flags;
68054 +
68055 + local_irq_save(flags);
68056 + kaddr = kmap_atomic(page, KM_CLEARPAGE);
68057 + clear_page(kaddr);
68058 + kunmap_atomic(kaddr, KM_CLEARPAGE);
68059 + local_irq_restore(flags);
68060 +}
68061 +
68062 static inline void zero_user_segments(struct page *page,
68063 unsigned start1, unsigned end1,
68064 unsigned start2, unsigned end2)
68065 diff --git a/include/linux/i2c.h b/include/linux/i2c.h
68066 index 7b40cda..24eb44e 100644
68067 --- a/include/linux/i2c.h
68068 +++ b/include/linux/i2c.h
68069 @@ -325,6 +325,7 @@ struct i2c_algorithm {
68070 /* To determine what the adapter supports */
68071 u32 (*functionality) (struct i2c_adapter *);
68072 };
68073 +typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
68074
68075 /*
68076 * i2c_adapter is the structure used to identify a physical i2c bus along
68077 diff --git a/include/linux/i2o.h b/include/linux/i2o.h
68078 index 4c4e57d..f3c5303 100644
68079 --- a/include/linux/i2o.h
68080 +++ b/include/linux/i2o.h
68081 @@ -564,7 +564,7 @@ struct i2o_controller {
68082 struct i2o_device *exec; /* Executive */
68083 #if BITS_PER_LONG == 64
68084 spinlock_t context_list_lock; /* lock for context_list */
68085 - atomic_t context_list_counter; /* needed for unique contexts */
68086 + atomic_unchecked_t context_list_counter; /* needed for unique contexts */
68087 struct list_head context_list; /* list of context id's
68088 and pointers */
68089 #endif
68090 diff --git a/include/linux/init_task.h b/include/linux/init_task.h
68091 index 21a6f5d..dc42eab 100644
68092 --- a/include/linux/init_task.h
68093 +++ b/include/linux/init_task.h
68094 @@ -83,6 +83,12 @@ extern struct group_info init_groups;
68095 #define INIT_IDS
68096 #endif
68097
68098 +#ifdef CONFIG_X86
68099 +#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
68100 +#else
68101 +#define INIT_TASK_THREAD_INFO
68102 +#endif
68103 +
68104 #ifdef CONFIG_SECURITY_FILE_CAPABILITIES
68105 /*
68106 * Because of the reduced scope of CAP_SETPCAP when filesystem
68107 @@ -156,6 +162,7 @@ extern struct cred init_cred;
68108 __MUTEX_INITIALIZER(tsk.cred_guard_mutex), \
68109 .comm = "swapper", \
68110 .thread = INIT_THREAD, \
68111 + INIT_TASK_THREAD_INFO \
68112 .fs = &init_fs, \
68113 .files = &init_files, \
68114 .signal = &init_signals, \
68115 diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
68116 index 4f0a72a..a849599 100644
68117 --- a/include/linux/intel-iommu.h
68118 +++ b/include/linux/intel-iommu.h
68119 @@ -296,7 +296,7 @@ struct iommu_flush {
68120 u8 fm, u64 type);
68121 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
68122 unsigned int size_order, u64 type);
68123 -};
68124 +} __no_const;
68125
68126 enum {
68127 SR_DMAR_FECTL_REG,
68128 diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
68129 index c739150..be577b5 100644
68130 --- a/include/linux/interrupt.h
68131 +++ b/include/linux/interrupt.h
68132 @@ -369,7 +369,7 @@ enum
68133 /* map softirq index to softirq name. update 'softirq_to_name' in
68134 * kernel/softirq.c when adding a new softirq.
68135 */
68136 -extern char *softirq_to_name[NR_SOFTIRQS];
68137 +extern const char * const softirq_to_name[NR_SOFTIRQS];
68138
68139 /* softirq mask and active fields moved to irq_cpustat_t in
68140 * asm/hardirq.h to get better cache usage. KAO
68141 @@ -377,12 +377,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
68142
68143 struct softirq_action
68144 {
68145 - void (*action)(struct softirq_action *);
68146 + void (*action)(void);
68147 };
68148
68149 asmlinkage void do_softirq(void);
68150 asmlinkage void __do_softirq(void);
68151 -extern void open_softirq(int nr, void (*action)(struct softirq_action *));
68152 +extern void open_softirq(int nr, void (*action)(void));
68153 extern void softirq_init(void);
68154 #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
68155 extern void raise_softirq_irqoff(unsigned int nr);
68156 diff --git a/include/linux/irq.h b/include/linux/irq.h
68157 index 9e5f45a..025865b 100644
68158 --- a/include/linux/irq.h
68159 +++ b/include/linux/irq.h
68160 @@ -438,12 +438,12 @@ extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
68161 static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
68162 bool boot)
68163 {
68164 +#ifdef CONFIG_CPUMASK_OFFSTACK
68165 gfp_t gfp = GFP_ATOMIC;
68166
68167 if (boot)
68168 gfp = GFP_NOWAIT;
68169
68170 -#ifdef CONFIG_CPUMASK_OFFSTACK
68171 if (!alloc_cpumask_var_node(&desc->affinity, gfp, node))
68172 return false;
68173
68174 diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
68175 index 7922742..27306a2 100644
68176 --- a/include/linux/kallsyms.h
68177 +++ b/include/linux/kallsyms.h
68178 @@ -15,7 +15,8 @@
68179
68180 struct module;
68181
68182 -#ifdef CONFIG_KALLSYMS
68183 +#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
68184 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
68185 /* Lookup the address for a symbol. Returns 0 if not found. */
68186 unsigned long kallsyms_lookup_name(const char *name);
68187
68188 @@ -92,6 +93,15 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
68189 /* Stupid that this does nothing, but I didn't create this mess. */
68190 #define __print_symbol(fmt, addr)
68191 #endif /*CONFIG_KALLSYMS*/
68192 +#else /* when included by kallsyms.c, vsnprintf.c, or
68193 + arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
68194 +extern void __print_symbol(const char *fmt, unsigned long address);
68195 +extern int sprint_symbol(char *buffer, unsigned long address);
68196 +const char *kallsyms_lookup(unsigned long addr,
68197 + unsigned long *symbolsize,
68198 + unsigned long *offset,
68199 + char **modname, char *namebuf);
68200 +#endif
68201
68202 /* This macro allows us to keep printk typechecking */
68203 static void __check_printsym_format(const char *fmt, ...)
68204 diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
68205 index 6adcc29..13369e8 100644
68206 --- a/include/linux/kgdb.h
68207 +++ b/include/linux/kgdb.h
68208 @@ -74,8 +74,8 @@ void kgdb_breakpoint(void);
68209
68210 extern int kgdb_connected;
68211
68212 -extern atomic_t kgdb_setting_breakpoint;
68213 -extern atomic_t kgdb_cpu_doing_single_step;
68214 +extern atomic_unchecked_t kgdb_setting_breakpoint;
68215 +extern atomic_unchecked_t kgdb_cpu_doing_single_step;
68216
68217 extern struct task_struct *kgdb_usethread;
68218 extern struct task_struct *kgdb_contthread;
68219 @@ -235,7 +235,7 @@ struct kgdb_arch {
68220 int (*remove_hw_breakpoint)(unsigned long, int, enum kgdb_bptype);
68221 void (*remove_all_hw_break)(void);
68222 void (*correct_hw_break)(void);
68223 -};
68224 +} __do_const;
68225
68226 /**
68227 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
68228 @@ -257,14 +257,14 @@ struct kgdb_io {
68229 int (*init) (void);
68230 void (*pre_exception) (void);
68231 void (*post_exception) (void);
68232 -};
68233 +} __do_const;
68234
68235 -extern struct kgdb_arch arch_kgdb_ops;
68236 +extern const struct kgdb_arch arch_kgdb_ops;
68237
68238 extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs);
68239
68240 -extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops);
68241 -extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops);
68242 +extern int kgdb_register_io_module(const struct kgdb_io *local_kgdb_io_ops);
68243 +extern void kgdb_unregister_io_module(const struct kgdb_io *local_kgdb_io_ops);
68244
68245 extern int kgdb_hex2long(char **ptr, unsigned long *long_val);
68246 extern int kgdb_mem2hex(char *mem, char *buf, int count);
68247 diff --git a/include/linux/kmod.h b/include/linux/kmod.h
68248 index 384ca8b..83dd97d 100644
68249 --- a/include/linux/kmod.h
68250 +++ b/include/linux/kmod.h
68251 @@ -31,6 +31,8 @@
68252 * usually useless though. */
68253 extern int __request_module(bool wait, const char *name, ...) \
68254 __attribute__((format(printf, 2, 3)));
68255 +extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
68256 + __attribute__((format(printf, 3, 4)));
68257 #define request_module(mod...) __request_module(true, mod)
68258 #define request_module_nowait(mod...) __request_module(false, mod)
68259 #define try_then_request_module(x, mod...) \
68260 diff --git a/include/linux/kobject.h b/include/linux/kobject.h
68261 index 58ae8e0..3950d3c 100644
68262 --- a/include/linux/kobject.h
68263 +++ b/include/linux/kobject.h
68264 @@ -106,7 +106,7 @@ extern char *kobject_get_path(struct kobject *kobj, gfp_t flag);
68265
68266 struct kobj_type {
68267 void (*release)(struct kobject *kobj);
68268 - struct sysfs_ops *sysfs_ops;
68269 + const struct sysfs_ops *sysfs_ops;
68270 struct attribute **default_attrs;
68271 };
68272
68273 @@ -118,9 +118,9 @@ struct kobj_uevent_env {
68274 };
68275
68276 struct kset_uevent_ops {
68277 - int (*filter)(struct kset *kset, struct kobject *kobj);
68278 - const char *(*name)(struct kset *kset, struct kobject *kobj);
68279 - int (*uevent)(struct kset *kset, struct kobject *kobj,
68280 + int (* const filter)(struct kset *kset, struct kobject *kobj);
68281 + const char *(* const name)(struct kset *kset, struct kobject *kobj);
68282 + int (* const uevent)(struct kset *kset, struct kobject *kobj,
68283 struct kobj_uevent_env *env);
68284 };
68285
68286 @@ -132,7 +132,7 @@ struct kobj_attribute {
68287 const char *buf, size_t count);
68288 };
68289
68290 -extern struct sysfs_ops kobj_sysfs_ops;
68291 +extern const struct sysfs_ops kobj_sysfs_ops;
68292
68293 /**
68294 * struct kset - a set of kobjects of a specific type, belonging to a specific subsystem.
68295 @@ -155,14 +155,14 @@ struct kset {
68296 struct list_head list;
68297 spinlock_t list_lock;
68298 struct kobject kobj;
68299 - struct kset_uevent_ops *uevent_ops;
68300 + const struct kset_uevent_ops *uevent_ops;
68301 };
68302
68303 extern void kset_init(struct kset *kset);
68304 extern int __must_check kset_register(struct kset *kset);
68305 extern void kset_unregister(struct kset *kset);
68306 extern struct kset * __must_check kset_create_and_add(const char *name,
68307 - struct kset_uevent_ops *u,
68308 + const struct kset_uevent_ops *u,
68309 struct kobject *parent_kobj);
68310
68311 static inline struct kset *to_kset(struct kobject *kobj)
68312 diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
68313 index c728a50..752d821 100644
68314 --- a/include/linux/kvm_host.h
68315 +++ b/include/linux/kvm_host.h
68316 @@ -210,7 +210,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
68317 void vcpu_load(struct kvm_vcpu *vcpu);
68318 void vcpu_put(struct kvm_vcpu *vcpu);
68319
68320 -int kvm_init(void *opaque, unsigned int vcpu_size,
68321 +int kvm_init(const void *opaque, unsigned int vcpu_size,
68322 struct module *module);
68323 void kvm_exit(void);
68324
68325 @@ -316,7 +316,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
68326 struct kvm_guest_debug *dbg);
68327 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
68328
68329 -int kvm_arch_init(void *opaque);
68330 +int kvm_arch_init(const void *opaque);
68331 void kvm_arch_exit(void);
68332
68333 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
68334 diff --git a/include/linux/libata.h b/include/linux/libata.h
68335 index a069916..223edde 100644
68336 --- a/include/linux/libata.h
68337 +++ b/include/linux/libata.h
68338 @@ -525,11 +525,11 @@ struct ata_ioports {
68339
68340 struct ata_host {
68341 spinlock_t lock;
68342 - struct device *dev;
68343 + struct device *dev;
68344 void __iomem * const *iomap;
68345 unsigned int n_ports;
68346 void *private_data;
68347 - struct ata_port_operations *ops;
68348 + const struct ata_port_operations *ops;
68349 unsigned long flags;
68350 #ifdef CONFIG_ATA_ACPI
68351 acpi_handle acpi_handle;
68352 @@ -710,7 +710,7 @@ struct ata_link {
68353
68354 struct ata_port {
68355 struct Scsi_Host *scsi_host; /* our co-allocated scsi host */
68356 - struct ata_port_operations *ops;
68357 + const struct ata_port_operations *ops;
68358 spinlock_t *lock;
68359 /* Flags owned by the EH context. Only EH should touch these once the
68360 port is active */
68361 @@ -884,7 +884,7 @@ struct ata_port_operations {
68362 * fields must be pointers.
68363 */
68364 const struct ata_port_operations *inherits;
68365 -};
68366 +} __do_const;
68367
68368 struct ata_port_info {
68369 unsigned long flags;
68370 @@ -892,7 +892,7 @@ struct ata_port_info {
68371 unsigned long pio_mask;
68372 unsigned long mwdma_mask;
68373 unsigned long udma_mask;
68374 - struct ata_port_operations *port_ops;
68375 + const struct ata_port_operations *port_ops;
68376 void *private_data;
68377 };
68378
68379 @@ -916,7 +916,7 @@ extern const unsigned long sata_deb_timing_normal[];
68380 extern const unsigned long sata_deb_timing_hotplug[];
68381 extern const unsigned long sata_deb_timing_long[];
68382
68383 -extern struct ata_port_operations ata_dummy_port_ops;
68384 +extern const struct ata_port_operations ata_dummy_port_ops;
68385 extern const struct ata_port_info ata_dummy_port_info;
68386
68387 static inline const unsigned long *
68388 @@ -962,7 +962,7 @@ extern int ata_host_activate(struct ata_host *host, int irq,
68389 struct scsi_host_template *sht);
68390 extern void ata_host_detach(struct ata_host *host);
68391 extern void ata_host_init(struct ata_host *, struct device *,
68392 - unsigned long, struct ata_port_operations *);
68393 + unsigned long, const struct ata_port_operations *);
68394 extern int ata_scsi_detect(struct scsi_host_template *sht);
68395 extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
68396 extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
68397 diff --git a/include/linux/lockd/bind.h b/include/linux/lockd/bind.h
68398 index fbc48f8..0886e57 100644
68399 --- a/include/linux/lockd/bind.h
68400 +++ b/include/linux/lockd/bind.h
68401 @@ -23,13 +23,13 @@ struct svc_rqst;
68402 * This is the set of functions for lockd->nfsd communication
68403 */
68404 struct nlmsvc_binding {
68405 - __be32 (*fopen)(struct svc_rqst *,
68406 + __be32 (* const fopen)(struct svc_rqst *,
68407 struct nfs_fh *,
68408 struct file **);
68409 - void (*fclose)(struct file *);
68410 + void (* const fclose)(struct file *);
68411 };
68412
68413 -extern struct nlmsvc_binding * nlmsvc_ops;
68414 +extern const struct nlmsvc_binding * nlmsvc_ops;
68415
68416 /*
68417 * Similar to nfs_client_initdata, but without the NFS-specific
68418 diff --git a/include/linux/mca.h b/include/linux/mca.h
68419 index 3797270..7765ede 100644
68420 --- a/include/linux/mca.h
68421 +++ b/include/linux/mca.h
68422 @@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
68423 int region);
68424 void * (*mca_transform_memory)(struct mca_device *,
68425 void *memory);
68426 -};
68427 +} __no_const;
68428
68429 struct mca_bus {
68430 u64 default_dma_mask;
68431 diff --git a/include/linux/memory.h b/include/linux/memory.h
68432 index 37fa19b..b597c85 100644
68433 --- a/include/linux/memory.h
68434 +++ b/include/linux/memory.h
68435 @@ -108,7 +108,7 @@ struct memory_accessor {
68436 size_t count);
68437 ssize_t (*write)(struct memory_accessor *, const char *buf,
68438 off_t offset, size_t count);
68439 -};
68440 +} __no_const;
68441
68442 /*
68443 * Kernel text modification mutex, used for code patching. Users of this lock
68444 diff --git a/include/linux/mm.h b/include/linux/mm.h
68445 index 11e5be6..1ff2423 100644
68446 --- a/include/linux/mm.h
68447 +++ b/include/linux/mm.h
68448 @@ -106,7 +106,14 @@ extern unsigned int kobjsize(const void *objp);
68449
68450 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
68451 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
68452 +
68453 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68454 +#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
68455 +#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
68456 +#else
68457 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
68458 +#endif
68459 +
68460 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
68461 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
68462
68463 @@ -841,12 +848,6 @@ int set_page_dirty(struct page *page);
68464 int set_page_dirty_lock(struct page *page);
68465 int clear_page_dirty_for_io(struct page *page);
68466
68467 -/* Is the vma a continuation of the stack vma above it? */
68468 -static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
68469 -{
68470 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
68471 -}
68472 -
68473 extern unsigned long move_page_tables(struct vm_area_struct *vma,
68474 unsigned long old_addr, struct vm_area_struct *new_vma,
68475 unsigned long new_addr, unsigned long len);
68476 @@ -890,6 +891,8 @@ struct shrinker {
68477 extern void register_shrinker(struct shrinker *);
68478 extern void unregister_shrinker(struct shrinker *);
68479
68480 +pgprot_t vm_get_page_prot(unsigned long vm_flags);
68481 +
68482 int vma_wants_writenotify(struct vm_area_struct *vma);
68483
68484 extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl);
68485 @@ -1162,6 +1165,7 @@ out:
68486 }
68487
68488 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
68489 +extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
68490
68491 extern unsigned long do_brk(unsigned long, unsigned long);
68492
68493 @@ -1218,6 +1222,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
68494 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
68495 struct vm_area_struct **pprev);
68496
68497 +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
68498 +extern void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
68499 +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
68500 +
68501 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
68502 NULL if none. Assume start_addr < end_addr. */
68503 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
68504 @@ -1234,7 +1242,6 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma)
68505 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
68506 }
68507
68508 -pgprot_t vm_get_page_prot(unsigned long vm_flags);
68509 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
68510 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
68511 unsigned long pfn, unsigned long size, pgprot_t);
68512 @@ -1332,7 +1339,13 @@ extern void memory_failure(unsigned long pfn, int trapno);
68513 extern int __memory_failure(unsigned long pfn, int trapno, int ref);
68514 extern int sysctl_memory_failure_early_kill;
68515 extern int sysctl_memory_failure_recovery;
68516 -extern atomic_long_t mce_bad_pages;
68517 +extern atomic_long_unchecked_t mce_bad_pages;
68518 +
68519 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
68520 +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
68521 +#else
68522 +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
68523 +#endif
68524
68525 #endif /* __KERNEL__ */
68526 #endif /* _LINUX_MM_H */
68527 diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
68528 index 9d12ed5..6d9707a 100644
68529 --- a/include/linux/mm_types.h
68530 +++ b/include/linux/mm_types.h
68531 @@ -186,6 +186,8 @@ struct vm_area_struct {
68532 #ifdef CONFIG_NUMA
68533 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
68534 #endif
68535 +
68536 + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
68537 };
68538
68539 struct core_thread {
68540 @@ -287,6 +289,24 @@ struct mm_struct {
68541 #ifdef CONFIG_MMU_NOTIFIER
68542 struct mmu_notifier_mm *mmu_notifier_mm;
68543 #endif
68544 +
68545 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
68546 + unsigned long pax_flags;
68547 +#endif
68548 +
68549 +#ifdef CONFIG_PAX_DLRESOLVE
68550 + unsigned long call_dl_resolve;
68551 +#endif
68552 +
68553 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
68554 + unsigned long call_syscall;
68555 +#endif
68556 +
68557 +#ifdef CONFIG_PAX_ASLR
68558 + unsigned long delta_mmap; /* randomized offset */
68559 + unsigned long delta_stack; /* randomized offset */
68560 +#endif
68561 +
68562 };
68563
68564 /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
68565 diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
68566 index 4e02ee2..afb159e 100644
68567 --- a/include/linux/mmu_notifier.h
68568 +++ b/include/linux/mmu_notifier.h
68569 @@ -235,12 +235,12 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
68570 */
68571 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
68572 ({ \
68573 - pte_t __pte; \
68574 + pte_t ___pte; \
68575 struct vm_area_struct *___vma = __vma; \
68576 unsigned long ___address = __address; \
68577 - __pte = ptep_clear_flush(___vma, ___address, __ptep); \
68578 + ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
68579 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
68580 - __pte; \
68581 + ___pte; \
68582 })
68583
68584 #define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
68585 diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
68586 index 6c31a2a..4b0e930 100644
68587 --- a/include/linux/mmzone.h
68588 +++ b/include/linux/mmzone.h
68589 @@ -350,7 +350,7 @@ struct zone {
68590 unsigned long flags; /* zone flags, see below */
68591
68592 /* Zone statistics */
68593 - atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
68594 + atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
68595
68596 /*
68597 * prev_priority holds the scanning priority for this zone. It is
68598 diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
68599 index f58e9d8..3503935 100644
68600 --- a/include/linux/mod_devicetable.h
68601 +++ b/include/linux/mod_devicetable.h
68602 @@ -12,7 +12,7 @@
68603 typedef unsigned long kernel_ulong_t;
68604 #endif
68605
68606 -#define PCI_ANY_ID (~0)
68607 +#define PCI_ANY_ID ((__u16)~0)
68608
68609 struct pci_device_id {
68610 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
68611 @@ -131,7 +131,7 @@ struct usb_device_id {
68612 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
68613 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
68614
68615 -#define HID_ANY_ID (~0)
68616 +#define HID_ANY_ID (~0U)
68617
68618 struct hid_device_id {
68619 __u16 bus;
68620 diff --git a/include/linux/module.h b/include/linux/module.h
68621 index 482efc8..642032b 100644
68622 --- a/include/linux/module.h
68623 +++ b/include/linux/module.h
68624 @@ -16,6 +16,7 @@
68625 #include <linux/kobject.h>
68626 #include <linux/moduleparam.h>
68627 #include <linux/tracepoint.h>
68628 +#include <linux/fs.h>
68629
68630 #include <asm/local.h>
68631 #include <asm/module.h>
68632 @@ -287,16 +288,16 @@ struct module
68633 int (*init)(void);
68634
68635 /* If this is non-NULL, vfree after init() returns */
68636 - void *module_init;
68637 + void *module_init_rx, *module_init_rw;
68638
68639 /* Here is the actual code + data, vfree'd on unload. */
68640 - void *module_core;
68641 + void *module_core_rx, *module_core_rw;
68642
68643 /* Here are the sizes of the init and core sections */
68644 - unsigned int init_size, core_size;
68645 + unsigned int init_size_rw, core_size_rw;
68646
68647 /* The size of the executable code in each section. */
68648 - unsigned int init_text_size, core_text_size;
68649 + unsigned int init_size_rx, core_size_rx;
68650
68651 /* Arch-specific module values */
68652 struct mod_arch_specific arch;
68653 @@ -345,6 +346,10 @@ struct module
68654 #ifdef CONFIG_EVENT_TRACING
68655 struct ftrace_event_call *trace_events;
68656 unsigned int num_trace_events;
68657 + struct file_operations trace_id;
68658 + struct file_operations trace_enable;
68659 + struct file_operations trace_format;
68660 + struct file_operations trace_filter;
68661 #endif
68662 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
68663 unsigned long *ftrace_callsites;
68664 @@ -393,16 +398,46 @@ struct module *__module_address(unsigned long addr);
68665 bool is_module_address(unsigned long addr);
68666 bool is_module_text_address(unsigned long addr);
68667
68668 +static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
68669 +{
68670 +
68671 +#ifdef CONFIG_PAX_KERNEXEC
68672 + if (ktla_ktva(addr) >= (unsigned long)start &&
68673 + ktla_ktva(addr) < (unsigned long)start + size)
68674 + return 1;
68675 +#endif
68676 +
68677 + return ((void *)addr >= start && (void *)addr < start + size);
68678 +}
68679 +
68680 +static inline int within_module_core_rx(unsigned long addr, struct module *mod)
68681 +{
68682 + return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
68683 +}
68684 +
68685 +static inline int within_module_core_rw(unsigned long addr, struct module *mod)
68686 +{
68687 + return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
68688 +}
68689 +
68690 +static inline int within_module_init_rx(unsigned long addr, struct module *mod)
68691 +{
68692 + return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
68693 +}
68694 +
68695 +static inline int within_module_init_rw(unsigned long addr, struct module *mod)
68696 +{
68697 + return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
68698 +}
68699 +
68700 static inline int within_module_core(unsigned long addr, struct module *mod)
68701 {
68702 - return (unsigned long)mod->module_core <= addr &&
68703 - addr < (unsigned long)mod->module_core + mod->core_size;
68704 + return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
68705 }
68706
68707 static inline int within_module_init(unsigned long addr, struct module *mod)
68708 {
68709 - return (unsigned long)mod->module_init <= addr &&
68710 - addr < (unsigned long)mod->module_init + mod->init_size;
68711 + return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
68712 }
68713
68714 /* Search for module by name: must hold module_mutex. */
68715 diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
68716 index c1f40c2..682ca53 100644
68717 --- a/include/linux/moduleloader.h
68718 +++ b/include/linux/moduleloader.h
68719 @@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
68720 sections. Returns NULL on failure. */
68721 void *module_alloc(unsigned long size);
68722
68723 +#ifdef CONFIG_PAX_KERNEXEC
68724 +void *module_alloc_exec(unsigned long size);
68725 +#else
68726 +#define module_alloc_exec(x) module_alloc(x)
68727 +#endif
68728 +
68729 /* Free memory returned from module_alloc. */
68730 void module_free(struct module *mod, void *module_region);
68731
68732 +#ifdef CONFIG_PAX_KERNEXEC
68733 +void module_free_exec(struct module *mod, void *module_region);
68734 +#else
68735 +#define module_free_exec(x, y) module_free((x), (y))
68736 +#endif
68737 +
68738 /* Apply the given relocation to the (simplified) ELF. Return -error
68739 or 0. */
68740 int apply_relocate(Elf_Shdr *sechdrs,
68741 diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
68742 index 82a9124..8a5f622 100644
68743 --- a/include/linux/moduleparam.h
68744 +++ b/include/linux/moduleparam.h
68745 @@ -132,7 +132,7 @@ struct kparam_array
68746
68747 /* Actually copy string: maxlen param is usually sizeof(string). */
68748 #define module_param_string(name, string, len, perm) \
68749 - static const struct kparam_string __param_string_##name \
68750 + static const struct kparam_string __param_string_##name __used \
68751 = { len, string }; \
68752 __module_param_call(MODULE_PARAM_PREFIX, name, \
68753 param_set_copystring, param_get_string, \
68754 @@ -211,7 +211,7 @@ extern int param_get_invbool(char *buffer, struct kernel_param *kp);
68755
68756 /* Comma-separated array: *nump is set to number they actually specified. */
68757 #define module_param_array_named(name, array, type, nump, perm) \
68758 - static const struct kparam_array __param_arr_##name \
68759 + static const struct kparam_array __param_arr_##name __used \
68760 = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type,\
68761 sizeof(array[0]), array }; \
68762 __module_param_call(MODULE_PARAM_PREFIX, name, \
68763 diff --git a/include/linux/mutex.h b/include/linux/mutex.h
68764 index 878cab4..c92cb3e 100644
68765 --- a/include/linux/mutex.h
68766 +++ b/include/linux/mutex.h
68767 @@ -51,7 +51,7 @@ struct mutex {
68768 spinlock_t wait_lock;
68769 struct list_head wait_list;
68770 #if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
68771 - struct thread_info *owner;
68772 + struct task_struct *owner;
68773 #endif
68774 #ifdef CONFIG_DEBUG_MUTEXES
68775 const char *name;
68776 diff --git a/include/linux/namei.h b/include/linux/namei.h
68777 index ec0f607..d19e675 100644
68778 --- a/include/linux/namei.h
68779 +++ b/include/linux/namei.h
68780 @@ -22,7 +22,7 @@ struct nameidata {
68781 unsigned int flags;
68782 int last_type;
68783 unsigned depth;
68784 - char *saved_names[MAX_NESTED_LINKS + 1];
68785 + const char *saved_names[MAX_NESTED_LINKS + 1];
68786
68787 /* Intent data */
68788 union {
68789 @@ -84,12 +84,12 @@ extern int follow_up(struct path *);
68790 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
68791 extern void unlock_rename(struct dentry *, struct dentry *);
68792
68793 -static inline void nd_set_link(struct nameidata *nd, char *path)
68794 +static inline void nd_set_link(struct nameidata *nd, const char *path)
68795 {
68796 nd->saved_names[nd->depth] = path;
68797 }
68798
68799 -static inline char *nd_get_link(struct nameidata *nd)
68800 +static inline const char *nd_get_link(const struct nameidata *nd)
68801 {
68802 return nd->saved_names[nd->depth];
68803 }
68804 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
68805 index 9d7e8f7..04428c5 100644
68806 --- a/include/linux/netdevice.h
68807 +++ b/include/linux/netdevice.h
68808 @@ -637,6 +637,7 @@ struct net_device_ops {
68809 u16 xid);
68810 #endif
68811 };
68812 +typedef struct net_device_ops __no_const net_device_ops_no_const;
68813
68814 /*
68815 * The DEVICE structure.
68816 diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
68817 new file mode 100644
68818 index 0000000..33f4af8
68819 --- /dev/null
68820 +++ b/include/linux/netfilter/xt_gradm.h
68821 @@ -0,0 +1,9 @@
68822 +#ifndef _LINUX_NETFILTER_XT_GRADM_H
68823 +#define _LINUX_NETFILTER_XT_GRADM_H 1
68824 +
68825 +struct xt_gradm_mtinfo {
68826 + __u16 flags;
68827 + __u16 invflags;
68828 +};
68829 +
68830 +#endif
68831 diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
68832 index b359c4a..c08b334 100644
68833 --- a/include/linux/nodemask.h
68834 +++ b/include/linux/nodemask.h
68835 @@ -464,11 +464,11 @@ static inline int num_node_state(enum node_states state)
68836
68837 #define any_online_node(mask) \
68838 ({ \
68839 - int node; \
68840 - for_each_node_mask(node, (mask)) \
68841 - if (node_online(node)) \
68842 + int __node; \
68843 + for_each_node_mask(__node, (mask)) \
68844 + if (node_online(__node)) \
68845 break; \
68846 - node; \
68847 + __node; \
68848 })
68849
68850 #define num_online_nodes() num_node_state(N_ONLINE)
68851 diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
68852 index 5171639..7cf4235 100644
68853 --- a/include/linux/oprofile.h
68854 +++ b/include/linux/oprofile.h
68855 @@ -129,9 +129,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
68856 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
68857 char const * name, ulong * val);
68858
68859 -/** Create a file for read-only access to an atomic_t. */
68860 +/** Create a file for read-only access to an atomic_unchecked_t. */
68861 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
68862 - char const * name, atomic_t * val);
68863 + char const * name, atomic_unchecked_t * val);
68864
68865 /** create a directory */
68866 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
68867 diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
68868 index 3c62ed4..8924c7c 100644
68869 --- a/include/linux/pagemap.h
68870 +++ b/include/linux/pagemap.h
68871 @@ -425,7 +425,9 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size)
68872 if (((unsigned long)uaddr & PAGE_MASK) !=
68873 ((unsigned long)end & PAGE_MASK))
68874 ret = __get_user(c, end);
68875 + (void)c;
68876 }
68877 + (void)c;
68878 return ret;
68879 }
68880
68881 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
68882 index 81c9689..a567a55 100644
68883 --- a/include/linux/perf_event.h
68884 +++ b/include/linux/perf_event.h
68885 @@ -476,7 +476,7 @@ struct hw_perf_event {
68886 struct hrtimer hrtimer;
68887 };
68888 };
68889 - atomic64_t prev_count;
68890 + atomic64_unchecked_t prev_count;
68891 u64 sample_period;
68892 u64 last_period;
68893 atomic64_t period_left;
68894 @@ -557,7 +557,7 @@ struct perf_event {
68895 const struct pmu *pmu;
68896
68897 enum perf_event_active_state state;
68898 - atomic64_t count;
68899 + atomic64_unchecked_t count;
68900
68901 /*
68902 * These are the total time in nanoseconds that the event
68903 @@ -595,8 +595,8 @@ struct perf_event {
68904 * These accumulate total time (in nanoseconds) that children
68905 * events have been enabled and running, respectively.
68906 */
68907 - atomic64_t child_total_time_enabled;
68908 - atomic64_t child_total_time_running;
68909 + atomic64_unchecked_t child_total_time_enabled;
68910 + atomic64_unchecked_t child_total_time_running;
68911
68912 /*
68913 * Protect attach/detach and child_list:
68914 diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
68915 index b43a9e0..b77d869 100644
68916 --- a/include/linux/pipe_fs_i.h
68917 +++ b/include/linux/pipe_fs_i.h
68918 @@ -46,9 +46,9 @@ struct pipe_inode_info {
68919 wait_queue_head_t wait;
68920 unsigned int nrbufs, curbuf;
68921 struct page *tmp_page;
68922 - unsigned int readers;
68923 - unsigned int writers;
68924 - unsigned int waiting_writers;
68925 + atomic_t readers;
68926 + atomic_t writers;
68927 + atomic_t waiting_writers;
68928 unsigned int r_counter;
68929 unsigned int w_counter;
68930 struct fasync_struct *fasync_readers;
68931 diff --git a/include/linux/poison.h b/include/linux/poison.h
68932 index 34066ff..e95d744 100644
68933 --- a/include/linux/poison.h
68934 +++ b/include/linux/poison.h
68935 @@ -19,8 +19,8 @@
68936 * under normal circumstances, used to verify that nobody uses
68937 * non-initialized list entries.
68938 */
68939 -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
68940 -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
68941 +#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
68942 +#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
68943
68944 /********** include/linux/timer.h **********/
68945 /*
68946 diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
68947 index 4f71bf4..cd2f68e 100644
68948 --- a/include/linux/posix-timers.h
68949 +++ b/include/linux/posix-timers.h
68950 @@ -82,7 +82,8 @@ struct k_clock {
68951 #define TIMER_RETRY 1
68952 void (*timer_get) (struct k_itimer * timr,
68953 struct itimerspec * cur_setting);
68954 -};
68955 +} __do_const;
68956 +typedef struct k_clock __no_const k_clock_no_const;
68957
68958 void register_posix_clock(const clockid_t clock_id, struct k_clock *new_clock);
68959
68960 diff --git a/include/linux/preempt.h b/include/linux/preempt.h
68961 index 72b1a10..13303a9 100644
68962 --- a/include/linux/preempt.h
68963 +++ b/include/linux/preempt.h
68964 @@ -110,7 +110,7 @@ struct preempt_ops {
68965 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
68966 void (*sched_out)(struct preempt_notifier *notifier,
68967 struct task_struct *next);
68968 -};
68969 +} __no_const;
68970
68971 /**
68972 * preempt_notifier - key for installing preemption notifiers
68973 diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
68974 index 379eaed..1bf73e3 100644
68975 --- a/include/linux/proc_fs.h
68976 +++ b/include/linux/proc_fs.h
68977 @@ -155,6 +155,19 @@ static inline struct proc_dir_entry *proc_create(const char *name, mode_t mode,
68978 return proc_create_data(name, mode, parent, proc_fops, NULL);
68979 }
68980
68981 +static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
68982 + struct proc_dir_entry *parent, const struct file_operations *proc_fops)
68983 +{
68984 +#ifdef CONFIG_GRKERNSEC_PROC_USER
68985 + return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
68986 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
68987 + return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
68988 +#else
68989 + return proc_create_data(name, mode, parent, proc_fops, NULL);
68990 +#endif
68991 +}
68992 +
68993 +
68994 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
68995 mode_t mode, struct proc_dir_entry *base,
68996 read_proc_t *read_proc, void * data)
68997 @@ -256,7 +269,7 @@ union proc_op {
68998 int (*proc_show)(struct seq_file *m,
68999 struct pid_namespace *ns, struct pid *pid,
69000 struct task_struct *task);
69001 -};
69002 +} __no_const;
69003
69004 struct ctl_table_header;
69005 struct ctl_table;
69006 diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
69007 index 7456d7d..6c1cfc9 100644
69008 --- a/include/linux/ptrace.h
69009 +++ b/include/linux/ptrace.h
69010 @@ -96,10 +96,10 @@ extern void __ptrace_unlink(struct task_struct *child);
69011 extern void exit_ptrace(struct task_struct *tracer);
69012 #define PTRACE_MODE_READ 1
69013 #define PTRACE_MODE_ATTACH 2
69014 -/* Returns 0 on success, -errno on denial. */
69015 -extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
69016 /* Returns true on success, false on denial. */
69017 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
69018 +/* Returns true on success, false on denial. */
69019 +extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
69020
69021 static inline int ptrace_reparented(struct task_struct *child)
69022 {
69023 diff --git a/include/linux/random.h b/include/linux/random.h
69024 index 2948046..3262567 100644
69025 --- a/include/linux/random.h
69026 +++ b/include/linux/random.h
69027 @@ -63,6 +63,11 @@ unsigned long randomize_range(unsigned long start, unsigned long end, unsigned l
69028 u32 random32(void);
69029 void srandom32(u32 seed);
69030
69031 +static inline unsigned long pax_get_random_long(void)
69032 +{
69033 + return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
69034 +}
69035 +
69036 #endif /* __KERNEL___ */
69037
69038 #endif /* _LINUX_RANDOM_H */
69039 diff --git a/include/linux/reboot.h b/include/linux/reboot.h
69040 index 988e55f..17cb4ef 100644
69041 --- a/include/linux/reboot.h
69042 +++ b/include/linux/reboot.h
69043 @@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
69044 * Architecture-specific implementations of sys_reboot commands.
69045 */
69046
69047 -extern void machine_restart(char *cmd);
69048 -extern void machine_halt(void);
69049 -extern void machine_power_off(void);
69050 +extern void machine_restart(char *cmd) __noreturn;
69051 +extern void machine_halt(void) __noreturn;
69052 +extern void machine_power_off(void) __noreturn;
69053
69054 extern void machine_shutdown(void);
69055 struct pt_regs;
69056 @@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
69057 */
69058
69059 extern void kernel_restart_prepare(char *cmd);
69060 -extern void kernel_restart(char *cmd);
69061 -extern void kernel_halt(void);
69062 -extern void kernel_power_off(void);
69063 +extern void kernel_restart(char *cmd) __noreturn;
69064 +extern void kernel_halt(void) __noreturn;
69065 +extern void kernel_power_off(void) __noreturn;
69066
69067 void ctrl_alt_del(void);
69068
69069 @@ -75,7 +75,7 @@ extern int orderly_poweroff(bool force);
69070 * Emergency restart, callable from an interrupt handler.
69071 */
69072
69073 -extern void emergency_restart(void);
69074 +extern void emergency_restart(void) __noreturn;
69075 #include <asm/emergency-restart.h>
69076
69077 #endif
69078 diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
69079 index dd31e7b..5b03c5c 100644
69080 --- a/include/linux/reiserfs_fs.h
69081 +++ b/include/linux/reiserfs_fs.h
69082 @@ -1326,7 +1326,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
69083 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
69084
69085 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
69086 -#define get_generation(s) atomic_read (&fs_generation(s))
69087 +#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
69088 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
69089 #define __fs_changed(gen,s) (gen != get_generation (s))
69090 #define fs_changed(gen,s) ({cond_resched(); __fs_changed(gen, s);})
69091 @@ -1534,24 +1534,24 @@ static inline struct super_block *sb_from_bi(struct buffer_info *bi)
69092 */
69093
69094 struct item_operations {
69095 - int (*bytes_number) (struct item_head * ih, int block_size);
69096 - void (*decrement_key) (struct cpu_key *);
69097 - int (*is_left_mergeable) (struct reiserfs_key * ih,
69098 + int (* const bytes_number) (struct item_head * ih, int block_size);
69099 + void (* const decrement_key) (struct cpu_key *);
69100 + int (* const is_left_mergeable) (struct reiserfs_key * ih,
69101 unsigned long bsize);
69102 - void (*print_item) (struct item_head *, char *item);
69103 - void (*check_item) (struct item_head *, char *item);
69104 + void (* const print_item) (struct item_head *, char *item);
69105 + void (* const check_item) (struct item_head *, char *item);
69106
69107 - int (*create_vi) (struct virtual_node * vn, struct virtual_item * vi,
69108 + int (* const create_vi) (struct virtual_node * vn, struct virtual_item * vi,
69109 int is_affected, int insert_size);
69110 - int (*check_left) (struct virtual_item * vi, int free,
69111 + int (* const check_left) (struct virtual_item * vi, int free,
69112 int start_skip, int end_skip);
69113 - int (*check_right) (struct virtual_item * vi, int free);
69114 - int (*part_size) (struct virtual_item * vi, int from, int to);
69115 - int (*unit_num) (struct virtual_item * vi);
69116 - void (*print_vi) (struct virtual_item * vi);
69117 + int (* const check_right) (struct virtual_item * vi, int free);
69118 + int (* const part_size) (struct virtual_item * vi, int from, int to);
69119 + int (* const unit_num) (struct virtual_item * vi);
69120 + void (* const print_vi) (struct virtual_item * vi);
69121 };
69122
69123 -extern struct item_operations *item_ops[TYPE_ANY + 1];
69124 +extern const struct item_operations * const item_ops[TYPE_ANY + 1];
69125
69126 #define op_bytes_number(ih,bsize) item_ops[le_ih_k_type (ih)]->bytes_number (ih, bsize)
69127 #define op_is_left_mergeable(key,bsize) item_ops[le_key_k_type (le_key_version (key), key)]->is_left_mergeable (key, bsize)
69128 diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
69129 index dab68bb..0688727 100644
69130 --- a/include/linux/reiserfs_fs_sb.h
69131 +++ b/include/linux/reiserfs_fs_sb.h
69132 @@ -377,7 +377,7 @@ struct reiserfs_sb_info {
69133 /* Comment? -Hans */
69134 wait_queue_head_t s_wait;
69135 /* To be obsoleted soon by per buffer seals.. -Hans */
69136 - atomic_t s_generation_counter; // increased by one every time the
69137 + atomic_unchecked_t s_generation_counter; // increased by one every time the
69138 // tree gets re-balanced
69139 unsigned long s_properties; /* File system properties. Currently holds
69140 on-disk FS format */
69141 diff --git a/include/linux/relay.h b/include/linux/relay.h
69142 index 14a86bc..17d0700 100644
69143 --- a/include/linux/relay.h
69144 +++ b/include/linux/relay.h
69145 @@ -159,7 +159,7 @@ struct rchan_callbacks
69146 * The callback should return 0 if successful, negative if not.
69147 */
69148 int (*remove_buf_file)(struct dentry *dentry);
69149 -};
69150 +} __no_const;
69151
69152 /*
69153 * CONFIG_RELAY kernel API, kernel/relay.c
69154 diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
69155 index 3392c59..a746428 100644
69156 --- a/include/linux/rfkill.h
69157 +++ b/include/linux/rfkill.h
69158 @@ -144,6 +144,7 @@ struct rfkill_ops {
69159 void (*query)(struct rfkill *rfkill, void *data);
69160 int (*set_block)(void *data, bool blocked);
69161 };
69162 +typedef struct rfkill_ops __no_const rfkill_ops_no_const;
69163
69164 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
69165 /**
69166 diff --git a/include/linux/sched.h b/include/linux/sched.h
69167 index 71849bf..2ef383dc3 100644
69168 --- a/include/linux/sched.h
69169 +++ b/include/linux/sched.h
69170 @@ -101,6 +101,7 @@ struct bio;
69171 struct fs_struct;
69172 struct bts_context;
69173 struct perf_event_context;
69174 +struct linux_binprm;
69175
69176 /*
69177 * List of flags we want to share for kernel threads,
69178 @@ -350,7 +351,7 @@ extern signed long schedule_timeout_killable(signed long timeout);
69179 extern signed long schedule_timeout_uninterruptible(signed long timeout);
69180 asmlinkage void __schedule(void);
69181 asmlinkage void schedule(void);
69182 -extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
69183 +extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
69184
69185 struct nsproxy;
69186 struct user_namespace;
69187 @@ -371,9 +372,12 @@ struct user_namespace;
69188 #define DEFAULT_MAX_MAP_COUNT (USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
69189
69190 extern int sysctl_max_map_count;
69191 +extern unsigned long sysctl_heap_stack_gap;
69192
69193 #include <linux/aio.h>
69194
69195 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
69196 +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
69197 extern unsigned long
69198 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
69199 unsigned long, unsigned long);
69200 @@ -666,6 +670,16 @@ struct signal_struct {
69201 struct tty_audit_buf *tty_audit_buf;
69202 #endif
69203
69204 +#ifdef CONFIG_GRKERNSEC
69205 + u32 curr_ip;
69206 + u32 saved_ip;
69207 + u32 gr_saddr;
69208 + u32 gr_daddr;
69209 + u16 gr_sport;
69210 + u16 gr_dport;
69211 + u8 used_accept:1;
69212 +#endif
69213 +
69214 int oom_adj; /* OOM kill score adjustment (bit shift) */
69215 };
69216
69217 @@ -723,6 +737,11 @@ struct user_struct {
69218 struct key *session_keyring; /* UID's default session keyring */
69219 #endif
69220
69221 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
69222 + unsigned int banned;
69223 + unsigned long ban_expires;
69224 +#endif
69225 +
69226 /* Hash table maintenance information */
69227 struct hlist_node uidhash_node;
69228 uid_t uid;
69229 @@ -1328,8 +1347,8 @@ struct task_struct {
69230 struct list_head thread_group;
69231
69232 struct completion *vfork_done; /* for vfork() */
69233 - int __user *set_child_tid; /* CLONE_CHILD_SETTID */
69234 - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
69235 + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
69236 + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
69237
69238 cputime_t utime, stime, utimescaled, stimescaled;
69239 cputime_t gtime;
69240 @@ -1343,16 +1362,6 @@ struct task_struct {
69241 struct task_cputime cputime_expires;
69242 struct list_head cpu_timers[3];
69243
69244 -/* process credentials */
69245 - const struct cred *real_cred; /* objective and real subjective task
69246 - * credentials (COW) */
69247 - const struct cred *cred; /* effective (overridable) subjective task
69248 - * credentials (COW) */
69249 - struct mutex cred_guard_mutex; /* guard against foreign influences on
69250 - * credential calculations
69251 - * (notably. ptrace) */
69252 - struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
69253 -
69254 char comm[TASK_COMM_LEN]; /* executable name excluding path
69255 - access with [gs]et_task_comm (which lock
69256 it with task_lock())
69257 @@ -1369,6 +1378,10 @@ struct task_struct {
69258 #endif
69259 /* CPU-specific state of this task */
69260 struct thread_struct thread;
69261 +/* thread_info moved to task_struct */
69262 +#ifdef CONFIG_X86
69263 + struct thread_info tinfo;
69264 +#endif
69265 /* filesystem information */
69266 struct fs_struct *fs;
69267 /* open file information */
69268 @@ -1436,6 +1449,15 @@ struct task_struct {
69269 int hardirq_context;
69270 int softirq_context;
69271 #endif
69272 +
69273 +/* process credentials */
69274 + const struct cred *real_cred; /* objective and real subjective task
69275 + * credentials (COW) */
69276 + struct mutex cred_guard_mutex; /* guard against foreign influences on
69277 + * credential calculations
69278 + * (notably. ptrace) */
69279 + struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
69280 +
69281 #ifdef CONFIG_LOCKDEP
69282 # define MAX_LOCK_DEPTH 48UL
69283 u64 curr_chain_key;
69284 @@ -1456,6 +1478,9 @@ struct task_struct {
69285
69286 struct backing_dev_info *backing_dev_info;
69287
69288 + const struct cred *cred; /* effective (overridable) subjective task
69289 + * credentials (COW) */
69290 +
69291 struct io_context *io_context;
69292
69293 unsigned long ptrace_message;
69294 @@ -1519,6 +1544,27 @@ struct task_struct {
69295 unsigned long default_timer_slack_ns;
69296
69297 struct list_head *scm_work_list;
69298 +
69299 +#ifdef CONFIG_GRKERNSEC
69300 + /* grsecurity */
69301 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
69302 + u64 exec_id;
69303 +#endif
69304 +#ifdef CONFIG_GRKERNSEC_SETXID
69305 + const struct cred *delayed_cred;
69306 +#endif
69307 + struct dentry *gr_chroot_dentry;
69308 + struct acl_subject_label *acl;
69309 + struct acl_role_label *role;
69310 + struct file *exec_file;
69311 + u16 acl_role_id;
69312 + /* is this the task that authenticated to the special role */
69313 + u8 acl_sp_role;
69314 + u8 is_writable;
69315 + u8 brute;
69316 + u8 gr_is_chrooted;
69317 +#endif
69318 +
69319 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
69320 /* Index of current stored adress in ret_stack */
69321 int curr_ret_stack;
69322 @@ -1542,6 +1588,57 @@ struct task_struct {
69323 #endif /* CONFIG_TRACING */
69324 };
69325
69326 +#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
69327 +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
69328 +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
69329 +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
69330 +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
69331 +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
69332 +
69333 +#ifdef CONFIG_PAX_SOFTMODE
69334 +extern int pax_softmode;
69335 +#endif
69336 +
69337 +extern int pax_check_flags(unsigned long *);
69338 +
69339 +/* if tsk != current then task_lock must be held on it */
69340 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
69341 +static inline unsigned long pax_get_flags(struct task_struct *tsk)
69342 +{
69343 + if (likely(tsk->mm))
69344 + return tsk->mm->pax_flags;
69345 + else
69346 + return 0UL;
69347 +}
69348 +
69349 +/* if tsk != current then task_lock must be held on it */
69350 +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
69351 +{
69352 + if (likely(tsk->mm)) {
69353 + tsk->mm->pax_flags = flags;
69354 + return 0;
69355 + }
69356 + return -EINVAL;
69357 +}
69358 +#endif
69359 +
69360 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
69361 +extern void pax_set_initial_flags(struct linux_binprm *bprm);
69362 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
69363 +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
69364 +#endif
69365 +
69366 +extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
69367 +extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
69368 +extern void pax_report_refcount_overflow(struct pt_regs *regs);
69369 +extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
69370 +
69371 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
69372 +extern void pax_track_stack(void);
69373 +#else
69374 +static inline void pax_track_stack(void) {}
69375 +#endif
69376 +
69377 /* Future-safe accessor for struct task_struct's cpus_allowed. */
69378 #define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
69379
69380 @@ -1740,7 +1837,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
69381 #define PF_DUMPCORE 0x00000200 /* dumped core */
69382 #define PF_SIGNALED 0x00000400 /* killed by a signal */
69383 #define PF_MEMALLOC 0x00000800 /* Allocating memory */
69384 -#define PF_FLUSHER 0x00001000 /* responsible for disk writeback */
69385 +#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */
69386 #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
69387 #define PF_FREEZING 0x00004000 /* freeze in progress. do not account to load */
69388 #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
69389 @@ -1978,7 +2075,9 @@ void yield(void);
69390 extern struct exec_domain default_exec_domain;
69391
69392 union thread_union {
69393 +#ifndef CONFIG_X86
69394 struct thread_info thread_info;
69395 +#endif
69396 unsigned long stack[THREAD_SIZE/sizeof(long)];
69397 };
69398
69399 @@ -2011,6 +2110,7 @@ extern struct pid_namespace init_pid_ns;
69400 */
69401
69402 extern struct task_struct *find_task_by_vpid(pid_t nr);
69403 +extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
69404 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
69405 struct pid_namespace *ns);
69406
69407 @@ -2155,7 +2255,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
69408 extern void exit_itimers(struct signal_struct *);
69409 extern void flush_itimer_signals(void);
69410
69411 -extern NORET_TYPE void do_group_exit(int);
69412 +extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
69413
69414 extern void daemonize(const char *, ...);
69415 extern int allow_signal(int);
69416 @@ -2284,13 +2384,17 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
69417
69418 #endif
69419
69420 -static inline int object_is_on_stack(void *obj)
69421 +static inline int object_starts_on_stack(void *obj)
69422 {
69423 - void *stack = task_stack_page(current);
69424 + const void *stack = task_stack_page(current);
69425
69426 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
69427 }
69428
69429 +#ifdef CONFIG_PAX_USERCOPY
69430 +extern int object_is_on_stack(const void *obj, unsigned long len);
69431 +#endif
69432 +
69433 extern void thread_info_cache_init(void);
69434
69435 #ifdef CONFIG_DEBUG_STACK_USAGE
69436 diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
69437 index 1ee2c05..81b7ec4 100644
69438 --- a/include/linux/screen_info.h
69439 +++ b/include/linux/screen_info.h
69440 @@ -42,7 +42,8 @@ struct screen_info {
69441 __u16 pages; /* 0x32 */
69442 __u16 vesa_attributes; /* 0x34 */
69443 __u32 capabilities; /* 0x36 */
69444 - __u8 _reserved[6]; /* 0x3a */
69445 + __u16 vesapm_size; /* 0x3a */
69446 + __u8 _reserved[4]; /* 0x3c */
69447 } __attribute__((packed));
69448
69449 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
69450 diff --git a/include/linux/security.h b/include/linux/security.h
69451 index d40d23f..d739b08 100644
69452 --- a/include/linux/security.h
69453 +++ b/include/linux/security.h
69454 @@ -34,6 +34,7 @@
69455 #include <linux/key.h>
69456 #include <linux/xfrm.h>
69457 #include <linux/gfp.h>
69458 +#include <linux/grsecurity.h>
69459 #include <net/flow.h>
69460
69461 /* Maximum number of letters for an LSM name string */
69462 @@ -76,7 +77,7 @@ extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3,
69463 extern int cap_task_setscheduler(struct task_struct *p, int policy, struct sched_param *lp);
69464 extern int cap_task_setioprio(struct task_struct *p, int ioprio);
69465 extern int cap_task_setnice(struct task_struct *p, int nice);
69466 -extern int cap_syslog(int type);
69467 +extern int cap_syslog(int type, bool from_file);
69468 extern int cap_vm_enough_memory(struct mm_struct *mm, long pages);
69469
69470 struct msghdr;
69471 @@ -1331,6 +1332,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
69472 * logging to the console.
69473 * See the syslog(2) manual page for an explanation of the @type values.
69474 * @type contains the type of action.
69475 + * @from_file indicates the context of action (if it came from /proc).
69476 * Return 0 if permission is granted.
69477 * @settime:
69478 * Check permission to change the system time.
69479 @@ -1445,7 +1447,7 @@ struct security_operations {
69480 int (*sysctl) (struct ctl_table *table, int op);
69481 int (*quotactl) (int cmds, int type, int id, struct super_block *sb);
69482 int (*quota_on) (struct dentry *dentry);
69483 - int (*syslog) (int type);
69484 + int (*syslog) (int type, bool from_file);
69485 int (*settime) (struct timespec *ts, struct timezone *tz);
69486 int (*vm_enough_memory) (struct mm_struct *mm, long pages);
69487
69488 @@ -1740,7 +1742,7 @@ int security_acct(struct file *file);
69489 int security_sysctl(struct ctl_table *table, int op);
69490 int security_quotactl(int cmds, int type, int id, struct super_block *sb);
69491 int security_quota_on(struct dentry *dentry);
69492 -int security_syslog(int type);
69493 +int security_syslog(int type, bool from_file);
69494 int security_settime(struct timespec *ts, struct timezone *tz);
69495 int security_vm_enough_memory(long pages);
69496 int security_vm_enough_memory_mm(struct mm_struct *mm, long pages);
69497 @@ -1986,9 +1988,9 @@ static inline int security_quota_on(struct dentry *dentry)
69498 return 0;
69499 }
69500
69501 -static inline int security_syslog(int type)
69502 +static inline int security_syslog(int type, bool from_file)
69503 {
69504 - return cap_syslog(type);
69505 + return cap_syslog(type, from_file);
69506 }
69507
69508 static inline int security_settime(struct timespec *ts, struct timezone *tz)
69509 diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
69510 index 8366d8f..cc5f9d6 100644
69511 --- a/include/linux/seq_file.h
69512 +++ b/include/linux/seq_file.h
69513 @@ -23,6 +23,9 @@ struct seq_file {
69514 u64 version;
69515 struct mutex lock;
69516 const struct seq_operations *op;
69517 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
69518 + u64 exec_id;
69519 +#endif
69520 void *private;
69521 };
69522
69523 @@ -32,6 +35,7 @@ struct seq_operations {
69524 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
69525 int (*show) (struct seq_file *m, void *v);
69526 };
69527 +typedef struct seq_operations __no_const seq_operations_no_const;
69528
69529 #define SEQ_SKIP 1
69530
69531 diff --git a/include/linux/shm.h b/include/linux/shm.h
69532 index eca6235..c7417ed 100644
69533 --- a/include/linux/shm.h
69534 +++ b/include/linux/shm.h
69535 @@ -95,6 +95,10 @@ struct shmid_kernel /* private to the kernel */
69536 pid_t shm_cprid;
69537 pid_t shm_lprid;
69538 struct user_struct *mlock_user;
69539 +#ifdef CONFIG_GRKERNSEC
69540 + time_t shm_createtime;
69541 + pid_t shm_lapid;
69542 +#endif
69543 };
69544
69545 /* shm_mode upper byte flags */
69546 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
69547 index bcdd660..6e12e11 100644
69548 --- a/include/linux/skbuff.h
69549 +++ b/include/linux/skbuff.h
69550 @@ -14,6 +14,7 @@
69551 #ifndef _LINUX_SKBUFF_H
69552 #define _LINUX_SKBUFF_H
69553
69554 +#include <linux/const.h>
69555 #include <linux/kernel.h>
69556 #include <linux/kmemcheck.h>
69557 #include <linux/compiler.h>
69558 @@ -544,7 +545,7 @@ static inline union skb_shared_tx *skb_tx(struct sk_buff *skb)
69559 */
69560 static inline int skb_queue_empty(const struct sk_buff_head *list)
69561 {
69562 - return list->next == (struct sk_buff *)list;
69563 + return list->next == (const struct sk_buff *)list;
69564 }
69565
69566 /**
69567 @@ -557,7 +558,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
69568 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
69569 const struct sk_buff *skb)
69570 {
69571 - return (skb->next == (struct sk_buff *) list);
69572 + return (skb->next == (const struct sk_buff *) list);
69573 }
69574
69575 /**
69576 @@ -570,7 +571,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
69577 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
69578 const struct sk_buff *skb)
69579 {
69580 - return (skb->prev == (struct sk_buff *) list);
69581 + return (skb->prev == (const struct sk_buff *) list);
69582 }
69583
69584 /**
69585 @@ -1367,7 +1368,7 @@ static inline int skb_network_offset(const struct sk_buff *skb)
69586 * headroom, you should not reduce this.
69587 */
69588 #ifndef NET_SKB_PAD
69589 -#define NET_SKB_PAD 32
69590 +#define NET_SKB_PAD (_AC(32,UL))
69591 #endif
69592
69593 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
69594 diff --git a/include/linux/slab.h b/include/linux/slab.h
69595 index 2da8372..a3be824 100644
69596 --- a/include/linux/slab.h
69597 +++ b/include/linux/slab.h
69598 @@ -11,12 +11,20 @@
69599
69600 #include <linux/gfp.h>
69601 #include <linux/types.h>
69602 +#include <linux/err.h>
69603
69604 /*
69605 * Flags to pass to kmem_cache_create().
69606 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
69607 */
69608 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
69609 +
69610 +#ifdef CONFIG_PAX_USERCOPY
69611 +#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
69612 +#else
69613 +#define SLAB_USERCOPY 0x00000000UL
69614 +#endif
69615 +
69616 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
69617 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
69618 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
69619 @@ -82,10 +90,13 @@
69620 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
69621 * Both make kfree a no-op.
69622 */
69623 -#define ZERO_SIZE_PTR ((void *)16)
69624 +#define ZERO_SIZE_PTR \
69625 +({ \
69626 + BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
69627 + (void *)(-MAX_ERRNO-1L); \
69628 +})
69629
69630 -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
69631 - (unsigned long)ZERO_SIZE_PTR)
69632 +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
69633
69634 /*
69635 * struct kmem_cache related prototypes
69636 @@ -138,6 +149,7 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
69637 void kfree(const void *);
69638 void kzfree(const void *);
69639 size_t ksize(const void *);
69640 +void check_object_size(const void *ptr, unsigned long n, bool to);
69641
69642 /*
69643 * Allocator specific definitions. These are mainly used to establish optimized
69644 @@ -328,4 +340,37 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
69645
69646 void __init kmem_cache_init_late(void);
69647
69648 +#define kmalloc(x, y) \
69649 +({ \
69650 + void *___retval; \
69651 + intoverflow_t ___x = (intoverflow_t)x; \
69652 + if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n"))\
69653 + ___retval = NULL; \
69654 + else \
69655 + ___retval = kmalloc((size_t)___x, (y)); \
69656 + ___retval; \
69657 +})
69658 +
69659 +#define kmalloc_node(x, y, z) \
69660 +({ \
69661 + void *___retval; \
69662 + intoverflow_t ___x = (intoverflow_t)x; \
69663 + if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
69664 + ___retval = NULL; \
69665 + else \
69666 + ___retval = kmalloc_node((size_t)___x, (y), (z));\
69667 + ___retval; \
69668 +})
69669 +
69670 +#define kzalloc(x, y) \
69671 +({ \
69672 + void *___retval; \
69673 + intoverflow_t ___x = (intoverflow_t)x; \
69674 + if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n"))\
69675 + ___retval = NULL; \
69676 + else \
69677 + ___retval = kzalloc((size_t)___x, (y)); \
69678 + ___retval; \
69679 +})
69680 +
69681 #endif /* _LINUX_SLAB_H */
69682 diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
69683 index 850d057..d9dfe3c 100644
69684 --- a/include/linux/slab_def.h
69685 +++ b/include/linux/slab_def.h
69686 @@ -69,10 +69,10 @@ struct kmem_cache {
69687 unsigned long node_allocs;
69688 unsigned long node_frees;
69689 unsigned long node_overflow;
69690 - atomic_t allochit;
69691 - atomic_t allocmiss;
69692 - atomic_t freehit;
69693 - atomic_t freemiss;
69694 + atomic_unchecked_t allochit;
69695 + atomic_unchecked_t allocmiss;
69696 + atomic_unchecked_t freehit;
69697 + atomic_unchecked_t freemiss;
69698
69699 /*
69700 * If debugging is enabled, then the allocator can add additional
69701 diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
69702 index 5ad70a6..57f9f65 100644
69703 --- a/include/linux/slub_def.h
69704 +++ b/include/linux/slub_def.h
69705 @@ -86,7 +86,7 @@ struct kmem_cache {
69706 struct kmem_cache_order_objects max;
69707 struct kmem_cache_order_objects min;
69708 gfp_t allocflags; /* gfp flags to use on each alloc */
69709 - int refcount; /* Refcount for slab cache destroy */
69710 + atomic_t refcount; /* Refcount for slab cache destroy */
69711 void (*ctor)(void *);
69712 int inuse; /* Offset to metadata */
69713 int align; /* Alignment */
69714 @@ -215,7 +215,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
69715 #endif
69716
69717 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
69718 -void *__kmalloc(size_t size, gfp_t flags);
69719 +void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
69720
69721 #ifdef CONFIG_KMEMTRACE
69722 extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
69723 diff --git a/include/linux/sonet.h b/include/linux/sonet.h
69724 index 67ad11f..0bbd8af 100644
69725 --- a/include/linux/sonet.h
69726 +++ b/include/linux/sonet.h
69727 @@ -61,7 +61,7 @@ struct sonet_stats {
69728 #include <asm/atomic.h>
69729
69730 struct k_sonet_stats {
69731 -#define __HANDLE_ITEM(i) atomic_t i
69732 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
69733 __SONET_ITEMS
69734 #undef __HANDLE_ITEM
69735 };
69736 diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
69737 index 6f52b4d..5500323 100644
69738 --- a/include/linux/sunrpc/cache.h
69739 +++ b/include/linux/sunrpc/cache.h
69740 @@ -125,7 +125,7 @@ struct cache_detail {
69741 */
69742 struct cache_req {
69743 struct cache_deferred_req *(*defer)(struct cache_req *req);
69744 -};
69745 +} __no_const;
69746 /* this must be embedded in a deferred_request that is being
69747 * delayed awaiting cache-fill
69748 */
69749 diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
69750 index 8ed9642..101ceab 100644
69751 --- a/include/linux/sunrpc/clnt.h
69752 +++ b/include/linux/sunrpc/clnt.h
69753 @@ -167,9 +167,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
69754 {
69755 switch (sap->sa_family) {
69756 case AF_INET:
69757 - return ntohs(((struct sockaddr_in *)sap)->sin_port);
69758 + return ntohs(((const struct sockaddr_in *)sap)->sin_port);
69759 case AF_INET6:
69760 - return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
69761 + return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
69762 }
69763 return 0;
69764 }
69765 @@ -202,7 +202,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
69766 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
69767 const struct sockaddr *src)
69768 {
69769 - const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
69770 + const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
69771 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
69772
69773 dsin->sin_family = ssin->sin_family;
69774 @@ -299,7 +299,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
69775 if (sa->sa_family != AF_INET6)
69776 return 0;
69777
69778 - return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
69779 + return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
69780 }
69781
69782 #endif /* __KERNEL__ */
69783 diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
69784 index c14fe86..393245e 100644
69785 --- a/include/linux/sunrpc/svc_rdma.h
69786 +++ b/include/linux/sunrpc/svc_rdma.h
69787 @@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
69788 extern unsigned int svcrdma_max_requests;
69789 extern unsigned int svcrdma_max_req_size;
69790
69791 -extern atomic_t rdma_stat_recv;
69792 -extern atomic_t rdma_stat_read;
69793 -extern atomic_t rdma_stat_write;
69794 -extern atomic_t rdma_stat_sq_starve;
69795 -extern atomic_t rdma_stat_rq_starve;
69796 -extern atomic_t rdma_stat_rq_poll;
69797 -extern atomic_t rdma_stat_rq_prod;
69798 -extern atomic_t rdma_stat_sq_poll;
69799 -extern atomic_t rdma_stat_sq_prod;
69800 +extern atomic_unchecked_t rdma_stat_recv;
69801 +extern atomic_unchecked_t rdma_stat_read;
69802 +extern atomic_unchecked_t rdma_stat_write;
69803 +extern atomic_unchecked_t rdma_stat_sq_starve;
69804 +extern atomic_unchecked_t rdma_stat_rq_starve;
69805 +extern atomic_unchecked_t rdma_stat_rq_poll;
69806 +extern atomic_unchecked_t rdma_stat_rq_prod;
69807 +extern atomic_unchecked_t rdma_stat_sq_poll;
69808 +extern atomic_unchecked_t rdma_stat_sq_prod;
69809
69810 #define RPCRDMA_VERSION 1
69811
69812 diff --git a/include/linux/suspend.h b/include/linux/suspend.h
69813 index 5e781d8..1e62818 100644
69814 --- a/include/linux/suspend.h
69815 +++ b/include/linux/suspend.h
69816 @@ -104,15 +104,15 @@ typedef int __bitwise suspend_state_t;
69817 * which require special recovery actions in that situation.
69818 */
69819 struct platform_suspend_ops {
69820 - int (*valid)(suspend_state_t state);
69821 - int (*begin)(suspend_state_t state);
69822 - int (*prepare)(void);
69823 - int (*prepare_late)(void);
69824 - int (*enter)(suspend_state_t state);
69825 - void (*wake)(void);
69826 - void (*finish)(void);
69827 - void (*end)(void);
69828 - void (*recover)(void);
69829 + int (* const valid)(suspend_state_t state);
69830 + int (* const begin)(suspend_state_t state);
69831 + int (* const prepare)(void);
69832 + int (* const prepare_late)(void);
69833 + int (* const enter)(suspend_state_t state);
69834 + void (* const wake)(void);
69835 + void (* const finish)(void);
69836 + void (* const end)(void);
69837 + void (* const recover)(void);
69838 };
69839
69840 #ifdef CONFIG_SUSPEND
69841 @@ -120,7 +120,7 @@ struct platform_suspend_ops {
69842 * suspend_set_ops - set platform dependent suspend operations
69843 * @ops: The new suspend operations to set.
69844 */
69845 -extern void suspend_set_ops(struct platform_suspend_ops *ops);
69846 +extern void suspend_set_ops(const struct platform_suspend_ops *ops);
69847 extern int suspend_valid_only_mem(suspend_state_t state);
69848
69849 /**
69850 @@ -145,7 +145,7 @@ extern int pm_suspend(suspend_state_t state);
69851 #else /* !CONFIG_SUSPEND */
69852 #define suspend_valid_only_mem NULL
69853
69854 -static inline void suspend_set_ops(struct platform_suspend_ops *ops) {}
69855 +static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
69856 static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
69857 #endif /* !CONFIG_SUSPEND */
69858
69859 @@ -215,16 +215,16 @@ extern void mark_free_pages(struct zone *zone);
69860 * platforms which require special recovery actions in that situation.
69861 */
69862 struct platform_hibernation_ops {
69863 - int (*begin)(void);
69864 - void (*end)(void);
69865 - int (*pre_snapshot)(void);
69866 - void (*finish)(void);
69867 - int (*prepare)(void);
69868 - int (*enter)(void);
69869 - void (*leave)(void);
69870 - int (*pre_restore)(void);
69871 - void (*restore_cleanup)(void);
69872 - void (*recover)(void);
69873 + int (* const begin)(void);
69874 + void (* const end)(void);
69875 + int (* const pre_snapshot)(void);
69876 + void (* const finish)(void);
69877 + int (* const prepare)(void);
69878 + int (* const enter)(void);
69879 + void (* const leave)(void);
69880 + int (* const pre_restore)(void);
69881 + void (* const restore_cleanup)(void);
69882 + void (* const recover)(void);
69883 };
69884
69885 #ifdef CONFIG_HIBERNATION
69886 @@ -243,7 +243,7 @@ extern void swsusp_set_page_free(struct page *);
69887 extern void swsusp_unset_page_free(struct page *);
69888 extern unsigned long get_safe_page(gfp_t gfp_mask);
69889
69890 -extern void hibernation_set_ops(struct platform_hibernation_ops *ops);
69891 +extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
69892 extern int hibernate(void);
69893 extern bool system_entering_hibernation(void);
69894 #else /* CONFIG_HIBERNATION */
69895 @@ -251,7 +251,7 @@ static inline int swsusp_page_is_forbidden(struct page *p) { return 0; }
69896 static inline void swsusp_set_page_free(struct page *p) {}
69897 static inline void swsusp_unset_page_free(struct page *p) {}
69898
69899 -static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {}
69900 +static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {}
69901 static inline int hibernate(void) { return -ENOSYS; }
69902 static inline bool system_entering_hibernation(void) { return false; }
69903 #endif /* CONFIG_HIBERNATION */
69904 diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
69905 index 0eb6942..a805cb6 100644
69906 --- a/include/linux/sysctl.h
69907 +++ b/include/linux/sysctl.h
69908 @@ -164,7 +164,11 @@ enum
69909 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
69910 };
69911
69912 -
69913 +#ifdef CONFIG_PAX_SOFTMODE
69914 +enum {
69915 + PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
69916 +};
69917 +#endif
69918
69919 /* CTL_VM names: */
69920 enum
69921 @@ -982,6 +986,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
69922
69923 extern int proc_dostring(struct ctl_table *, int,
69924 void __user *, size_t *, loff_t *);
69925 +extern int proc_dostring_modpriv(struct ctl_table *, int,
69926 + void __user *, size_t *, loff_t *);
69927 extern int proc_dointvec(struct ctl_table *, int,
69928 void __user *, size_t *, loff_t *);
69929 extern int proc_dointvec_minmax(struct ctl_table *, int,
69930 @@ -1003,6 +1009,7 @@ extern int do_sysctl (int __user *name, int nlen,
69931
69932 extern ctl_handler sysctl_data;
69933 extern ctl_handler sysctl_string;
69934 +extern ctl_handler sysctl_string_modpriv;
69935 extern ctl_handler sysctl_intvec;
69936 extern ctl_handler sysctl_jiffies;
69937 extern ctl_handler sysctl_ms_jiffies;
69938 diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
69939 index 9d68fed..71f02cc 100644
69940 --- a/include/linux/sysfs.h
69941 +++ b/include/linux/sysfs.h
69942 @@ -75,8 +75,8 @@ struct bin_attribute {
69943 };
69944
69945 struct sysfs_ops {
69946 - ssize_t (*show)(struct kobject *, struct attribute *,char *);
69947 - ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t);
69948 + ssize_t (* const show)(struct kobject *, struct attribute *,char *);
69949 + ssize_t (* const store)(struct kobject *,struct attribute *,const char *, size_t);
69950 };
69951
69952 struct sysfs_dirent;
69953 diff --git a/include/linux/syslog.h b/include/linux/syslog.h
69954 new file mode 100644
69955 index 0000000..3891139
69956 --- /dev/null
69957 +++ b/include/linux/syslog.h
69958 @@ -0,0 +1,52 @@
69959 +/* Syslog internals
69960 + *
69961 + * Copyright 2010 Canonical, Ltd.
69962 + * Author: Kees Cook <kees.cook@canonical.com>
69963 + *
69964 + * This program is free software; you can redistribute it and/or modify
69965 + * it under the terms of the GNU General Public License as published by
69966 + * the Free Software Foundation; either version 2, or (at your option)
69967 + * any later version.
69968 + *
69969 + * This program is distributed in the hope that it will be useful,
69970 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
69971 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
69972 + * GNU General Public License for more details.
69973 + *
69974 + * You should have received a copy of the GNU General Public License
69975 + * along with this program; see the file COPYING. If not, write to
69976 + * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
69977 + */
69978 +
69979 +#ifndef _LINUX_SYSLOG_H
69980 +#define _LINUX_SYSLOG_H
69981 +
69982 +/* Close the log. Currently a NOP. */
69983 +#define SYSLOG_ACTION_CLOSE 0
69984 +/* Open the log. Currently a NOP. */
69985 +#define SYSLOG_ACTION_OPEN 1
69986 +/* Read from the log. */
69987 +#define SYSLOG_ACTION_READ 2
69988 +/* Read all messages remaining in the ring buffer. */
69989 +#define SYSLOG_ACTION_READ_ALL 3
69990 +/* Read and clear all messages remaining in the ring buffer */
69991 +#define SYSLOG_ACTION_READ_CLEAR 4
69992 +/* Clear ring buffer. */
69993 +#define SYSLOG_ACTION_CLEAR 5
69994 +/* Disable printk's to console */
69995 +#define SYSLOG_ACTION_CONSOLE_OFF 6
69996 +/* Enable printk's to console */
69997 +#define SYSLOG_ACTION_CONSOLE_ON 7
69998 +/* Set level of messages printed to console */
69999 +#define SYSLOG_ACTION_CONSOLE_LEVEL 8
70000 +/* Return number of unread characters in the log buffer */
70001 +#define SYSLOG_ACTION_SIZE_UNREAD 9
70002 +/* Return size of the log buffer */
70003 +#define SYSLOG_ACTION_SIZE_BUFFER 10
70004 +
70005 +#define SYSLOG_FROM_CALL 0
70006 +#define SYSLOG_FROM_FILE 1
70007 +
70008 +int do_syslog(int type, char __user *buf, int count, bool from_file);
70009 +
70010 +#endif /* _LINUX_SYSLOG_H */
70011 diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
70012 index a8cc4e1..98d3b85 100644
70013 --- a/include/linux/thread_info.h
70014 +++ b/include/linux/thread_info.h
70015 @@ -23,7 +23,7 @@ struct restart_block {
70016 };
70017 /* For futex_wait and futex_wait_requeue_pi */
70018 struct {
70019 - u32 *uaddr;
70020 + u32 __user *uaddr;
70021 u32 val;
70022 u32 flags;
70023 u32 bitset;
70024 diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
70025 index 1eb44a9..f582df3 100644
70026 --- a/include/linux/tracehook.h
70027 +++ b/include/linux/tracehook.h
70028 @@ -69,12 +69,12 @@ static inline int tracehook_expect_breakpoints(struct task_struct *task)
70029 /*
70030 * ptrace report for syscall entry and exit looks identical.
70031 */
70032 -static inline void ptrace_report_syscall(struct pt_regs *regs)
70033 +static inline int ptrace_report_syscall(struct pt_regs *regs)
70034 {
70035 int ptrace = task_ptrace(current);
70036
70037 if (!(ptrace & PT_PTRACED))
70038 - return;
70039 + return 0;
70040
70041 ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));
70042
70043 @@ -87,6 +87,8 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
70044 send_sig(current->exit_code, current, 1);
70045 current->exit_code = 0;
70046 }
70047 +
70048 + return fatal_signal_pending(current);
70049 }
70050
70051 /**
70052 @@ -111,8 +113,7 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
70053 static inline __must_check int tracehook_report_syscall_entry(
70054 struct pt_regs *regs)
70055 {
70056 - ptrace_report_syscall(regs);
70057 - return 0;
70058 + return ptrace_report_syscall(regs);
70059 }
70060
70061 /**
70062 diff --git a/include/linux/tty.h b/include/linux/tty.h
70063 index e9c57e9..ee6d489 100644
70064 --- a/include/linux/tty.h
70065 +++ b/include/linux/tty.h
70066 @@ -493,7 +493,6 @@ extern void tty_ldisc_begin(void);
70067 /* This last one is just for the tty layer internals and shouldn't be used elsewhere */
70068 extern void tty_ldisc_enable(struct tty_struct *tty);
70069
70070 -
70071 /* n_tty.c */
70072 extern struct tty_ldisc_ops tty_ldisc_N_TTY;
70073
70074 diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
70075 index 0c4ee9b..9f7c426 100644
70076 --- a/include/linux/tty_ldisc.h
70077 +++ b/include/linux/tty_ldisc.h
70078 @@ -139,7 +139,7 @@ struct tty_ldisc_ops {
70079
70080 struct module *owner;
70081
70082 - int refcount;
70083 + atomic_t refcount;
70084 };
70085
70086 struct tty_ldisc {
70087 diff --git a/include/linux/types.h b/include/linux/types.h
70088 index c42724f..d190eee 100644
70089 --- a/include/linux/types.h
70090 +++ b/include/linux/types.h
70091 @@ -191,10 +191,26 @@ typedef struct {
70092 volatile int counter;
70093 } atomic_t;
70094
70095 +#ifdef CONFIG_PAX_REFCOUNT
70096 +typedef struct {
70097 + volatile int counter;
70098 +} atomic_unchecked_t;
70099 +#else
70100 +typedef atomic_t atomic_unchecked_t;
70101 +#endif
70102 +
70103 #ifdef CONFIG_64BIT
70104 typedef struct {
70105 volatile long counter;
70106 } atomic64_t;
70107 +
70108 +#ifdef CONFIG_PAX_REFCOUNT
70109 +typedef struct {
70110 + volatile long counter;
70111 +} atomic64_unchecked_t;
70112 +#else
70113 +typedef atomic64_t atomic64_unchecked_t;
70114 +#endif
70115 #endif
70116
70117 struct ustat {
70118 diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
70119 index 6b58367..53a3e8e 100644
70120 --- a/include/linux/uaccess.h
70121 +++ b/include/linux/uaccess.h
70122 @@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
70123 long ret; \
70124 mm_segment_t old_fs = get_fs(); \
70125 \
70126 - set_fs(KERNEL_DS); \
70127 pagefault_disable(); \
70128 - ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
70129 - pagefault_enable(); \
70130 + set_fs(KERNEL_DS); \
70131 + ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
70132 set_fs(old_fs); \
70133 + pagefault_enable(); \
70134 ret; \
70135 })
70136
70137 @@ -93,7 +93,7 @@ static inline unsigned long __copy_from_user_nocache(void *to,
70138 * Safely read from address @src to the buffer at @dst. If a kernel fault
70139 * happens, handle that and return -EFAULT.
70140 */
70141 -extern long probe_kernel_read(void *dst, void *src, size_t size);
70142 +extern long probe_kernel_read(void *dst, const void *src, size_t size);
70143
70144 /*
70145 * probe_kernel_write(): safely attempt to write to a location
70146 @@ -104,6 +104,6 @@ extern long probe_kernel_read(void *dst, void *src, size_t size);
70147 * Safely write to address @dst from the buffer at @src. If a kernel fault
70148 * happens, handle that and return -EFAULT.
70149 */
70150 -extern long probe_kernel_write(void *dst, void *src, size_t size);
70151 +extern long probe_kernel_write(void *dst, const void *src, size_t size);
70152
70153 #endif /* __LINUX_UACCESS_H__ */
70154 diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
70155 index 99c1b4d..bb94261 100644
70156 --- a/include/linux/unaligned/access_ok.h
70157 +++ b/include/linux/unaligned/access_ok.h
70158 @@ -6,32 +6,32 @@
70159
70160 static inline u16 get_unaligned_le16(const void *p)
70161 {
70162 - return le16_to_cpup((__le16 *)p);
70163 + return le16_to_cpup((const __le16 *)p);
70164 }
70165
70166 static inline u32 get_unaligned_le32(const void *p)
70167 {
70168 - return le32_to_cpup((__le32 *)p);
70169 + return le32_to_cpup((const __le32 *)p);
70170 }
70171
70172 static inline u64 get_unaligned_le64(const void *p)
70173 {
70174 - return le64_to_cpup((__le64 *)p);
70175 + return le64_to_cpup((const __le64 *)p);
70176 }
70177
70178 static inline u16 get_unaligned_be16(const void *p)
70179 {
70180 - return be16_to_cpup((__be16 *)p);
70181 + return be16_to_cpup((const __be16 *)p);
70182 }
70183
70184 static inline u32 get_unaligned_be32(const void *p)
70185 {
70186 - return be32_to_cpup((__be32 *)p);
70187 + return be32_to_cpup((const __be32 *)p);
70188 }
70189
70190 static inline u64 get_unaligned_be64(const void *p)
70191 {
70192 - return be64_to_cpup((__be64 *)p);
70193 + return be64_to_cpup((const __be64 *)p);
70194 }
70195
70196 static inline void put_unaligned_le16(u16 val, void *p)
70197 diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
70198 index 79b9837..b5a56f9 100644
70199 --- a/include/linux/vermagic.h
70200 +++ b/include/linux/vermagic.h
70201 @@ -26,9 +26,35 @@
70202 #define MODULE_ARCH_VERMAGIC ""
70203 #endif
70204
70205 +#ifdef CONFIG_PAX_REFCOUNT
70206 +#define MODULE_PAX_REFCOUNT "REFCOUNT "
70207 +#else
70208 +#define MODULE_PAX_REFCOUNT ""
70209 +#endif
70210 +
70211 +#ifdef CONSTIFY_PLUGIN
70212 +#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
70213 +#else
70214 +#define MODULE_CONSTIFY_PLUGIN ""
70215 +#endif
70216 +
70217 +#ifdef STACKLEAK_PLUGIN
70218 +#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
70219 +#else
70220 +#define MODULE_STACKLEAK_PLUGIN ""
70221 +#endif
70222 +
70223 +#ifdef CONFIG_GRKERNSEC
70224 +#define MODULE_GRSEC "GRSEC "
70225 +#else
70226 +#define MODULE_GRSEC ""
70227 +#endif
70228 +
70229 #define VERMAGIC_STRING \
70230 UTS_RELEASE " " \
70231 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
70232 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
70233 - MODULE_ARCH_VERMAGIC
70234 + MODULE_ARCH_VERMAGIC \
70235 + MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
70236 + MODULE_GRSEC
70237
70238 diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
70239 index 819a634..462ac12 100644
70240 --- a/include/linux/vmalloc.h
70241 +++ b/include/linux/vmalloc.h
70242 @@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
70243 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
70244 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
70245 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
70246 +
70247 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
70248 +#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
70249 +#endif
70250 +
70251 /* bits [20..32] reserved for arch specific ioremap internals */
70252
70253 /*
70254 @@ -124,4 +129,81 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
70255
70256 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
70257
70258 +#define vmalloc(x) \
70259 +({ \
70260 + void *___retval; \
70261 + intoverflow_t ___x = (intoverflow_t)x; \
70262 + if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
70263 + ___retval = NULL; \
70264 + else \
70265 + ___retval = vmalloc((unsigned long)___x); \
70266 + ___retval; \
70267 +})
70268 +
70269 +#define __vmalloc(x, y, z) \
70270 +({ \
70271 + void *___retval; \
70272 + intoverflow_t ___x = (intoverflow_t)x; \
70273 + if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
70274 + ___retval = NULL; \
70275 + else \
70276 + ___retval = __vmalloc((unsigned long)___x, (y), (z));\
70277 + ___retval; \
70278 +})
70279 +
70280 +#define vmalloc_user(x) \
70281 +({ \
70282 + void *___retval; \
70283 + intoverflow_t ___x = (intoverflow_t)x; \
70284 + if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
70285 + ___retval = NULL; \
70286 + else \
70287 + ___retval = vmalloc_user((unsigned long)___x); \
70288 + ___retval; \
70289 +})
70290 +
70291 +#define vmalloc_exec(x) \
70292 +({ \
70293 + void *___retval; \
70294 + intoverflow_t ___x = (intoverflow_t)x; \
70295 + if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
70296 + ___retval = NULL; \
70297 + else \
70298 + ___retval = vmalloc_exec((unsigned long)___x); \
70299 + ___retval; \
70300 +})
70301 +
70302 +#define vmalloc_node(x, y) \
70303 +({ \
70304 + void *___retval; \
70305 + intoverflow_t ___x = (intoverflow_t)x; \
70306 + if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
70307 + ___retval = NULL; \
70308 + else \
70309 + ___retval = vmalloc_node((unsigned long)___x, (y));\
70310 + ___retval; \
70311 +})
70312 +
70313 +#define vmalloc_32(x) \
70314 +({ \
70315 + void *___retval; \
70316 + intoverflow_t ___x = (intoverflow_t)x; \
70317 + if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
70318 + ___retval = NULL; \
70319 + else \
70320 + ___retval = vmalloc_32((unsigned long)___x); \
70321 + ___retval; \
70322 +})
70323 +
70324 +#define vmalloc_32_user(x) \
70325 +({ \
70326 + void *___retval; \
70327 + intoverflow_t ___x = (intoverflow_t)x; \
70328 + if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
70329 + ___retval = NULL; \
70330 + else \
70331 + ___retval = vmalloc_32_user((unsigned long)___x);\
70332 + ___retval; \
70333 +})
70334 +
70335 #endif /* _LINUX_VMALLOC_H */
70336 diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
70337 index 13070d6..aa4159a 100644
70338 --- a/include/linux/vmstat.h
70339 +++ b/include/linux/vmstat.h
70340 @@ -136,18 +136,18 @@ static inline void vm_events_fold_cpu(int cpu)
70341 /*
70342 * Zone based page accounting with per cpu differentials.
70343 */
70344 -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
70345 +extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
70346
70347 static inline void zone_page_state_add(long x, struct zone *zone,
70348 enum zone_stat_item item)
70349 {
70350 - atomic_long_add(x, &zone->vm_stat[item]);
70351 - atomic_long_add(x, &vm_stat[item]);
70352 + atomic_long_add_unchecked(x, &zone->vm_stat[item]);
70353 + atomic_long_add_unchecked(x, &vm_stat[item]);
70354 }
70355
70356 static inline unsigned long global_page_state(enum zone_stat_item item)
70357 {
70358 - long x = atomic_long_read(&vm_stat[item]);
70359 + long x = atomic_long_read_unchecked(&vm_stat[item]);
70360 #ifdef CONFIG_SMP
70361 if (x < 0)
70362 x = 0;
70363 @@ -158,7 +158,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
70364 static inline unsigned long zone_page_state(struct zone *zone,
70365 enum zone_stat_item item)
70366 {
70367 - long x = atomic_long_read(&zone->vm_stat[item]);
70368 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
70369 #ifdef CONFIG_SMP
70370 if (x < 0)
70371 x = 0;
70372 @@ -175,7 +175,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
70373 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
70374 enum zone_stat_item item)
70375 {
70376 - long x = atomic_long_read(&zone->vm_stat[item]);
70377 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
70378
70379 #ifdef CONFIG_SMP
70380 int cpu;
70381 @@ -264,8 +264,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
70382
70383 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
70384 {
70385 - atomic_long_inc(&zone->vm_stat[item]);
70386 - atomic_long_inc(&vm_stat[item]);
70387 + atomic_long_inc_unchecked(&zone->vm_stat[item]);
70388 + atomic_long_inc_unchecked(&vm_stat[item]);
70389 }
70390
70391 static inline void __inc_zone_page_state(struct page *page,
70392 @@ -276,8 +276,8 @@ static inline void __inc_zone_page_state(struct page *page,
70393
70394 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
70395 {
70396 - atomic_long_dec(&zone->vm_stat[item]);
70397 - atomic_long_dec(&vm_stat[item]);
70398 + atomic_long_dec_unchecked(&zone->vm_stat[item]);
70399 + atomic_long_dec_unchecked(&vm_stat[item]);
70400 }
70401
70402 static inline void __dec_zone_page_state(struct page *page,
70403 diff --git a/include/linux/xattr.h b/include/linux/xattr.h
70404 index 5c84af8..1a3b6e2 100644
70405 --- a/include/linux/xattr.h
70406 +++ b/include/linux/xattr.h
70407 @@ -33,6 +33,11 @@
70408 #define XATTR_USER_PREFIX "user."
70409 #define XATTR_USER_PREFIX_LEN (sizeof (XATTR_USER_PREFIX) - 1)
70410
70411 +/* User namespace */
70412 +#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
70413 +#define XATTR_PAX_FLAGS_SUFFIX "flags"
70414 +#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
70415 +
70416 struct inode;
70417 struct dentry;
70418
70419 diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
70420 index eed5fcc..5080d24 100644
70421 --- a/include/media/saa7146_vv.h
70422 +++ b/include/media/saa7146_vv.h
70423 @@ -167,7 +167,7 @@ struct saa7146_ext_vv
70424 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
70425
70426 /* the extension can override this */
70427 - struct v4l2_ioctl_ops ops;
70428 + v4l2_ioctl_ops_no_const ops;
70429 /* pointer to the saa7146 core ops */
70430 const struct v4l2_ioctl_ops *core_ops;
70431
70432 diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
70433 index 73c9867..2da8837 100644
70434 --- a/include/media/v4l2-dev.h
70435 +++ b/include/media/v4l2-dev.h
70436 @@ -34,7 +34,7 @@ struct v4l2_device;
70437 #define V4L2_FL_UNREGISTERED (0)
70438
70439 struct v4l2_file_operations {
70440 - struct module *owner;
70441 + struct module * const owner;
70442 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
70443 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
70444 unsigned int (*poll) (struct file *, struct poll_table_struct *);
70445 @@ -46,6 +46,7 @@ struct v4l2_file_operations {
70446 int (*open) (struct file *);
70447 int (*release) (struct file *);
70448 };
70449 +typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
70450
70451 /*
70452 * Newer version of video_device, handled by videodev2.c
70453 diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
70454 index 5d5d550..f559ef1 100644
70455 --- a/include/media/v4l2-device.h
70456 +++ b/include/media/v4l2-device.h
70457 @@ -71,7 +71,7 @@ int __must_check v4l2_device_register(struct device *dev, struct v4l2_device *v4
70458 this function returns 0. If the name ends with a digit (e.g. cx18),
70459 then the name will be set to cx18-0 since cx180 looks really odd. */
70460 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
70461 - atomic_t *instance);
70462 + atomic_unchecked_t *instance);
70463
70464 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
70465 Since the parent disappears this ensures that v4l2_dev doesn't have an
70466 diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
70467 index 7a4529d..7244290 100644
70468 --- a/include/media/v4l2-ioctl.h
70469 +++ b/include/media/v4l2-ioctl.h
70470 @@ -243,6 +243,7 @@ struct v4l2_ioctl_ops {
70471 long (*vidioc_default) (struct file *file, void *fh,
70472 int cmd, void *arg);
70473 };
70474 +typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
70475
70476
70477 /* v4l debugging and diagnostics */
70478 diff --git a/include/net/flow.h b/include/net/flow.h
70479 index 809970b..c3df4f3 100644
70480 --- a/include/net/flow.h
70481 +++ b/include/net/flow.h
70482 @@ -92,7 +92,7 @@ typedef int (*flow_resolve_t)(struct net *net, struct flowi *key, u16 family,
70483 extern void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family,
70484 u8 dir, flow_resolve_t resolver);
70485 extern void flow_cache_flush(void);
70486 -extern atomic_t flow_cache_genid;
70487 +extern atomic_unchecked_t flow_cache_genid;
70488
70489 static inline int flow_cache_uli_match(struct flowi *fl1, struct flowi *fl2)
70490 {
70491 diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
70492 index 15e1f8fe..668837c 100644
70493 --- a/include/net/inetpeer.h
70494 +++ b/include/net/inetpeer.h
70495 @@ -24,7 +24,7 @@ struct inet_peer
70496 __u32 dtime; /* the time of last use of not
70497 * referenced entries */
70498 atomic_t refcnt;
70499 - atomic_t rid; /* Frag reception counter */
70500 + atomic_unchecked_t rid; /* Frag reception counter */
70501 __u32 tcp_ts;
70502 unsigned long tcp_ts_stamp;
70503 };
70504 diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
70505 index 98978e7..2243a3d 100644
70506 --- a/include/net/ip_vs.h
70507 +++ b/include/net/ip_vs.h
70508 @@ -365,7 +365,7 @@ struct ip_vs_conn {
70509 struct ip_vs_conn *control; /* Master control connection */
70510 atomic_t n_control; /* Number of controlled ones */
70511 struct ip_vs_dest *dest; /* real server */
70512 - atomic_t in_pkts; /* incoming packet counter */
70513 + atomic_unchecked_t in_pkts; /* incoming packet counter */
70514
70515 /* packet transmitter for different forwarding methods. If it
70516 mangles the packet, it must return NF_DROP or better NF_STOLEN,
70517 @@ -466,7 +466,7 @@ struct ip_vs_dest {
70518 union nf_inet_addr addr; /* IP address of the server */
70519 __be16 port; /* port number of the server */
70520 volatile unsigned flags; /* dest status flags */
70521 - atomic_t conn_flags; /* flags to copy to conn */
70522 + atomic_unchecked_t conn_flags; /* flags to copy to conn */
70523 atomic_t weight; /* server weight */
70524
70525 atomic_t refcnt; /* reference counter */
70526 diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
70527 index 69b610a..fe3962c 100644
70528 --- a/include/net/irda/ircomm_core.h
70529 +++ b/include/net/irda/ircomm_core.h
70530 @@ -51,7 +51,7 @@ typedef struct {
70531 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
70532 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
70533 struct ircomm_info *);
70534 -} call_t;
70535 +} __no_const call_t;
70536
70537 struct ircomm_cb {
70538 irda_queue_t queue;
70539 diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
70540 index eea2e61..08c692d 100644
70541 --- a/include/net/irda/ircomm_tty.h
70542 +++ b/include/net/irda/ircomm_tty.h
70543 @@ -35,6 +35,7 @@
70544 #include <linux/termios.h>
70545 #include <linux/timer.h>
70546 #include <linux/tty.h> /* struct tty_struct */
70547 +#include <asm/local.h>
70548
70549 #include <net/irda/irias_object.h>
70550 #include <net/irda/ircomm_core.h>
70551 @@ -105,8 +106,8 @@ struct ircomm_tty_cb {
70552 unsigned short close_delay;
70553 unsigned short closing_wait; /* time to wait before closing */
70554
70555 - int open_count;
70556 - int blocked_open; /* # of blocked opens */
70557 + local_t open_count;
70558 + local_t blocked_open; /* # of blocked opens */
70559
70560 /* Protect concurent access to :
70561 * o self->open_count
70562 diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
70563 index f82a1e8..82d81e8 100644
70564 --- a/include/net/iucv/af_iucv.h
70565 +++ b/include/net/iucv/af_iucv.h
70566 @@ -87,7 +87,7 @@ struct iucv_sock {
70567 struct iucv_sock_list {
70568 struct hlist_head head;
70569 rwlock_t lock;
70570 - atomic_t autobind_name;
70571 + atomic_unchecked_t autobind_name;
70572 };
70573
70574 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
70575 diff --git a/include/net/lapb.h b/include/net/lapb.h
70576 index 96cb5dd..25e8d4f 100644
70577 --- a/include/net/lapb.h
70578 +++ b/include/net/lapb.h
70579 @@ -95,7 +95,7 @@ struct lapb_cb {
70580 struct sk_buff_head write_queue;
70581 struct sk_buff_head ack_queue;
70582 unsigned char window;
70583 - struct lapb_register_struct callbacks;
70584 + struct lapb_register_struct *callbacks;
70585
70586 /* FRMR control information */
70587 struct lapb_frame frmr_data;
70588 diff --git a/include/net/neighbour.h b/include/net/neighbour.h
70589 index 3817fda..cdb2343 100644
70590 --- a/include/net/neighbour.h
70591 +++ b/include/net/neighbour.h
70592 @@ -131,7 +131,7 @@ struct neigh_ops
70593 int (*connected_output)(struct sk_buff*);
70594 int (*hh_output)(struct sk_buff*);
70595 int (*queue_xmit)(struct sk_buff*);
70596 -};
70597 +} __do_const;
70598
70599 struct pneigh_entry
70600 {
70601 diff --git a/include/net/netlink.h b/include/net/netlink.h
70602 index c344646..4778c71 100644
70603 --- a/include/net/netlink.h
70604 +++ b/include/net/netlink.h
70605 @@ -335,7 +335,7 @@ static inline int nlmsg_ok(const struct nlmsghdr *nlh, int remaining)
70606 {
70607 return (remaining >= (int) sizeof(struct nlmsghdr) &&
70608 nlh->nlmsg_len >= sizeof(struct nlmsghdr) &&
70609 - nlh->nlmsg_len <= remaining);
70610 + nlh->nlmsg_len <= (unsigned int)remaining);
70611 }
70612
70613 /**
70614 @@ -558,7 +558,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
70615 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
70616 {
70617 if (mark)
70618 - skb_trim(skb, (unsigned char *) mark - skb->data);
70619 + skb_trim(skb, (const unsigned char *) mark - skb->data);
70620 }
70621
70622 /**
70623 diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
70624 index 9a4b8b7..e49e077 100644
70625 --- a/include/net/netns/ipv4.h
70626 +++ b/include/net/netns/ipv4.h
70627 @@ -54,7 +54,7 @@ struct netns_ipv4 {
70628 int current_rt_cache_rebuild_count;
70629
70630 struct timer_list rt_secret_timer;
70631 - atomic_t rt_genid;
70632 + atomic_unchecked_t rt_genid;
70633
70634 #ifdef CONFIG_IP_MROUTE
70635 struct sock *mroute_sk;
70636 diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
70637 index 8a6d529..171f401 100644
70638 --- a/include/net/sctp/sctp.h
70639 +++ b/include/net/sctp/sctp.h
70640 @@ -305,8 +305,8 @@ extern int sctp_debug_flag;
70641
70642 #else /* SCTP_DEBUG */
70643
70644 -#define SCTP_DEBUG_PRINTK(whatever...)
70645 -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
70646 +#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
70647 +#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
70648 #define SCTP_ENABLE_DEBUG
70649 #define SCTP_DISABLE_DEBUG
70650 #define SCTP_ASSERT(expr, str, func)
70651 diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h
70652 index d97f689..f3b90ab 100644
70653 --- a/include/net/secure_seq.h
70654 +++ b/include/net/secure_seq.h
70655 @@ -7,14 +7,14 @@ extern __u32 secure_ip_id(__be32 daddr);
70656 extern __u32 secure_ipv6_id(const __be32 daddr[4]);
70657 extern u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
70658 extern u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
70659 - __be16 dport);
70660 + __be16 dport);
70661 extern __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
70662 __be16 sport, __be16 dport);
70663 extern __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
70664 - __be16 sport, __be16 dport);
70665 + __be16 sport, __be16 dport);
70666 extern u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
70667 - __be16 sport, __be16 dport);
70668 + __be16 sport, __be16 dport);
70669 extern u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
70670 - __be16 sport, __be16 dport);
70671 + __be16 sport, __be16 dport);
70672
70673 #endif /* _NET_SECURE_SEQ */
70674 diff --git a/include/net/sock.h b/include/net/sock.h
70675 index 78adf52..99afd29 100644
70676 --- a/include/net/sock.h
70677 +++ b/include/net/sock.h
70678 @@ -272,7 +272,7 @@ struct sock {
70679 rwlock_t sk_callback_lock;
70680 int sk_err,
70681 sk_err_soft;
70682 - atomic_t sk_drops;
70683 + atomic_unchecked_t sk_drops;
70684 unsigned short sk_ack_backlog;
70685 unsigned short sk_max_ack_backlog;
70686 __u32 sk_priority;
70687 @@ -737,7 +737,7 @@ static inline void sk_refcnt_debug_release(const struct sock *sk)
70688 extern void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
70689 extern int sock_prot_inuse_get(struct net *net, struct proto *proto);
70690 #else
70691 -static void inline sock_prot_inuse_add(struct net *net, struct proto *prot,
70692 +static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
70693 int inc)
70694 {
70695 }
70696 diff --git a/include/net/tcp.h b/include/net/tcp.h
70697 index 6cfe18b..dd21acb 100644
70698 --- a/include/net/tcp.h
70699 +++ b/include/net/tcp.h
70700 @@ -1444,8 +1444,8 @@ enum tcp_seq_states {
70701 struct tcp_seq_afinfo {
70702 char *name;
70703 sa_family_t family;
70704 - struct file_operations seq_fops;
70705 - struct seq_operations seq_ops;
70706 + file_operations_no_const seq_fops;
70707 + seq_operations_no_const seq_ops;
70708 };
70709
70710 struct tcp_iter_state {
70711 diff --git a/include/net/udp.h b/include/net/udp.h
70712 index f98abd2..b4b042f 100644
70713 --- a/include/net/udp.h
70714 +++ b/include/net/udp.h
70715 @@ -187,8 +187,8 @@ struct udp_seq_afinfo {
70716 char *name;
70717 sa_family_t family;
70718 struct udp_table *udp_table;
70719 - struct file_operations seq_fops;
70720 - struct seq_operations seq_ops;
70721 + file_operations_no_const seq_fops;
70722 + seq_operations_no_const seq_ops;
70723 };
70724
70725 struct udp_iter_state {
70726 diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
70727 index cbb822e..e9c1cbe 100644
70728 --- a/include/rdma/iw_cm.h
70729 +++ b/include/rdma/iw_cm.h
70730 @@ -129,7 +129,7 @@ struct iw_cm_verbs {
70731 int backlog);
70732
70733 int (*destroy_listen)(struct iw_cm_id *cm_id);
70734 -};
70735 +} __no_const;
70736
70737 /**
70738 * iw_create_cm_id - Create an IW CM identifier.
70739 diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
70740 index 09a124b..caa8ca8 100644
70741 --- a/include/scsi/libfc.h
70742 +++ b/include/scsi/libfc.h
70743 @@ -675,6 +675,7 @@ struct libfc_function_template {
70744 */
70745 void (*disc_stop_final) (struct fc_lport *);
70746 };
70747 +typedef struct libfc_function_template __no_const libfc_function_template_no_const;
70748
70749 /* information used by the discovery layer */
70750 struct fc_disc {
70751 @@ -707,7 +708,7 @@ struct fc_lport {
70752 struct fc_disc disc;
70753
70754 /* Operational Information */
70755 - struct libfc_function_template tt;
70756 + libfc_function_template_no_const tt;
70757 u8 link_up;
70758 u8 qfull;
70759 enum fc_lport_state state;
70760 diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
70761 index de8e180..f15e0d7 100644
70762 --- a/include/scsi/scsi_device.h
70763 +++ b/include/scsi/scsi_device.h
70764 @@ -156,9 +156,9 @@ struct scsi_device {
70765 unsigned int max_device_blocked; /* what device_blocked counts down from */
70766 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
70767
70768 - atomic_t iorequest_cnt;
70769 - atomic_t iodone_cnt;
70770 - atomic_t ioerr_cnt;
70771 + atomic_unchecked_t iorequest_cnt;
70772 + atomic_unchecked_t iodone_cnt;
70773 + atomic_unchecked_t ioerr_cnt;
70774
70775 struct device sdev_gendev,
70776 sdev_dev;
70777 diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
70778 index fc50bd6..81ba9cb 100644
70779 --- a/include/scsi/scsi_transport_fc.h
70780 +++ b/include/scsi/scsi_transport_fc.h
70781 @@ -708,7 +708,7 @@ struct fc_function_template {
70782 unsigned long show_host_system_hostname:1;
70783
70784 unsigned long disable_target_scan:1;
70785 -};
70786 +} __do_const;
70787
70788
70789 /**
70790 diff --git a/include/sound/ac97_codec.h b/include/sound/ac97_codec.h
70791 index 3dae3f7..8440d6f 100644
70792 --- a/include/sound/ac97_codec.h
70793 +++ b/include/sound/ac97_codec.h
70794 @@ -419,15 +419,15 @@
70795 struct snd_ac97;
70796
70797 struct snd_ac97_build_ops {
70798 - int (*build_3d) (struct snd_ac97 *ac97);
70799 - int (*build_specific) (struct snd_ac97 *ac97);
70800 - int (*build_spdif) (struct snd_ac97 *ac97);
70801 - int (*build_post_spdif) (struct snd_ac97 *ac97);
70802 + int (* const build_3d) (struct snd_ac97 *ac97);
70803 + int (* const build_specific) (struct snd_ac97 *ac97);
70804 + int (* const build_spdif) (struct snd_ac97 *ac97);
70805 + int (* const build_post_spdif) (struct snd_ac97 *ac97);
70806 #ifdef CONFIG_PM
70807 - void (*suspend) (struct snd_ac97 *ac97);
70808 - void (*resume) (struct snd_ac97 *ac97);
70809 + void (* const suspend) (struct snd_ac97 *ac97);
70810 + void (* const resume) (struct snd_ac97 *ac97);
70811 #endif
70812 - void (*update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
70813 + void (* const update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
70814 };
70815
70816 struct snd_ac97_bus_ops {
70817 @@ -477,7 +477,7 @@ struct snd_ac97_template {
70818
70819 struct snd_ac97 {
70820 /* -- lowlevel (hardware) driver specific -- */
70821 - struct snd_ac97_build_ops * build_ops;
70822 + const struct snd_ac97_build_ops * build_ops;
70823 void *private_data;
70824 void (*private_free) (struct snd_ac97 *ac97);
70825 /* --- */
70826 diff --git a/include/sound/ak4xxx-adda.h b/include/sound/ak4xxx-adda.h
70827 index 891cf1a..a94ba2b 100644
70828 --- a/include/sound/ak4xxx-adda.h
70829 +++ b/include/sound/ak4xxx-adda.h
70830 @@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
70831 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
70832 unsigned char val);
70833 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
70834 -};
70835 +} __no_const;
70836
70837 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
70838
70839 diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
70840 index 8c05e47..2b5df97 100644
70841 --- a/include/sound/hwdep.h
70842 +++ b/include/sound/hwdep.h
70843 @@ -49,7 +49,7 @@ struct snd_hwdep_ops {
70844 struct snd_hwdep_dsp_status *status);
70845 int (*dsp_load)(struct snd_hwdep *hw,
70846 struct snd_hwdep_dsp_image *image);
70847 -};
70848 +} __no_const;
70849
70850 struct snd_hwdep {
70851 struct snd_card *card;
70852 diff --git a/include/sound/info.h b/include/sound/info.h
70853 index 112e894..6fda5b5 100644
70854 --- a/include/sound/info.h
70855 +++ b/include/sound/info.h
70856 @@ -44,7 +44,7 @@ struct snd_info_entry_text {
70857 struct snd_info_buffer *buffer);
70858 void (*write)(struct snd_info_entry *entry,
70859 struct snd_info_buffer *buffer);
70860 -};
70861 +} __no_const;
70862
70863 struct snd_info_entry_ops {
70864 int (*open)(struct snd_info_entry *entry,
70865 diff --git a/include/sound/pcm.h b/include/sound/pcm.h
70866 index de6d981..590a550 100644
70867 --- a/include/sound/pcm.h
70868 +++ b/include/sound/pcm.h
70869 @@ -80,6 +80,7 @@ struct snd_pcm_ops {
70870 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
70871 int (*ack)(struct snd_pcm_substream *substream);
70872 };
70873 +typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
70874
70875 /*
70876 *
70877 diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
70878 index 736eac7..fe8a80f 100644
70879 --- a/include/sound/sb16_csp.h
70880 +++ b/include/sound/sb16_csp.h
70881 @@ -139,7 +139,7 @@ struct snd_sb_csp_ops {
70882 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
70883 int (*csp_stop) (struct snd_sb_csp * p);
70884 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
70885 -};
70886 +} __no_const;
70887
70888 /*
70889 * CSP private data
70890 diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
70891 index 444cd6b..3327cc5 100644
70892 --- a/include/sound/ymfpci.h
70893 +++ b/include/sound/ymfpci.h
70894 @@ -358,7 +358,7 @@ struct snd_ymfpci {
70895 spinlock_t reg_lock;
70896 spinlock_t voice_lock;
70897 wait_queue_head_t interrupt_sleep;
70898 - atomic_t interrupt_sleep_count;
70899 + atomic_unchecked_t interrupt_sleep_count;
70900 struct snd_info_entry *proc_entry;
70901 const struct firmware *dsp_microcode;
70902 const struct firmware *controller_microcode;
70903 diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
70904 index b89f9db..f097b38 100644
70905 --- a/include/trace/events/irq.h
70906 +++ b/include/trace/events/irq.h
70907 @@ -34,7 +34,7 @@
70908 */
70909 TRACE_EVENT(irq_handler_entry,
70910
70911 - TP_PROTO(int irq, struct irqaction *action),
70912 + TP_PROTO(int irq, const struct irqaction *action),
70913
70914 TP_ARGS(irq, action),
70915
70916 @@ -64,7 +64,7 @@ TRACE_EVENT(irq_handler_entry,
70917 */
70918 TRACE_EVENT(irq_handler_exit,
70919
70920 - TP_PROTO(int irq, struct irqaction *action, int ret),
70921 + TP_PROTO(int irq, const struct irqaction *action, int ret),
70922
70923 TP_ARGS(irq, action, ret),
70924
70925 @@ -95,7 +95,7 @@ TRACE_EVENT(irq_handler_exit,
70926 */
70927 TRACE_EVENT(softirq_entry,
70928
70929 - TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
70930 + TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
70931
70932 TP_ARGS(h, vec),
70933
70934 @@ -124,7 +124,7 @@ TRACE_EVENT(softirq_entry,
70935 */
70936 TRACE_EVENT(softirq_exit,
70937
70938 - TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
70939 + TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
70940
70941 TP_ARGS(h, vec),
70942
70943 diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
70944 index 0993a22..32ba2fe 100644
70945 --- a/include/video/uvesafb.h
70946 +++ b/include/video/uvesafb.h
70947 @@ -177,6 +177,7 @@ struct uvesafb_par {
70948 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
70949 u8 pmi_setpal; /* PMI for palette changes */
70950 u16 *pmi_base; /* protected mode interface location */
70951 + u8 *pmi_code; /* protected mode code location */
70952 void *pmi_start;
70953 void *pmi_pal;
70954 u8 *vbe_state_orig; /*
70955 diff --git a/init/Kconfig b/init/Kconfig
70956 index d72691b..3996e54 100644
70957 --- a/init/Kconfig
70958 +++ b/init/Kconfig
70959 @@ -1004,7 +1004,7 @@ config SLUB_DEBUG
70960
70961 config COMPAT_BRK
70962 bool "Disable heap randomization"
70963 - default y
70964 + default n
70965 help
70966 Randomizing heap placement makes heap exploits harder, but it
70967 also breaks ancient binaries (including anything libc5 based).
70968 diff --git a/init/do_mounts.c b/init/do_mounts.c
70969 index bb008d0..4fa3933 100644
70970 --- a/init/do_mounts.c
70971 +++ b/init/do_mounts.c
70972 @@ -216,11 +216,11 @@ static void __init get_fs_names(char *page)
70973
70974 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
70975 {
70976 - int err = sys_mount(name, "/root", fs, flags, data);
70977 + int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data);
70978 if (err)
70979 return err;
70980
70981 - sys_chdir("/root");
70982 + sys_chdir((__force const char __user *)"/root");
70983 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
70984 printk("VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
70985 current->fs->pwd.mnt->mnt_sb->s_type->name,
70986 @@ -311,18 +311,18 @@ void __init change_floppy(char *fmt, ...)
70987 va_start(args, fmt);
70988 vsprintf(buf, fmt, args);
70989 va_end(args);
70990 - fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
70991 + fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
70992 if (fd >= 0) {
70993 sys_ioctl(fd, FDEJECT, 0);
70994 sys_close(fd);
70995 }
70996 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
70997 - fd = sys_open("/dev/console", O_RDWR, 0);
70998 + fd = sys_open((char __user *)"/dev/console", O_RDWR, 0);
70999 if (fd >= 0) {
71000 sys_ioctl(fd, TCGETS, (long)&termios);
71001 termios.c_lflag &= ~ICANON;
71002 sys_ioctl(fd, TCSETSF, (long)&termios);
71003 - sys_read(fd, &c, 1);
71004 + sys_read(fd, (char __user *)&c, 1);
71005 termios.c_lflag |= ICANON;
71006 sys_ioctl(fd, TCSETSF, (long)&termios);
71007 sys_close(fd);
71008 @@ -416,6 +416,6 @@ void __init prepare_namespace(void)
71009 mount_root();
71010 out:
71011 devtmpfs_mount("dev");
71012 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
71013 - sys_chroot(".");
71014 + sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
71015 + sys_chroot((__force char __user *)".");
71016 }
71017 diff --git a/init/do_mounts.h b/init/do_mounts.h
71018 index f5b978a..69dbfe8 100644
71019 --- a/init/do_mounts.h
71020 +++ b/init/do_mounts.h
71021 @@ -15,15 +15,15 @@ extern int root_mountflags;
71022
71023 static inline int create_dev(char *name, dev_t dev)
71024 {
71025 - sys_unlink(name);
71026 - return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
71027 + sys_unlink((char __force_user *)name);
71028 + return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
71029 }
71030
71031 #if BITS_PER_LONG == 32
71032 static inline u32 bstat(char *name)
71033 {
71034 struct stat64 stat;
71035 - if (sys_stat64(name, &stat) != 0)
71036 + if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
71037 return 0;
71038 if (!S_ISBLK(stat.st_mode))
71039 return 0;
71040 @@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
71041 static inline u32 bstat(char *name)
71042 {
71043 struct stat stat;
71044 - if (sys_newstat(name, &stat) != 0)
71045 + if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
71046 return 0;
71047 if (!S_ISBLK(stat.st_mode))
71048 return 0;
71049 diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
71050 index 614241b..4da046b 100644
71051 --- a/init/do_mounts_initrd.c
71052 +++ b/init/do_mounts_initrd.c
71053 @@ -32,7 +32,7 @@ static int __init do_linuxrc(void * shell)
71054 sys_close(old_fd);sys_close(root_fd);
71055 sys_close(0);sys_close(1);sys_close(2);
71056 sys_setsid();
71057 - (void) sys_open("/dev/console",O_RDWR,0);
71058 + (void) sys_open((__force const char __user *)"/dev/console",O_RDWR,0);
71059 (void) sys_dup(0);
71060 (void) sys_dup(0);
71061 return kernel_execve(shell, argv, envp_init);
71062 @@ -47,13 +47,13 @@ static void __init handle_initrd(void)
71063 create_dev("/dev/root.old", Root_RAM0);
71064 /* mount initrd on rootfs' /root */
71065 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
71066 - sys_mkdir("/old", 0700);
71067 - root_fd = sys_open("/", 0, 0);
71068 - old_fd = sys_open("/old", 0, 0);
71069 + sys_mkdir((const char __force_user *)"/old", 0700);
71070 + root_fd = sys_open((const char __force_user *)"/", 0, 0);
71071 + old_fd = sys_open((const char __force_user *)"/old", 0, 0);
71072 /* move initrd over / and chdir/chroot in initrd root */
71073 - sys_chdir("/root");
71074 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
71075 - sys_chroot(".");
71076 + sys_chdir((const char __force_user *)"/root");
71077 + sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
71078 + sys_chroot((const char __force_user *)".");
71079
71080 /*
71081 * In case that a resume from disk is carried out by linuxrc or one of
71082 @@ -70,15 +70,15 @@ static void __init handle_initrd(void)
71083
71084 /* move initrd to rootfs' /old */
71085 sys_fchdir(old_fd);
71086 - sys_mount("/", ".", NULL, MS_MOVE, NULL);
71087 + sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
71088 /* switch root and cwd back to / of rootfs */
71089 sys_fchdir(root_fd);
71090 - sys_chroot(".");
71091 + sys_chroot((const char __force_user *)".");
71092 sys_close(old_fd);
71093 sys_close(root_fd);
71094
71095 if (new_decode_dev(real_root_dev) == Root_RAM0) {
71096 - sys_chdir("/old");
71097 + sys_chdir((const char __force_user *)"/old");
71098 return;
71099 }
71100
71101 @@ -86,17 +86,17 @@ static void __init handle_initrd(void)
71102 mount_root();
71103
71104 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
71105 - error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
71106 + error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
71107 if (!error)
71108 printk("okay\n");
71109 else {
71110 - int fd = sys_open("/dev/root.old", O_RDWR, 0);
71111 + int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
71112 if (error == -ENOENT)
71113 printk("/initrd does not exist. Ignored.\n");
71114 else
71115 printk("failed\n");
71116 printk(KERN_NOTICE "Unmounting old root\n");
71117 - sys_umount("/old", MNT_DETACH);
71118 + sys_umount((char __force_user *)"/old", MNT_DETACH);
71119 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
71120 if (fd < 0) {
71121 error = fd;
71122 @@ -119,11 +119,11 @@ int __init initrd_load(void)
71123 * mounted in the normal path.
71124 */
71125 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
71126 - sys_unlink("/initrd.image");
71127 + sys_unlink((const char __force_user *)"/initrd.image");
71128 handle_initrd();
71129 return 1;
71130 }
71131 }
71132 - sys_unlink("/initrd.image");
71133 + sys_unlink((const char __force_user *)"/initrd.image");
71134 return 0;
71135 }
71136 diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
71137 index 69aebbf..c0bf6a7 100644
71138 --- a/init/do_mounts_md.c
71139 +++ b/init/do_mounts_md.c
71140 @@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
71141 partitioned ? "_d" : "", minor,
71142 md_setup_args[ent].device_names);
71143
71144 - fd = sys_open(name, 0, 0);
71145 + fd = sys_open((char __force_user *)name, 0, 0);
71146 if (fd < 0) {
71147 printk(KERN_ERR "md: open failed - cannot start "
71148 "array %s\n", name);
71149 @@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
71150 * array without it
71151 */
71152 sys_close(fd);
71153 - fd = sys_open(name, 0, 0);
71154 + fd = sys_open((char __force_user *)name, 0, 0);
71155 sys_ioctl(fd, BLKRRPART, 0);
71156 }
71157 sys_close(fd);
71158 @@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
71159
71160 wait_for_device_probe();
71161
71162 - fd = sys_open("/dev/md0", 0, 0);
71163 + fd = sys_open((__force char __user *)"/dev/md0", 0, 0);
71164 if (fd >= 0) {
71165 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
71166 sys_close(fd);
71167 diff --git a/init/initramfs.c b/init/initramfs.c
71168 index 1fd59b8..a01b079 100644
71169 --- a/init/initramfs.c
71170 +++ b/init/initramfs.c
71171 @@ -74,7 +74,7 @@ static void __init free_hash(void)
71172 }
71173 }
71174
71175 -static long __init do_utime(char __user *filename, time_t mtime)
71176 +static long __init do_utime(__force char __user *filename, time_t mtime)
71177 {
71178 struct timespec t[2];
71179
71180 @@ -109,7 +109,7 @@ static void __init dir_utime(void)
71181 struct dir_entry *de, *tmp;
71182 list_for_each_entry_safe(de, tmp, &dir_list, list) {
71183 list_del(&de->list);
71184 - do_utime(de->name, de->mtime);
71185 + do_utime((char __force_user *)de->name, de->mtime);
71186 kfree(de->name);
71187 kfree(de);
71188 }
71189 @@ -271,7 +271,7 @@ static int __init maybe_link(void)
71190 if (nlink >= 2) {
71191 char *old = find_link(major, minor, ino, mode, collected);
71192 if (old)
71193 - return (sys_link(old, collected) < 0) ? -1 : 1;
71194 + return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
71195 }
71196 return 0;
71197 }
71198 @@ -280,11 +280,11 @@ static void __init clean_path(char *path, mode_t mode)
71199 {
71200 struct stat st;
71201
71202 - if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
71203 + if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
71204 if (S_ISDIR(st.st_mode))
71205 - sys_rmdir(path);
71206 + sys_rmdir((char __force_user *)path);
71207 else
71208 - sys_unlink(path);
71209 + sys_unlink((char __force_user *)path);
71210 }
71211 }
71212
71213 @@ -305,7 +305,7 @@ static int __init do_name(void)
71214 int openflags = O_WRONLY|O_CREAT;
71215 if (ml != 1)
71216 openflags |= O_TRUNC;
71217 - wfd = sys_open(collected, openflags, mode);
71218 + wfd = sys_open((char __force_user *)collected, openflags, mode);
71219
71220 if (wfd >= 0) {
71221 sys_fchown(wfd, uid, gid);
71222 @@ -317,17 +317,17 @@ static int __init do_name(void)
71223 }
71224 }
71225 } else if (S_ISDIR(mode)) {
71226 - sys_mkdir(collected, mode);
71227 - sys_chown(collected, uid, gid);
71228 - sys_chmod(collected, mode);
71229 + sys_mkdir((char __force_user *)collected, mode);
71230 + sys_chown((char __force_user *)collected, uid, gid);
71231 + sys_chmod((char __force_user *)collected, mode);
71232 dir_add(collected, mtime);
71233 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
71234 S_ISFIFO(mode) || S_ISSOCK(mode)) {
71235 if (maybe_link() == 0) {
71236 - sys_mknod(collected, mode, rdev);
71237 - sys_chown(collected, uid, gid);
71238 - sys_chmod(collected, mode);
71239 - do_utime(collected, mtime);
71240 + sys_mknod((char __force_user *)collected, mode, rdev);
71241 + sys_chown((char __force_user *)collected, uid, gid);
71242 + sys_chmod((char __force_user *)collected, mode);
71243 + do_utime((char __force_user *)collected, mtime);
71244 }
71245 }
71246 return 0;
71247 @@ -336,15 +336,15 @@ static int __init do_name(void)
71248 static int __init do_copy(void)
71249 {
71250 if (count >= body_len) {
71251 - sys_write(wfd, victim, body_len);
71252 + sys_write(wfd, (char __force_user *)victim, body_len);
71253 sys_close(wfd);
71254 - do_utime(vcollected, mtime);
71255 + do_utime((char __force_user *)vcollected, mtime);
71256 kfree(vcollected);
71257 eat(body_len);
71258 state = SkipIt;
71259 return 0;
71260 } else {
71261 - sys_write(wfd, victim, count);
71262 + sys_write(wfd, (char __force_user *)victim, count);
71263 body_len -= count;
71264 eat(count);
71265 return 1;
71266 @@ -355,9 +355,9 @@ static int __init do_symlink(void)
71267 {
71268 collected[N_ALIGN(name_len) + body_len] = '\0';
71269 clean_path(collected, 0);
71270 - sys_symlink(collected + N_ALIGN(name_len), collected);
71271 - sys_lchown(collected, uid, gid);
71272 - do_utime(collected, mtime);
71273 + sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
71274 + sys_lchown((char __force_user *)collected, uid, gid);
71275 + do_utime((char __force_user *)collected, mtime);
71276 state = SkipIt;
71277 next_state = Reset;
71278 return 0;
71279 diff --git a/init/main.c b/init/main.c
71280 index 1eb4bd5..fea5bbe 100644
71281 --- a/init/main.c
71282 +++ b/init/main.c
71283 @@ -97,6 +97,7 @@ static inline void mark_rodata_ro(void) { }
71284 #ifdef CONFIG_TC
71285 extern void tc_init(void);
71286 #endif
71287 +extern void grsecurity_init(void);
71288
71289 enum system_states system_state __read_mostly;
71290 EXPORT_SYMBOL(system_state);
71291 @@ -183,6 +184,49 @@ static int __init set_reset_devices(char *str)
71292
71293 __setup("reset_devices", set_reset_devices);
71294
71295 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
71296 +extern char pax_enter_kernel_user[];
71297 +extern char pax_exit_kernel_user[];
71298 +extern pgdval_t clone_pgd_mask;
71299 +#endif
71300 +
71301 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
71302 +static int __init setup_pax_nouderef(char *str)
71303 +{
71304 +#ifdef CONFIG_X86_32
71305 + unsigned int cpu;
71306 + struct desc_struct *gdt;
71307 +
71308 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
71309 + gdt = get_cpu_gdt_table(cpu);
71310 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
71311 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
71312 + gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
71313 + gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
71314 + }
71315 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
71316 +#else
71317 + memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
71318 + memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
71319 + clone_pgd_mask = ~(pgdval_t)0UL;
71320 +#endif
71321 +
71322 + return 0;
71323 +}
71324 +early_param("pax_nouderef", setup_pax_nouderef);
71325 +#endif
71326 +
71327 +#ifdef CONFIG_PAX_SOFTMODE
71328 +int pax_softmode;
71329 +
71330 +static int __init setup_pax_softmode(char *str)
71331 +{
71332 + get_option(&str, &pax_softmode);
71333 + return 1;
71334 +}
71335 +__setup("pax_softmode=", setup_pax_softmode);
71336 +#endif
71337 +
71338 static char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
71339 char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
71340 static const char *panic_later, *panic_param;
71341 @@ -705,52 +749,53 @@ int initcall_debug;
71342 core_param(initcall_debug, initcall_debug, bool, 0644);
71343
71344 static char msgbuf[64];
71345 -static struct boot_trace_call call;
71346 -static struct boot_trace_ret ret;
71347 +static struct boot_trace_call trace_call;
71348 +static struct boot_trace_ret trace_ret;
71349
71350 int do_one_initcall(initcall_t fn)
71351 {
71352 int count = preempt_count();
71353 ktime_t calltime, delta, rettime;
71354 + const char *msg1 = "", *msg2 = "";
71355
71356 if (initcall_debug) {
71357 - call.caller = task_pid_nr(current);
71358 - printk("calling %pF @ %i\n", fn, call.caller);
71359 + trace_call.caller = task_pid_nr(current);
71360 + printk("calling %pF @ %i\n", fn, trace_call.caller);
71361 calltime = ktime_get();
71362 - trace_boot_call(&call, fn);
71363 + trace_boot_call(&trace_call, fn);
71364 enable_boot_trace();
71365 }
71366
71367 - ret.result = fn();
71368 + trace_ret.result = fn();
71369
71370 if (initcall_debug) {
71371 disable_boot_trace();
71372 rettime = ktime_get();
71373 delta = ktime_sub(rettime, calltime);
71374 - ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
71375 - trace_boot_ret(&ret, fn);
71376 + trace_ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
71377 + trace_boot_ret(&trace_ret, fn);
71378 printk("initcall %pF returned %d after %Ld usecs\n", fn,
71379 - ret.result, ret.duration);
71380 + trace_ret.result, trace_ret.duration);
71381 }
71382
71383 msgbuf[0] = 0;
71384
71385 - if (ret.result && ret.result != -ENODEV && initcall_debug)
71386 - sprintf(msgbuf, "error code %d ", ret.result);
71387 + if (trace_ret.result && trace_ret.result != -ENODEV && initcall_debug)
71388 + sprintf(msgbuf, "error code %d ", trace_ret.result);
71389
71390 if (preempt_count() != count) {
71391 - strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
71392 + msg1 = " preemption imbalance";
71393 preempt_count() = count;
71394 }
71395 if (irqs_disabled()) {
71396 - strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
71397 + msg2 = " disabled interrupts";
71398 local_irq_enable();
71399 }
71400 - if (msgbuf[0]) {
71401 - printk("initcall %pF returned with %s\n", fn, msgbuf);
71402 + if (msgbuf[0] || *msg1 || *msg2) {
71403 + printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
71404 }
71405
71406 - return ret.result;
71407 + return trace_ret.result;
71408 }
71409
71410
71411 @@ -893,11 +938,13 @@ static int __init kernel_init(void * unused)
71412 if (!ramdisk_execute_command)
71413 ramdisk_execute_command = "/init";
71414
71415 - if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
71416 + if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
71417 ramdisk_execute_command = NULL;
71418 prepare_namespace();
71419 }
71420
71421 + grsecurity_init();
71422 +
71423 /*
71424 * Ok, we have completed the initial bootup, and
71425 * we're essentially up and running. Get rid of the
71426 diff --git a/init/noinitramfs.c b/init/noinitramfs.c
71427 index f4c1a3a..96c19bd 100644
71428 --- a/init/noinitramfs.c
71429 +++ b/init/noinitramfs.c
71430 @@ -29,7 +29,7 @@ static int __init default_rootfs(void)
71431 {
71432 int err;
71433
71434 - err = sys_mkdir("/dev", 0755);
71435 + err = sys_mkdir((const char __user *)"/dev", 0755);
71436 if (err < 0)
71437 goto out;
71438
71439 @@ -39,7 +39,7 @@ static int __init default_rootfs(void)
71440 if (err < 0)
71441 goto out;
71442
71443 - err = sys_mkdir("/root", 0700);
71444 + err = sys_mkdir((const char __user *)"/root", 0700);
71445 if (err < 0)
71446 goto out;
71447
71448 diff --git a/ipc/mqueue.c b/ipc/mqueue.c
71449 index d01bc14..8df81db 100644
71450 --- a/ipc/mqueue.c
71451 +++ b/ipc/mqueue.c
71452 @@ -150,6 +150,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
71453 mq_bytes = (mq_msg_tblsz +
71454 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
71455
71456 + gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
71457 spin_lock(&mq_lock);
71458 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
71459 u->mq_bytes + mq_bytes >
71460 diff --git a/ipc/msg.c b/ipc/msg.c
71461 index 779f762..4af9e36 100644
71462 --- a/ipc/msg.c
71463 +++ b/ipc/msg.c
71464 @@ -310,18 +310,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
71465 return security_msg_queue_associate(msq, msgflg);
71466 }
71467
71468 +static struct ipc_ops msg_ops = {
71469 + .getnew = newque,
71470 + .associate = msg_security,
71471 + .more_checks = NULL
71472 +};
71473 +
71474 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
71475 {
71476 struct ipc_namespace *ns;
71477 - struct ipc_ops msg_ops;
71478 struct ipc_params msg_params;
71479
71480 ns = current->nsproxy->ipc_ns;
71481
71482 - msg_ops.getnew = newque;
71483 - msg_ops.associate = msg_security;
71484 - msg_ops.more_checks = NULL;
71485 -
71486 msg_params.key = key;
71487 msg_params.flg = msgflg;
71488
71489 diff --git a/ipc/sem.c b/ipc/sem.c
71490 index b781007..f738b04 100644
71491 --- a/ipc/sem.c
71492 +++ b/ipc/sem.c
71493 @@ -309,10 +309,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
71494 return 0;
71495 }
71496
71497 +static struct ipc_ops sem_ops = {
71498 + .getnew = newary,
71499 + .associate = sem_security,
71500 + .more_checks = sem_more_checks
71501 +};
71502 +
71503 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
71504 {
71505 struct ipc_namespace *ns;
71506 - struct ipc_ops sem_ops;
71507 struct ipc_params sem_params;
71508
71509 ns = current->nsproxy->ipc_ns;
71510 @@ -320,10 +325,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
71511 if (nsems < 0 || nsems > ns->sc_semmsl)
71512 return -EINVAL;
71513
71514 - sem_ops.getnew = newary;
71515 - sem_ops.associate = sem_security;
71516 - sem_ops.more_checks = sem_more_checks;
71517 -
71518 sem_params.key = key;
71519 sem_params.flg = semflg;
71520 sem_params.u.nsems = nsems;
71521 @@ -671,6 +672,8 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
71522 ushort* sem_io = fast_sem_io;
71523 int nsems;
71524
71525 + pax_track_stack();
71526 +
71527 sma = sem_lock_check(ns, semid);
71528 if (IS_ERR(sma))
71529 return PTR_ERR(sma);
71530 @@ -1071,6 +1074,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
71531 unsigned long jiffies_left = 0;
71532 struct ipc_namespace *ns;
71533
71534 + pax_track_stack();
71535 +
71536 ns = current->nsproxy->ipc_ns;
71537
71538 if (nsops < 1 || semid < 0)
71539 diff --git a/ipc/shm.c b/ipc/shm.c
71540 index d30732c..e4992cd 100644
71541 --- a/ipc/shm.c
71542 +++ b/ipc/shm.c
71543 @@ -70,6 +70,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
71544 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
71545 #endif
71546
71547 +#ifdef CONFIG_GRKERNSEC
71548 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
71549 + const time_t shm_createtime, const uid_t cuid,
71550 + const int shmid);
71551 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
71552 + const time_t shm_createtime);
71553 +#endif
71554 +
71555 void shm_init_ns(struct ipc_namespace *ns)
71556 {
71557 ns->shm_ctlmax = SHMMAX;
71558 @@ -396,6 +404,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
71559 shp->shm_lprid = 0;
71560 shp->shm_atim = shp->shm_dtim = 0;
71561 shp->shm_ctim = get_seconds();
71562 +#ifdef CONFIG_GRKERNSEC
71563 + {
71564 + struct timespec timeval;
71565 + do_posix_clock_monotonic_gettime(&timeval);
71566 +
71567 + shp->shm_createtime = timeval.tv_sec;
71568 + }
71569 +#endif
71570 shp->shm_segsz = size;
71571 shp->shm_nattch = 0;
71572 shp->shm_file = file;
71573 @@ -446,18 +462,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
71574 return 0;
71575 }
71576
71577 +static struct ipc_ops shm_ops = {
71578 + .getnew = newseg,
71579 + .associate = shm_security,
71580 + .more_checks = shm_more_checks
71581 +};
71582 +
71583 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
71584 {
71585 struct ipc_namespace *ns;
71586 - struct ipc_ops shm_ops;
71587 struct ipc_params shm_params;
71588
71589 ns = current->nsproxy->ipc_ns;
71590
71591 - shm_ops.getnew = newseg;
71592 - shm_ops.associate = shm_security;
71593 - shm_ops.more_checks = shm_more_checks;
71594 -
71595 shm_params.key = key;
71596 shm_params.flg = shmflg;
71597 shm_params.u.size = size;
71598 @@ -857,6 +874,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
71599 f_mode = FMODE_READ | FMODE_WRITE;
71600 }
71601 if (shmflg & SHM_EXEC) {
71602 +
71603 +#ifdef CONFIG_PAX_MPROTECT
71604 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
71605 + goto out;
71606 +#endif
71607 +
71608 prot |= PROT_EXEC;
71609 acc_mode |= S_IXUGO;
71610 }
71611 @@ -880,9 +903,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
71612 if (err)
71613 goto out_unlock;
71614
71615 +#ifdef CONFIG_GRKERNSEC
71616 + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
71617 + shp->shm_perm.cuid, shmid) ||
71618 + !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
71619 + err = -EACCES;
71620 + goto out_unlock;
71621 + }
71622 +#endif
71623 +
71624 path.dentry = dget(shp->shm_file->f_path.dentry);
71625 path.mnt = shp->shm_file->f_path.mnt;
71626 shp->shm_nattch++;
71627 +#ifdef CONFIG_GRKERNSEC
71628 + shp->shm_lapid = current->pid;
71629 +#endif
71630 size = i_size_read(path.dentry->d_inode);
71631 shm_unlock(shp);
71632
71633 diff --git a/kernel/acct.c b/kernel/acct.c
71634 index a6605ca..ca91111 100644
71635 --- a/kernel/acct.c
71636 +++ b/kernel/acct.c
71637 @@ -579,7 +579,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
71638 */
71639 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
71640 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
71641 - file->f_op->write(file, (char *)&ac,
71642 + file->f_op->write(file, (char __force_user *)&ac,
71643 sizeof(acct_t), &file->f_pos);
71644 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
71645 set_fs(fs);
71646 diff --git a/kernel/audit.c b/kernel/audit.c
71647 index 5feed23..48415fd 100644
71648 --- a/kernel/audit.c
71649 +++ b/kernel/audit.c
71650 @@ -110,7 +110,7 @@ u32 audit_sig_sid = 0;
71651 3) suppressed due to audit_rate_limit
71652 4) suppressed due to audit_backlog_limit
71653 */
71654 -static atomic_t audit_lost = ATOMIC_INIT(0);
71655 +static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
71656
71657 /* The netlink socket. */
71658 static struct sock *audit_sock;
71659 @@ -232,7 +232,7 @@ void audit_log_lost(const char *message)
71660 unsigned long now;
71661 int print;
71662
71663 - atomic_inc(&audit_lost);
71664 + atomic_inc_unchecked(&audit_lost);
71665
71666 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
71667
71668 @@ -251,7 +251,7 @@ void audit_log_lost(const char *message)
71669 printk(KERN_WARNING
71670 "audit: audit_lost=%d audit_rate_limit=%d "
71671 "audit_backlog_limit=%d\n",
71672 - atomic_read(&audit_lost),
71673 + atomic_read_unchecked(&audit_lost),
71674 audit_rate_limit,
71675 audit_backlog_limit);
71676 audit_panic(message);
71677 @@ -691,7 +691,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
71678 status_set.pid = audit_pid;
71679 status_set.rate_limit = audit_rate_limit;
71680 status_set.backlog_limit = audit_backlog_limit;
71681 - status_set.lost = atomic_read(&audit_lost);
71682 + status_set.lost = atomic_read_unchecked(&audit_lost);
71683 status_set.backlog = skb_queue_len(&audit_skb_queue);
71684 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
71685 &status_set, sizeof(status_set));
71686 @@ -891,8 +891,10 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
71687 spin_unlock_irq(&tsk->sighand->siglock);
71688 }
71689 read_unlock(&tasklist_lock);
71690 - audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_TTY_GET, 0, 0,
71691 - &s, sizeof(s));
71692 +
71693 + if (!err)
71694 + audit_send_reply(NETLINK_CB(skb).pid, seq,
71695 + AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
71696 break;
71697 }
71698 case AUDIT_TTY_SET: {
71699 @@ -1262,12 +1264,13 @@ static void audit_log_vformat(struct audit_buffer *ab, const char *fmt,
71700 avail = audit_expand(ab,
71701 max_t(unsigned, AUDIT_BUFSIZ, 1+len-avail));
71702 if (!avail)
71703 - goto out;
71704 + goto out_va_end;
71705 len = vsnprintf(skb_tail_pointer(skb), avail, fmt, args2);
71706 }
71707 - va_end(args2);
71708 if (len > 0)
71709 skb_put(skb, len);
71710 +out_va_end:
71711 + va_end(args2);
71712 out:
71713 return;
71714 }
71715 diff --git a/kernel/auditsc.c b/kernel/auditsc.c
71716 index 267e484..ac41bc3 100644
71717 --- a/kernel/auditsc.c
71718 +++ b/kernel/auditsc.c
71719 @@ -1157,8 +1157,8 @@ static void audit_log_execve_info(struct audit_context *context,
71720 struct audit_buffer **ab,
71721 struct audit_aux_data_execve *axi)
71722 {
71723 - int i;
71724 - size_t len, len_sent = 0;
71725 + int i, len;
71726 + size_t len_sent = 0;
71727 const char __user *p;
71728 char *buf;
71729
71730 @@ -2113,7 +2113,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
71731 }
71732
71733 /* global counter which is incremented every time something logs in */
71734 -static atomic_t session_id = ATOMIC_INIT(0);
71735 +static atomic_unchecked_t session_id = ATOMIC_INIT(0);
71736
71737 /**
71738 * audit_set_loginuid - set a task's audit_context loginuid
71739 @@ -2126,7 +2126,7 @@ static atomic_t session_id = ATOMIC_INIT(0);
71740 */
71741 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
71742 {
71743 - unsigned int sessionid = atomic_inc_return(&session_id);
71744 + unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
71745 struct audit_context *context = task->audit_context;
71746
71747 if (context && context->in_syscall) {
71748 diff --git a/kernel/capability.c b/kernel/capability.c
71749 index 8a944f5..db5001e 100644
71750 --- a/kernel/capability.c
71751 +++ b/kernel/capability.c
71752 @@ -305,10 +305,26 @@ int capable(int cap)
71753 BUG();
71754 }
71755
71756 - if (security_capable(cap) == 0) {
71757 + if (security_capable(cap) == 0 && gr_is_capable(cap)) {
71758 current->flags |= PF_SUPERPRIV;
71759 return 1;
71760 }
71761 return 0;
71762 }
71763 +
71764 +int capable_nolog(int cap)
71765 +{
71766 + if (unlikely(!cap_valid(cap))) {
71767 + printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
71768 + BUG();
71769 + }
71770 +
71771 + if (security_capable(cap) == 0 && gr_is_capable_nolog(cap)) {
71772 + current->flags |= PF_SUPERPRIV;
71773 + return 1;
71774 + }
71775 + return 0;
71776 +}
71777 +
71778 EXPORT_SYMBOL(capable);
71779 +EXPORT_SYMBOL(capable_nolog);
71780 diff --git a/kernel/cgroup.c b/kernel/cgroup.c
71781 index 1fbcc74..7000012 100644
71782 --- a/kernel/cgroup.c
71783 +++ b/kernel/cgroup.c
71784 @@ -536,6 +536,8 @@ static struct css_set *find_css_set(
71785 struct hlist_head *hhead;
71786 struct cg_cgroup_link *link;
71787
71788 + pax_track_stack();
71789 +
71790 /* First see if we already have a cgroup group that matches
71791 * the desired set */
71792 read_lock(&css_set_lock);
71793 diff --git a/kernel/compat.c b/kernel/compat.c
71794 index 8bc5578..186e44a 100644
71795 --- a/kernel/compat.c
71796 +++ b/kernel/compat.c
71797 @@ -108,7 +108,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
71798 mm_segment_t oldfs;
71799 long ret;
71800
71801 - restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
71802 + restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
71803 oldfs = get_fs();
71804 set_fs(KERNEL_DS);
71805 ret = hrtimer_nanosleep_restart(restart);
71806 @@ -140,7 +140,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
71807 oldfs = get_fs();
71808 set_fs(KERNEL_DS);
71809 ret = hrtimer_nanosleep(&tu,
71810 - rmtp ? (struct timespec __user *)&rmt : NULL,
71811 + rmtp ? (struct timespec __force_user *)&rmt : NULL,
71812 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
71813 set_fs(oldfs);
71814
71815 @@ -247,7 +247,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
71816 mm_segment_t old_fs = get_fs();
71817
71818 set_fs(KERNEL_DS);
71819 - ret = sys_sigpending((old_sigset_t __user *) &s);
71820 + ret = sys_sigpending((old_sigset_t __force_user *) &s);
71821 set_fs(old_fs);
71822 if (ret == 0)
71823 ret = put_user(s, set);
71824 @@ -266,8 +266,8 @@ asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
71825 old_fs = get_fs();
71826 set_fs(KERNEL_DS);
71827 ret = sys_sigprocmask(how,
71828 - set ? (old_sigset_t __user *) &s : NULL,
71829 - oset ? (old_sigset_t __user *) &s : NULL);
71830 + set ? (old_sigset_t __force_user *) &s : NULL,
71831 + oset ? (old_sigset_t __force_user *) &s : NULL);
71832 set_fs(old_fs);
71833 if (ret == 0)
71834 if (oset)
71835 @@ -310,7 +310,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
71836 mm_segment_t old_fs = get_fs();
71837
71838 set_fs(KERNEL_DS);
71839 - ret = sys_old_getrlimit(resource, &r);
71840 + ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
71841 set_fs(old_fs);
71842
71843 if (!ret) {
71844 @@ -385,7 +385,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
71845 mm_segment_t old_fs = get_fs();
71846
71847 set_fs(KERNEL_DS);
71848 - ret = sys_getrusage(who, (struct rusage __user *) &r);
71849 + ret = sys_getrusage(who, (struct rusage __force_user *) &r);
71850 set_fs(old_fs);
71851
71852 if (ret)
71853 @@ -412,8 +412,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
71854 set_fs (KERNEL_DS);
71855 ret = sys_wait4(pid,
71856 (stat_addr ?
71857 - (unsigned int __user *) &status : NULL),
71858 - options, (struct rusage __user *) &r);
71859 + (unsigned int __force_user *) &status : NULL),
71860 + options, (struct rusage __force_user *) &r);
71861 set_fs (old_fs);
71862
71863 if (ret > 0) {
71864 @@ -438,8 +438,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
71865 memset(&info, 0, sizeof(info));
71866
71867 set_fs(KERNEL_DS);
71868 - ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
71869 - uru ? (struct rusage __user *)&ru : NULL);
71870 + ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
71871 + uru ? (struct rusage __force_user *)&ru : NULL);
71872 set_fs(old_fs);
71873
71874 if ((ret < 0) || (info.si_signo == 0))
71875 @@ -569,8 +569,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
71876 oldfs = get_fs();
71877 set_fs(KERNEL_DS);
71878 err = sys_timer_settime(timer_id, flags,
71879 - (struct itimerspec __user *) &newts,
71880 - (struct itimerspec __user *) &oldts);
71881 + (struct itimerspec __force_user *) &newts,
71882 + (struct itimerspec __force_user *) &oldts);
71883 set_fs(oldfs);
71884 if (!err && old && put_compat_itimerspec(old, &oldts))
71885 return -EFAULT;
71886 @@ -587,7 +587,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
71887 oldfs = get_fs();
71888 set_fs(KERNEL_DS);
71889 err = sys_timer_gettime(timer_id,
71890 - (struct itimerspec __user *) &ts);
71891 + (struct itimerspec __force_user *) &ts);
71892 set_fs(oldfs);
71893 if (!err && put_compat_itimerspec(setting, &ts))
71894 return -EFAULT;
71895 @@ -606,7 +606,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
71896 oldfs = get_fs();
71897 set_fs(KERNEL_DS);
71898 err = sys_clock_settime(which_clock,
71899 - (struct timespec __user *) &ts);
71900 + (struct timespec __force_user *) &ts);
71901 set_fs(oldfs);
71902 return err;
71903 }
71904 @@ -621,7 +621,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
71905 oldfs = get_fs();
71906 set_fs(KERNEL_DS);
71907 err = sys_clock_gettime(which_clock,
71908 - (struct timespec __user *) &ts);
71909 + (struct timespec __force_user *) &ts);
71910 set_fs(oldfs);
71911 if (!err && put_compat_timespec(&ts, tp))
71912 return -EFAULT;
71913 @@ -638,7 +638,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
71914 oldfs = get_fs();
71915 set_fs(KERNEL_DS);
71916 err = sys_clock_getres(which_clock,
71917 - (struct timespec __user *) &ts);
71918 + (struct timespec __force_user *) &ts);
71919 set_fs(oldfs);
71920 if (!err && tp && put_compat_timespec(&ts, tp))
71921 return -EFAULT;
71922 @@ -650,9 +650,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
71923 long err;
71924 mm_segment_t oldfs;
71925 struct timespec tu;
71926 - struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
71927 + struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
71928
71929 - restart->nanosleep.rmtp = (struct timespec __user *) &tu;
71930 + restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
71931 oldfs = get_fs();
71932 set_fs(KERNEL_DS);
71933 err = clock_nanosleep_restart(restart);
71934 @@ -684,8 +684,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
71935 oldfs = get_fs();
71936 set_fs(KERNEL_DS);
71937 err = sys_clock_nanosleep(which_clock, flags,
71938 - (struct timespec __user *) &in,
71939 - (struct timespec __user *) &out);
71940 + (struct timespec __force_user *) &in,
71941 + (struct timespec __force_user *) &out);
71942 set_fs(oldfs);
71943
71944 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
71945 diff --git a/kernel/configs.c b/kernel/configs.c
71946 index abaee68..047facd 100644
71947 --- a/kernel/configs.c
71948 +++ b/kernel/configs.c
71949 @@ -73,8 +73,19 @@ static int __init ikconfig_init(void)
71950 struct proc_dir_entry *entry;
71951
71952 /* create the current config file */
71953 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
71954 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
71955 + entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
71956 + &ikconfig_file_ops);
71957 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
71958 + entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
71959 + &ikconfig_file_ops);
71960 +#endif
71961 +#else
71962 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
71963 &ikconfig_file_ops);
71964 +#endif
71965 +
71966 if (!entry)
71967 return -ENOMEM;
71968
71969 diff --git a/kernel/cpu.c b/kernel/cpu.c
71970 index 3f2f04f..4e53ded 100644
71971 --- a/kernel/cpu.c
71972 +++ b/kernel/cpu.c
71973 @@ -20,7 +20,7 @@
71974 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
71975 static DEFINE_MUTEX(cpu_add_remove_lock);
71976
71977 -static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
71978 +static RAW_NOTIFIER_HEAD(cpu_chain);
71979
71980 /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
71981 * Should always be manipulated under cpu_add_remove_lock
71982 diff --git a/kernel/cred.c b/kernel/cred.c
71983 index 0b5b5fc..f7fe51a 100644
71984 --- a/kernel/cred.c
71985 +++ b/kernel/cred.c
71986 @@ -160,6 +160,8 @@ static void put_cred_rcu(struct rcu_head *rcu)
71987 */
71988 void __put_cred(struct cred *cred)
71989 {
71990 + pax_track_stack();
71991 +
71992 kdebug("__put_cred(%p{%d,%d})", cred,
71993 atomic_read(&cred->usage),
71994 read_cred_subscribers(cred));
71995 @@ -184,6 +186,8 @@ void exit_creds(struct task_struct *tsk)
71996 {
71997 struct cred *cred;
71998
71999 + pax_track_stack();
72000 +
72001 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
72002 atomic_read(&tsk->cred->usage),
72003 read_cred_subscribers(tsk->cred));
72004 @@ -206,6 +210,15 @@ void exit_creds(struct task_struct *tsk)
72005 validate_creds(cred);
72006 put_cred(cred);
72007 }
72008 +
72009 +#ifdef CONFIG_GRKERNSEC_SETXID
72010 + cred = (struct cred *) tsk->delayed_cred;
72011 + if (cred) {
72012 + tsk->delayed_cred = NULL;
72013 + validate_creds(cred);
72014 + put_cred(cred);
72015 + }
72016 +#endif
72017 }
72018
72019 /**
72020 @@ -222,6 +235,8 @@ const struct cred *get_task_cred(struct task_struct *task)
72021 {
72022 const struct cred *cred;
72023
72024 + pax_track_stack();
72025 +
72026 rcu_read_lock();
72027
72028 do {
72029 @@ -241,6 +256,8 @@ struct cred *cred_alloc_blank(void)
72030 {
72031 struct cred *new;
72032
72033 + pax_track_stack();
72034 +
72035 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
72036 if (!new)
72037 return NULL;
72038 @@ -289,6 +306,8 @@ struct cred *prepare_creds(void)
72039 const struct cred *old;
72040 struct cred *new;
72041
72042 + pax_track_stack();
72043 +
72044 validate_process_creds();
72045
72046 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
72047 @@ -335,6 +354,8 @@ struct cred *prepare_exec_creds(void)
72048 struct thread_group_cred *tgcred = NULL;
72049 struct cred *new;
72050
72051 + pax_track_stack();
72052 +
72053 #ifdef CONFIG_KEYS
72054 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
72055 if (!tgcred)
72056 @@ -441,6 +462,8 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags)
72057 struct cred *new;
72058 int ret;
72059
72060 + pax_track_stack();
72061 +
72062 mutex_init(&p->cred_guard_mutex);
72063
72064 if (
72065 @@ -523,11 +546,13 @@ error_put:
72066 * Always returns 0 thus allowing this function to be tail-called at the end
72067 * of, say, sys_setgid().
72068 */
72069 -int commit_creds(struct cred *new)
72070 +static int __commit_creds(struct cred *new)
72071 {
72072 struct task_struct *task = current;
72073 const struct cred *old = task->real_cred;
72074
72075 + pax_track_stack();
72076 +
72077 kdebug("commit_creds(%p{%d,%d})", new,
72078 atomic_read(&new->usage),
72079 read_cred_subscribers(new));
72080 @@ -544,6 +569,8 @@ int commit_creds(struct cred *new)
72081
72082 get_cred(new); /* we will require a ref for the subj creds too */
72083
72084 + gr_set_role_label(task, new->uid, new->gid);
72085 +
72086 /* dumpability changes */
72087 if (old->euid != new->euid ||
72088 old->egid != new->egid ||
72089 @@ -563,10 +590,8 @@ int commit_creds(struct cred *new)
72090 key_fsgid_changed(task);
72091
72092 /* do it
72093 - * - What if a process setreuid()'s and this brings the
72094 - * new uid over his NPROC rlimit? We can check this now
72095 - * cheaply with the new uid cache, so if it matters
72096 - * we should be checking for it. -DaveM
72097 + * RLIMIT_NPROC limits on user->processes have already been checked
72098 + * in set_user().
72099 */
72100 alter_cred_subscribers(new, 2);
72101 if (new->user != old->user)
72102 @@ -595,8 +620,96 @@ int commit_creds(struct cred *new)
72103 put_cred(old);
72104 return 0;
72105 }
72106 +
72107 +#ifdef CONFIG_GRKERNSEC_SETXID
72108 +extern int set_user(struct cred *new);
72109 +
72110 +void gr_delayed_cred_worker(void)
72111 +{
72112 + const struct cred *new = current->delayed_cred;
72113 + struct cred *ncred;
72114 +
72115 + current->delayed_cred = NULL;
72116 +
72117 + if (current_uid() && new != NULL) {
72118 + // from doing get_cred on it when queueing this
72119 + put_cred(new);
72120 + return;
72121 + } else if (new == NULL)
72122 + return;
72123 +
72124 + ncred = prepare_creds();
72125 + if (!ncred)
72126 + goto die;
72127 + // uids
72128 + ncred->uid = new->uid;
72129 + ncred->euid = new->euid;
72130 + ncred->suid = new->suid;
72131 + ncred->fsuid = new->fsuid;
72132 + // gids
72133 + ncred->gid = new->gid;
72134 + ncred->egid = new->egid;
72135 + ncred->sgid = new->sgid;
72136 + ncred->fsgid = new->fsgid;
72137 + // groups
72138 + if (set_groups(ncred, new->group_info) < 0) {
72139 + abort_creds(ncred);
72140 + goto die;
72141 + }
72142 + // caps
72143 + ncred->securebits = new->securebits;
72144 + ncred->cap_inheritable = new->cap_inheritable;
72145 + ncred->cap_permitted = new->cap_permitted;
72146 + ncred->cap_effective = new->cap_effective;
72147 + ncred->cap_bset = new->cap_bset;
72148 +
72149 + if (set_user(ncred)) {
72150 + abort_creds(ncred);
72151 + goto die;
72152 + }
72153 +
72154 + // from doing get_cred on it when queueing this
72155 + put_cred(new);
72156 +
72157 + __commit_creds(ncred);
72158 + return;
72159 +die:
72160 + // from doing get_cred on it when queueing this
72161 + put_cred(new);
72162 + do_group_exit(SIGKILL);
72163 +}
72164 +#endif
72165 +
72166 +int commit_creds(struct cred *new)
72167 +{
72168 +#ifdef CONFIG_GRKERNSEC_SETXID
72169 + struct task_struct *t;
72170 +
72171 + /* we won't get called with tasklist_lock held for writing
72172 + and interrupts disabled as the cred struct in that case is
72173 + init_cred
72174 + */
72175 + if (grsec_enable_setxid && !current_is_single_threaded() &&
72176 + !current_uid() && new->uid) {
72177 + rcu_read_lock();
72178 + read_lock(&tasklist_lock);
72179 + for (t = next_thread(current); t != current;
72180 + t = next_thread(t)) {
72181 + if (t->delayed_cred == NULL) {
72182 + t->delayed_cred = get_cred(new);
72183 + set_tsk_need_resched(t);
72184 + }
72185 + }
72186 + read_unlock(&tasklist_lock);
72187 + rcu_read_unlock();
72188 + }
72189 +#endif
72190 + return __commit_creds(new);
72191 +}
72192 +
72193 EXPORT_SYMBOL(commit_creds);
72194
72195 +
72196 /**
72197 * abort_creds - Discard a set of credentials and unlock the current task
72198 * @new: The credentials that were going to be applied
72199 @@ -606,6 +719,8 @@ EXPORT_SYMBOL(commit_creds);
72200 */
72201 void abort_creds(struct cred *new)
72202 {
72203 + pax_track_stack();
72204 +
72205 kdebug("abort_creds(%p{%d,%d})", new,
72206 atomic_read(&new->usage),
72207 read_cred_subscribers(new));
72208 @@ -629,6 +744,8 @@ const struct cred *override_creds(const struct cred *new)
72209 {
72210 const struct cred *old = current->cred;
72211
72212 + pax_track_stack();
72213 +
72214 kdebug("override_creds(%p{%d,%d})", new,
72215 atomic_read(&new->usage),
72216 read_cred_subscribers(new));
72217 @@ -658,6 +775,8 @@ void revert_creds(const struct cred *old)
72218 {
72219 const struct cred *override = current->cred;
72220
72221 + pax_track_stack();
72222 +
72223 kdebug("revert_creds(%p{%d,%d})", old,
72224 atomic_read(&old->usage),
72225 read_cred_subscribers(old));
72226 @@ -704,6 +823,8 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
72227 const struct cred *old;
72228 struct cred *new;
72229
72230 + pax_track_stack();
72231 +
72232 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
72233 if (!new)
72234 return NULL;
72235 @@ -758,6 +879,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
72236 */
72237 int set_security_override(struct cred *new, u32 secid)
72238 {
72239 + pax_track_stack();
72240 +
72241 return security_kernel_act_as(new, secid);
72242 }
72243 EXPORT_SYMBOL(set_security_override);
72244 @@ -777,6 +900,8 @@ int set_security_override_from_ctx(struct cred *new, const char *secctx)
72245 u32 secid;
72246 int ret;
72247
72248 + pax_track_stack();
72249 +
72250 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
72251 if (ret < 0)
72252 return ret;
72253 diff --git a/kernel/exit.c b/kernel/exit.c
72254 index 0f8fae3..9344a56 100644
72255 --- a/kernel/exit.c
72256 +++ b/kernel/exit.c
72257 @@ -55,6 +55,10 @@
72258 #include <asm/pgtable.h>
72259 #include <asm/mmu_context.h>
72260
72261 +#ifdef CONFIG_GRKERNSEC
72262 +extern rwlock_t grsec_exec_file_lock;
72263 +#endif
72264 +
72265 static void exit_mm(struct task_struct * tsk);
72266
72267 static void __unhash_process(struct task_struct *p)
72268 @@ -174,6 +178,10 @@ void release_task(struct task_struct * p)
72269 struct task_struct *leader;
72270 int zap_leader;
72271 repeat:
72272 +#ifdef CONFIG_NET
72273 + gr_del_task_from_ip_table(p);
72274 +#endif
72275 +
72276 tracehook_prepare_release_task(p);
72277 /* don't need to get the RCU readlock here - the process is dead and
72278 * can't be modifying its own credentials */
72279 @@ -397,7 +405,7 @@ int allow_signal(int sig)
72280 * know it'll be handled, so that they don't get converted to
72281 * SIGKILL or just silently dropped.
72282 */
72283 - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
72284 + current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
72285 recalc_sigpending();
72286 spin_unlock_irq(&current->sighand->siglock);
72287 return 0;
72288 @@ -433,6 +441,17 @@ void daemonize(const char *name, ...)
72289 vsnprintf(current->comm, sizeof(current->comm), name, args);
72290 va_end(args);
72291
72292 +#ifdef CONFIG_GRKERNSEC
72293 + write_lock(&grsec_exec_file_lock);
72294 + if (current->exec_file) {
72295 + fput(current->exec_file);
72296 + current->exec_file = NULL;
72297 + }
72298 + write_unlock(&grsec_exec_file_lock);
72299 +#endif
72300 +
72301 + gr_set_kernel_label(current);
72302 +
72303 /*
72304 * If we were started as result of loading a module, close all of the
72305 * user space pages. We don't need them, and if we didn't close them
72306 @@ -897,17 +916,17 @@ NORET_TYPE void do_exit(long code)
72307 struct task_struct *tsk = current;
72308 int group_dead;
72309
72310 - profile_task_exit(tsk);
72311 -
72312 - WARN_ON(atomic_read(&tsk->fs_excl));
72313 -
72314 + /*
72315 + * Check this first since set_fs() below depends on
72316 + * current_thread_info(), which we better not access when we're in
72317 + * interrupt context. Other than that, we want to do the set_fs()
72318 + * as early as possible.
72319 + */
72320 if (unlikely(in_interrupt()))
72321 panic("Aiee, killing interrupt handler!");
72322 - if (unlikely(!tsk->pid))
72323 - panic("Attempted to kill the idle task!");
72324
72325 /*
72326 - * If do_exit is called because this processes oopsed, it's possible
72327 + * If do_exit is called because this processes Oops'ed, it's possible
72328 * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
72329 * continuing. Amongst other possible reasons, this is to prevent
72330 * mm_release()->clear_child_tid() from writing to a user-controlled
72331 @@ -915,6 +934,13 @@ NORET_TYPE void do_exit(long code)
72332 */
72333 set_fs(USER_DS);
72334
72335 + profile_task_exit(tsk);
72336 +
72337 + WARN_ON(atomic_read(&tsk->fs_excl));
72338 +
72339 + if (unlikely(!tsk->pid))
72340 + panic("Attempted to kill the idle task!");
72341 +
72342 tracehook_report_exit(&code);
72343
72344 validate_creds_for_do_exit(tsk);
72345 @@ -973,6 +999,9 @@ NORET_TYPE void do_exit(long code)
72346 tsk->exit_code = code;
72347 taskstats_exit(tsk, group_dead);
72348
72349 + gr_acl_handle_psacct(tsk, code);
72350 + gr_acl_handle_exit();
72351 +
72352 exit_mm(tsk);
72353
72354 if (group_dead)
72355 @@ -1188,7 +1217,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
72356
72357 if (unlikely(wo->wo_flags & WNOWAIT)) {
72358 int exit_code = p->exit_code;
72359 - int why, status;
72360 + int why;
72361
72362 get_task_struct(p);
72363 read_unlock(&tasklist_lock);
72364 diff --git a/kernel/fork.c b/kernel/fork.c
72365 index 4bde56f..29a9bab 100644
72366 --- a/kernel/fork.c
72367 +++ b/kernel/fork.c
72368 @@ -253,7 +253,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
72369 *stackend = STACK_END_MAGIC; /* for overflow detection */
72370
72371 #ifdef CONFIG_CC_STACKPROTECTOR
72372 - tsk->stack_canary = get_random_int();
72373 + tsk->stack_canary = pax_get_random_long();
72374 #endif
72375
72376 /* One for us, one for whoever does the "release_task()" (usually parent) */
72377 @@ -293,8 +293,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
72378 mm->locked_vm = 0;
72379 mm->mmap = NULL;
72380 mm->mmap_cache = NULL;
72381 - mm->free_area_cache = oldmm->mmap_base;
72382 - mm->cached_hole_size = ~0UL;
72383 + mm->free_area_cache = oldmm->free_area_cache;
72384 + mm->cached_hole_size = oldmm->cached_hole_size;
72385 mm->map_count = 0;
72386 cpumask_clear(mm_cpumask(mm));
72387 mm->mm_rb = RB_ROOT;
72388 @@ -335,6 +335,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
72389 tmp->vm_flags &= ~VM_LOCKED;
72390 tmp->vm_mm = mm;
72391 tmp->vm_next = tmp->vm_prev = NULL;
72392 + tmp->vm_mirror = NULL;
72393 anon_vma_link(tmp);
72394 file = tmp->vm_file;
72395 if (file) {
72396 @@ -384,6 +385,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
72397 if (retval)
72398 goto out;
72399 }
72400 +
72401 +#ifdef CONFIG_PAX_SEGMEXEC
72402 + if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
72403 + struct vm_area_struct *mpnt_m;
72404 +
72405 + for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
72406 + BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
72407 +
72408 + if (!mpnt->vm_mirror)
72409 + continue;
72410 +
72411 + if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
72412 + BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
72413 + mpnt->vm_mirror = mpnt_m;
72414 + } else {
72415 + BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
72416 + mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
72417 + mpnt_m->vm_mirror->vm_mirror = mpnt_m;
72418 + mpnt->vm_mirror->vm_mirror = mpnt;
72419 + }
72420 + }
72421 + BUG_ON(mpnt_m);
72422 + }
72423 +#endif
72424 +
72425 /* a new mm has just been created */
72426 arch_dup_mmap(oldmm, mm);
72427 retval = 0;
72428 @@ -734,13 +760,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
72429 write_unlock(&fs->lock);
72430 return -EAGAIN;
72431 }
72432 - fs->users++;
72433 + atomic_inc(&fs->users);
72434 write_unlock(&fs->lock);
72435 return 0;
72436 }
72437 tsk->fs = copy_fs_struct(fs);
72438 if (!tsk->fs)
72439 return -ENOMEM;
72440 + gr_set_chroot_entries(tsk, &tsk->fs->root);
72441 return 0;
72442 }
72443
72444 @@ -1033,12 +1060,16 @@ static struct task_struct *copy_process(unsigned long clone_flags,
72445 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
72446 #endif
72447 retval = -EAGAIN;
72448 +
72449 + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
72450 +
72451 if (atomic_read(&p->real_cred->user->processes) >=
72452 p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
72453 - if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
72454 - p->real_cred->user != INIT_USER)
72455 + if (p->real_cred->user != INIT_USER &&
72456 + !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
72457 goto bad_fork_free;
72458 }
72459 + current->flags &= ~PF_NPROC_EXCEEDED;
72460
72461 retval = copy_creds(p, clone_flags);
72462 if (retval < 0)
72463 @@ -1183,6 +1214,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
72464 goto bad_fork_free_pid;
72465 }
72466
72467 + gr_copy_label(p);
72468 +
72469 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
72470 /*
72471 * Clear TID on mm_release()?
72472 @@ -1333,6 +1366,8 @@ bad_fork_cleanup_count:
72473 bad_fork_free:
72474 free_task(p);
72475 fork_out:
72476 + gr_log_forkfail(retval);
72477 +
72478 return ERR_PTR(retval);
72479 }
72480
72481 @@ -1426,6 +1461,8 @@ long do_fork(unsigned long clone_flags,
72482 if (clone_flags & CLONE_PARENT_SETTID)
72483 put_user(nr, parent_tidptr);
72484
72485 + gr_handle_brute_check();
72486 +
72487 if (clone_flags & CLONE_VFORK) {
72488 p->vfork_done = &vfork;
72489 init_completion(&vfork);
72490 @@ -1558,7 +1595,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
72491 return 0;
72492
72493 /* don't need lock here; in the worst case we'll do useless copy */
72494 - if (fs->users == 1)
72495 + if (atomic_read(&fs->users) == 1)
72496 return 0;
72497
72498 *new_fsp = copy_fs_struct(fs);
72499 @@ -1681,7 +1718,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
72500 fs = current->fs;
72501 write_lock(&fs->lock);
72502 current->fs = new_fs;
72503 - if (--fs->users)
72504 + gr_set_chroot_entries(current, &current->fs->root);
72505 + if (atomic_dec_return(&fs->users))
72506 new_fs = NULL;
72507 else
72508 new_fs = fs;
72509 diff --git a/kernel/futex.c b/kernel/futex.c
72510 index fb98c9f..333faec 100644
72511 --- a/kernel/futex.c
72512 +++ b/kernel/futex.c
72513 @@ -54,6 +54,7 @@
72514 #include <linux/mount.h>
72515 #include <linux/pagemap.h>
72516 #include <linux/syscalls.h>
72517 +#include <linux/ptrace.h>
72518 #include <linux/signal.h>
72519 #include <linux/module.h>
72520 #include <linux/magic.h>
72521 @@ -223,6 +224,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
72522 struct page *page;
72523 int err, ro = 0;
72524
72525 +#ifdef CONFIG_PAX_SEGMEXEC
72526 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
72527 + return -EFAULT;
72528 +#endif
72529 +
72530 /*
72531 * The futex address must be "naturally" aligned.
72532 */
72533 @@ -1819,6 +1825,8 @@ static int futex_wait(u32 __user *uaddr, int fshared,
72534 struct futex_q q;
72535 int ret;
72536
72537 + pax_track_stack();
72538 +
72539 if (!bitset)
72540 return -EINVAL;
72541
72542 @@ -1871,7 +1879,7 @@ retry:
72543
72544 restart = &current_thread_info()->restart_block;
72545 restart->fn = futex_wait_restart;
72546 - restart->futex.uaddr = (u32 *)uaddr;
72547 + restart->futex.uaddr = uaddr;
72548 restart->futex.val = val;
72549 restart->futex.time = abs_time->tv64;
72550 restart->futex.bitset = bitset;
72551 @@ -2233,6 +2241,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared,
72552 struct futex_q q;
72553 int res, ret;
72554
72555 + pax_track_stack();
72556 +
72557 if (!bitset)
72558 return -EINVAL;
72559
72560 @@ -2423,6 +2433,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
72561 if (!p)
72562 goto err_unlock;
72563 ret = -EPERM;
72564 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72565 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
72566 + goto err_unlock;
72567 +#endif
72568 pcred = __task_cred(p);
72569 if (cred->euid != pcred->euid &&
72570 cred->euid != pcred->uid &&
72571 @@ -2489,7 +2503,7 @@ retry:
72572 */
72573 static inline int fetch_robust_entry(struct robust_list __user **entry,
72574 struct robust_list __user * __user *head,
72575 - int *pi)
72576 + unsigned int *pi)
72577 {
72578 unsigned long uentry;
72579
72580 @@ -2670,6 +2684,7 @@ static int __init futex_init(void)
72581 {
72582 u32 curval;
72583 int i;
72584 + mm_segment_t oldfs;
72585
72586 /*
72587 * This will fail and we want it. Some arch implementations do
72588 @@ -2681,7 +2696,10 @@ static int __init futex_init(void)
72589 * implementation, the non functional ones will return
72590 * -ENOSYS.
72591 */
72592 + oldfs = get_fs();
72593 + set_fs(USER_DS);
72594 curval = cmpxchg_futex_value_locked(NULL, 0, 0);
72595 + set_fs(oldfs);
72596 if (curval == -EFAULT)
72597 futex_cmpxchg_enabled = 1;
72598
72599 diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
72600 index 2357165..eb25501 100644
72601 --- a/kernel/futex_compat.c
72602 +++ b/kernel/futex_compat.c
72603 @@ -10,6 +10,7 @@
72604 #include <linux/compat.h>
72605 #include <linux/nsproxy.h>
72606 #include <linux/futex.h>
72607 +#include <linux/ptrace.h>
72608
72609 #include <asm/uaccess.h>
72610
72611 @@ -135,7 +136,8 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
72612 {
72613 struct compat_robust_list_head __user *head;
72614 unsigned long ret;
72615 - const struct cred *cred = current_cred(), *pcred;
72616 + const struct cred *cred = current_cred();
72617 + const struct cred *pcred;
72618
72619 if (!futex_cmpxchg_enabled)
72620 return -ENOSYS;
72621 @@ -151,6 +153,10 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
72622 if (!p)
72623 goto err_unlock;
72624 ret = -EPERM;
72625 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72626 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
72627 + goto err_unlock;
72628 +#endif
72629 pcred = __task_cred(p);
72630 if (cred->euid != pcred->euid &&
72631 cred->euid != pcred->uid &&
72632 diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
72633 index 9b22d03..6295b62 100644
72634 --- a/kernel/gcov/base.c
72635 +++ b/kernel/gcov/base.c
72636 @@ -102,11 +102,6 @@ void gcov_enable_events(void)
72637 }
72638
72639 #ifdef CONFIG_MODULES
72640 -static inline int within(void *addr, void *start, unsigned long size)
72641 -{
72642 - return ((addr >= start) && (addr < start + size));
72643 -}
72644 -
72645 /* Update list and generate events when modules are unloaded. */
72646 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
72647 void *data)
72648 @@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
72649 prev = NULL;
72650 /* Remove entries located in module from linked list. */
72651 for (info = gcov_info_head; info; info = info->next) {
72652 - if (within(info, mod->module_core, mod->core_size)) {
72653 + if (within_module_core_rw((unsigned long)info, mod)) {
72654 if (prev)
72655 prev->next = info->next;
72656 else
72657 diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
72658 index a6e9d00..a0da4f9 100644
72659 --- a/kernel/hrtimer.c
72660 +++ b/kernel/hrtimer.c
72661 @@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
72662 local_irq_restore(flags);
72663 }
72664
72665 -static void run_hrtimer_softirq(struct softirq_action *h)
72666 +static void run_hrtimer_softirq(void)
72667 {
72668 hrtimer_peek_ahead_timers();
72669 }
72670 diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
72671 index 8b6b8b6..6bc87df 100644
72672 --- a/kernel/kallsyms.c
72673 +++ b/kernel/kallsyms.c
72674 @@ -11,6 +11,9 @@
72675 * Changed the compression method from stem compression to "table lookup"
72676 * compression (see scripts/kallsyms.c for a more complete description)
72677 */
72678 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72679 +#define __INCLUDED_BY_HIDESYM 1
72680 +#endif
72681 #include <linux/kallsyms.h>
72682 #include <linux/module.h>
72683 #include <linux/init.h>
72684 @@ -51,12 +54,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
72685
72686 static inline int is_kernel_inittext(unsigned long addr)
72687 {
72688 + if (system_state != SYSTEM_BOOTING)
72689 + return 0;
72690 +
72691 if (addr >= (unsigned long)_sinittext
72692 && addr <= (unsigned long)_einittext)
72693 return 1;
72694 return 0;
72695 }
72696
72697 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
72698 +#ifdef CONFIG_MODULES
72699 +static inline int is_module_text(unsigned long addr)
72700 +{
72701 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
72702 + return 1;
72703 +
72704 + addr = ktla_ktva(addr);
72705 + return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
72706 +}
72707 +#else
72708 +static inline int is_module_text(unsigned long addr)
72709 +{
72710 + return 0;
72711 +}
72712 +#endif
72713 +#endif
72714 +
72715 static inline int is_kernel_text(unsigned long addr)
72716 {
72717 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
72718 @@ -67,13 +91,28 @@ static inline int is_kernel_text(unsigned long addr)
72719
72720 static inline int is_kernel(unsigned long addr)
72721 {
72722 +
72723 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
72724 + if (is_kernel_text(addr) || is_kernel_inittext(addr))
72725 + return 1;
72726 +
72727 + if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
72728 +#else
72729 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
72730 +#endif
72731 +
72732 return 1;
72733 return in_gate_area_no_task(addr);
72734 }
72735
72736 static int is_ksym_addr(unsigned long addr)
72737 {
72738 +
72739 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
72740 + if (is_module_text(addr))
72741 + return 0;
72742 +#endif
72743 +
72744 if (all_var)
72745 return is_kernel(addr);
72746
72747 @@ -413,7 +452,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
72748
72749 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
72750 {
72751 - iter->name[0] = '\0';
72752 iter->nameoff = get_symbol_offset(new_pos);
72753 iter->pos = new_pos;
72754 }
72755 @@ -461,6 +499,11 @@ static int s_show(struct seq_file *m, void *p)
72756 {
72757 struct kallsym_iter *iter = m->private;
72758
72759 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72760 + if (current_uid())
72761 + return 0;
72762 +#endif
72763 +
72764 /* Some debugging symbols have no name. Ignore them. */
72765 if (!iter->name[0])
72766 return 0;
72767 @@ -501,7 +544,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
72768 struct kallsym_iter *iter;
72769 int ret;
72770
72771 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
72772 + iter = kzalloc(sizeof(*iter), GFP_KERNEL);
72773 if (!iter)
72774 return -ENOMEM;
72775 reset_iter(iter, 0);
72776 diff --git a/kernel/kexec.c b/kernel/kexec.c
72777 index f336e21..9c1c20b 100644
72778 --- a/kernel/kexec.c
72779 +++ b/kernel/kexec.c
72780 @@ -1028,7 +1028,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
72781 unsigned long flags)
72782 {
72783 struct compat_kexec_segment in;
72784 - struct kexec_segment out, __user *ksegments;
72785 + struct kexec_segment out;
72786 + struct kexec_segment __user *ksegments;
72787 unsigned long i, result;
72788
72789 /* Don't allow clients that don't understand the native
72790 diff --git a/kernel/kgdb.c b/kernel/kgdb.c
72791 index 53dae4b..9ba3743 100644
72792 --- a/kernel/kgdb.c
72793 +++ b/kernel/kgdb.c
72794 @@ -86,7 +86,7 @@ static int kgdb_io_module_registered;
72795 /* Guard for recursive entry */
72796 static int exception_level;
72797
72798 -static struct kgdb_io *kgdb_io_ops;
72799 +static const struct kgdb_io *kgdb_io_ops;
72800 static DEFINE_SPINLOCK(kgdb_registration_lock);
72801
72802 /* kgdb console driver is loaded */
72803 @@ -123,7 +123,7 @@ atomic_t kgdb_active = ATOMIC_INIT(-1);
72804 */
72805 static atomic_t passive_cpu_wait[NR_CPUS];
72806 static atomic_t cpu_in_kgdb[NR_CPUS];
72807 -atomic_t kgdb_setting_breakpoint;
72808 +atomic_unchecked_t kgdb_setting_breakpoint;
72809
72810 struct task_struct *kgdb_usethread;
72811 struct task_struct *kgdb_contthread;
72812 @@ -140,7 +140,7 @@ static unsigned long gdb_regs[(NUMREGBYTES +
72813 sizeof(unsigned long)];
72814
72815 /* to keep track of the CPU which is doing the single stepping*/
72816 -atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
72817 +atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
72818
72819 /*
72820 * If you are debugging a problem where roundup (the collection of
72821 @@ -815,7 +815,7 @@ static int kgdb_io_ready(int print_wait)
72822 return 0;
72823 if (kgdb_connected)
72824 return 1;
72825 - if (atomic_read(&kgdb_setting_breakpoint))
72826 + if (atomic_read_unchecked(&kgdb_setting_breakpoint))
72827 return 1;
72828 if (print_wait)
72829 printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
72830 @@ -1426,8 +1426,8 @@ acquirelock:
72831 * instance of the exception handler wanted to come into the
72832 * debugger on a different CPU via a single step
72833 */
72834 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
72835 - atomic_read(&kgdb_cpu_doing_single_step) != cpu) {
72836 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
72837 + atomic_read_unchecked(&kgdb_cpu_doing_single_step) != cpu) {
72838
72839 atomic_set(&kgdb_active, -1);
72840 touch_softlockup_watchdog();
72841 @@ -1634,7 +1634,7 @@ static void kgdb_initial_breakpoint(void)
72842 *
72843 * Register it with the KGDB core.
72844 */
72845 -int kgdb_register_io_module(struct kgdb_io *new_kgdb_io_ops)
72846 +int kgdb_register_io_module(const struct kgdb_io *new_kgdb_io_ops)
72847 {
72848 int err;
72849
72850 @@ -1679,7 +1679,7 @@ EXPORT_SYMBOL_GPL(kgdb_register_io_module);
72851 *
72852 * Unregister it with the KGDB core.
72853 */
72854 -void kgdb_unregister_io_module(struct kgdb_io *old_kgdb_io_ops)
72855 +void kgdb_unregister_io_module(const struct kgdb_io *old_kgdb_io_ops)
72856 {
72857 BUG_ON(kgdb_connected);
72858
72859 @@ -1712,11 +1712,11 @@ EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);
72860 */
72861 void kgdb_breakpoint(void)
72862 {
72863 - atomic_set(&kgdb_setting_breakpoint, 1);
72864 + atomic_set_unchecked(&kgdb_setting_breakpoint, 1);
72865 wmb(); /* Sync point before breakpoint */
72866 arch_kgdb_breakpoint();
72867 wmb(); /* Sync point after breakpoint */
72868 - atomic_set(&kgdb_setting_breakpoint, 0);
72869 + atomic_set_unchecked(&kgdb_setting_breakpoint, 0);
72870 }
72871 EXPORT_SYMBOL_GPL(kgdb_breakpoint);
72872
72873 diff --git a/kernel/kmod.c b/kernel/kmod.c
72874 index d206078..e27ba6a 100644
72875 --- a/kernel/kmod.c
72876 +++ b/kernel/kmod.c
72877 @@ -65,13 +65,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";
72878 * If module auto-loading support is disabled then this function
72879 * becomes a no-operation.
72880 */
72881 -int __request_module(bool wait, const char *fmt, ...)
72882 +static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
72883 {
72884 - va_list args;
72885 char module_name[MODULE_NAME_LEN];
72886 unsigned int max_modprobes;
72887 int ret;
72888 - char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
72889 + char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
72890 static char *envp[] = { "HOME=/",
72891 "TERM=linux",
72892 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
72893 @@ -84,12 +83,24 @@ int __request_module(bool wait, const char *fmt, ...)
72894 if (ret)
72895 return ret;
72896
72897 - va_start(args, fmt);
72898 - ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
72899 - va_end(args);
72900 + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
72901 if (ret >= MODULE_NAME_LEN)
72902 return -ENAMETOOLONG;
72903
72904 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
72905 + if (!current_uid()) {
72906 + /* hack to workaround consolekit/udisks stupidity */
72907 + read_lock(&tasklist_lock);
72908 + if (!strcmp(current->comm, "mount") &&
72909 + current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
72910 + read_unlock(&tasklist_lock);
72911 + printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
72912 + return -EPERM;
72913 + }
72914 + read_unlock(&tasklist_lock);
72915 + }
72916 +#endif
72917 +
72918 /* If modprobe needs a service that is in a module, we get a recursive
72919 * loop. Limit the number of running kmod threads to max_threads/2 or
72920 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
72921 @@ -123,6 +134,48 @@ int __request_module(bool wait, const char *fmt, ...)
72922 atomic_dec(&kmod_concurrent);
72923 return ret;
72924 }
72925 +
72926 +int ___request_module(bool wait, char *module_param, const char *fmt, ...)
72927 +{
72928 + va_list args;
72929 + int ret;
72930 +
72931 + va_start(args, fmt);
72932 + ret = ____request_module(wait, module_param, fmt, args);
72933 + va_end(args);
72934 +
72935 + return ret;
72936 +}
72937 +
72938 +int __request_module(bool wait, const char *fmt, ...)
72939 +{
72940 + va_list args;
72941 + int ret;
72942 +
72943 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
72944 + if (current_uid()) {
72945 + char module_param[MODULE_NAME_LEN];
72946 +
72947 + memset(module_param, 0, sizeof(module_param));
72948 +
72949 + snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
72950 +
72951 + va_start(args, fmt);
72952 + ret = ____request_module(wait, module_param, fmt, args);
72953 + va_end(args);
72954 +
72955 + return ret;
72956 + }
72957 +#endif
72958 +
72959 + va_start(args, fmt);
72960 + ret = ____request_module(wait, NULL, fmt, args);
72961 + va_end(args);
72962 +
72963 + return ret;
72964 +}
72965 +
72966 +
72967 EXPORT_SYMBOL(__request_module);
72968 #endif /* CONFIG_MODULES */
72969
72970 @@ -228,7 +281,7 @@ static int wait_for_helper(void *data)
72971 *
72972 * Thus the __user pointer cast is valid here.
72973 */
72974 - sys_wait4(pid, (int __user *)&ret, 0, NULL);
72975 + sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
72976
72977 /*
72978 * If ret is 0, either ____call_usermodehelper failed and the
72979 diff --git a/kernel/kprobes.c b/kernel/kprobes.c
72980 index 176d825..77fa8ea 100644
72981 --- a/kernel/kprobes.c
72982 +++ b/kernel/kprobes.c
72983 @@ -183,7 +183,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(void)
72984 * kernel image and loaded module images reside. This is required
72985 * so x86_64 can correctly handle the %rip-relative fixups.
72986 */
72987 - kip->insns = module_alloc(PAGE_SIZE);
72988 + kip->insns = module_alloc_exec(PAGE_SIZE);
72989 if (!kip->insns) {
72990 kfree(kip);
72991 return NULL;
72992 @@ -220,7 +220,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
72993 */
72994 if (!list_is_singular(&kprobe_insn_pages)) {
72995 list_del(&kip->list);
72996 - module_free(NULL, kip->insns);
72997 + module_free_exec(NULL, kip->insns);
72998 kfree(kip);
72999 }
73000 return 1;
73001 @@ -1189,7 +1189,7 @@ static int __init init_kprobes(void)
73002 {
73003 int i, err = 0;
73004 unsigned long offset = 0, size = 0;
73005 - char *modname, namebuf[128];
73006 + char *modname, namebuf[KSYM_NAME_LEN];
73007 const char *symbol_name;
73008 void *addr;
73009 struct kprobe_blackpoint *kb;
73010 @@ -1304,7 +1304,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
73011 const char *sym = NULL;
73012 unsigned int i = *(loff_t *) v;
73013 unsigned long offset = 0;
73014 - char *modname, namebuf[128];
73015 + char *modname, namebuf[KSYM_NAME_LEN];
73016
73017 head = &kprobe_table[i];
73018 preempt_disable();
73019 diff --git a/kernel/lockdep.c b/kernel/lockdep.c
73020 index d86fe89..d12fc66 100644
73021 --- a/kernel/lockdep.c
73022 +++ b/kernel/lockdep.c
73023 @@ -421,20 +421,20 @@ static struct stack_trace lockdep_init_trace = {
73024 /*
73025 * Various lockdep statistics:
73026 */
73027 -atomic_t chain_lookup_hits;
73028 -atomic_t chain_lookup_misses;
73029 -atomic_t hardirqs_on_events;
73030 -atomic_t hardirqs_off_events;
73031 -atomic_t redundant_hardirqs_on;
73032 -atomic_t redundant_hardirqs_off;
73033 -atomic_t softirqs_on_events;
73034 -atomic_t softirqs_off_events;
73035 -atomic_t redundant_softirqs_on;
73036 -atomic_t redundant_softirqs_off;
73037 -atomic_t nr_unused_locks;
73038 -atomic_t nr_cyclic_checks;
73039 -atomic_t nr_find_usage_forwards_checks;
73040 -atomic_t nr_find_usage_backwards_checks;
73041 +atomic_unchecked_t chain_lookup_hits;
73042 +atomic_unchecked_t chain_lookup_misses;
73043 +atomic_unchecked_t hardirqs_on_events;
73044 +atomic_unchecked_t hardirqs_off_events;
73045 +atomic_unchecked_t redundant_hardirqs_on;
73046 +atomic_unchecked_t redundant_hardirqs_off;
73047 +atomic_unchecked_t softirqs_on_events;
73048 +atomic_unchecked_t softirqs_off_events;
73049 +atomic_unchecked_t redundant_softirqs_on;
73050 +atomic_unchecked_t redundant_softirqs_off;
73051 +atomic_unchecked_t nr_unused_locks;
73052 +atomic_unchecked_t nr_cyclic_checks;
73053 +atomic_unchecked_t nr_find_usage_forwards_checks;
73054 +atomic_unchecked_t nr_find_usage_backwards_checks;
73055 #endif
73056
73057 /*
73058 @@ -577,6 +577,10 @@ static int static_obj(void *obj)
73059 int i;
73060 #endif
73061
73062 +#ifdef CONFIG_PAX_KERNEXEC
73063 + start = ktla_ktva(start);
73064 +#endif
73065 +
73066 /*
73067 * static variable?
73068 */
73069 @@ -592,8 +596,7 @@ static int static_obj(void *obj)
73070 */
73071 for_each_possible_cpu(i) {
73072 start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
73073 - end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
73074 - + per_cpu_offset(i);
73075 + end = start + PERCPU_ENOUGH_ROOM;
73076
73077 if ((addr >= start) && (addr < end))
73078 return 1;
73079 @@ -710,6 +713,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
73080 if (!static_obj(lock->key)) {
73081 debug_locks_off();
73082 printk("INFO: trying to register non-static key.\n");
73083 + printk("lock:%pS key:%pS.\n", lock, lock->key);
73084 printk("the code is fine but needs lockdep annotation.\n");
73085 printk("turning off the locking correctness validator.\n");
73086 dump_stack();
73087 @@ -2751,7 +2755,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
73088 if (!class)
73089 return 0;
73090 }
73091 - debug_atomic_inc((atomic_t *)&class->ops);
73092 + debug_atomic_inc((atomic_unchecked_t *)&class->ops);
73093 if (very_verbose(class)) {
73094 printk("\nacquire class [%p] %s", class->key, class->name);
73095 if (class->name_version > 1)
73096 diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
73097 index a2ee95a..092f0f2 100644
73098 --- a/kernel/lockdep_internals.h
73099 +++ b/kernel/lockdep_internals.h
73100 @@ -113,26 +113,26 @@ lockdep_count_backward_deps(struct lock_class *class)
73101 /*
73102 * Various lockdep statistics:
73103 */
73104 -extern atomic_t chain_lookup_hits;
73105 -extern atomic_t chain_lookup_misses;
73106 -extern atomic_t hardirqs_on_events;
73107 -extern atomic_t hardirqs_off_events;
73108 -extern atomic_t redundant_hardirqs_on;
73109 -extern atomic_t redundant_hardirqs_off;
73110 -extern atomic_t softirqs_on_events;
73111 -extern atomic_t softirqs_off_events;
73112 -extern atomic_t redundant_softirqs_on;
73113 -extern atomic_t redundant_softirqs_off;
73114 -extern atomic_t nr_unused_locks;
73115 -extern atomic_t nr_cyclic_checks;
73116 -extern atomic_t nr_cyclic_check_recursions;
73117 -extern atomic_t nr_find_usage_forwards_checks;
73118 -extern atomic_t nr_find_usage_forwards_recursions;
73119 -extern atomic_t nr_find_usage_backwards_checks;
73120 -extern atomic_t nr_find_usage_backwards_recursions;
73121 -# define debug_atomic_inc(ptr) atomic_inc(ptr)
73122 -# define debug_atomic_dec(ptr) atomic_dec(ptr)
73123 -# define debug_atomic_read(ptr) atomic_read(ptr)
73124 +extern atomic_unchecked_t chain_lookup_hits;
73125 +extern atomic_unchecked_t chain_lookup_misses;
73126 +extern atomic_unchecked_t hardirqs_on_events;
73127 +extern atomic_unchecked_t hardirqs_off_events;
73128 +extern atomic_unchecked_t redundant_hardirqs_on;
73129 +extern atomic_unchecked_t redundant_hardirqs_off;
73130 +extern atomic_unchecked_t softirqs_on_events;
73131 +extern atomic_unchecked_t softirqs_off_events;
73132 +extern atomic_unchecked_t redundant_softirqs_on;
73133 +extern atomic_unchecked_t redundant_softirqs_off;
73134 +extern atomic_unchecked_t nr_unused_locks;
73135 +extern atomic_unchecked_t nr_cyclic_checks;
73136 +extern atomic_unchecked_t nr_cyclic_check_recursions;
73137 +extern atomic_unchecked_t nr_find_usage_forwards_checks;
73138 +extern atomic_unchecked_t nr_find_usage_forwards_recursions;
73139 +extern atomic_unchecked_t nr_find_usage_backwards_checks;
73140 +extern atomic_unchecked_t nr_find_usage_backwards_recursions;
73141 +# define debug_atomic_inc(ptr) atomic_inc_unchecked(ptr)
73142 +# define debug_atomic_dec(ptr) atomic_dec_unchecked(ptr)
73143 +# define debug_atomic_read(ptr) atomic_read_unchecked(ptr)
73144 #else
73145 # define debug_atomic_inc(ptr) do { } while (0)
73146 # define debug_atomic_dec(ptr) do { } while (0)
73147 diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
73148 index d4aba4f..02a353f 100644
73149 --- a/kernel/lockdep_proc.c
73150 +++ b/kernel/lockdep_proc.c
73151 @@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
73152
73153 static void print_name(struct seq_file *m, struct lock_class *class)
73154 {
73155 - char str[128];
73156 + char str[KSYM_NAME_LEN];
73157 const char *name = class->name;
73158
73159 if (!name) {
73160 diff --git a/kernel/module.c b/kernel/module.c
73161 index 4b270e6..2226274 100644
73162 --- a/kernel/module.c
73163 +++ b/kernel/module.c
73164 @@ -55,6 +55,7 @@
73165 #include <linux/async.h>
73166 #include <linux/percpu.h>
73167 #include <linux/kmemleak.h>
73168 +#include <linux/grsecurity.h>
73169
73170 #define CREATE_TRACE_POINTS
73171 #include <trace/events/module.h>
73172 @@ -89,7 +90,8 @@ static DECLARE_WAIT_QUEUE_HEAD(module_wq);
73173 static BLOCKING_NOTIFIER_HEAD(module_notify_list);
73174
73175 /* Bounds of module allocation, for speeding __module_address */
73176 -static unsigned long module_addr_min = -1UL, module_addr_max = 0;
73177 +static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
73178 +static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
73179
73180 int register_module_notifier(struct notifier_block * nb)
73181 {
73182 @@ -245,7 +247,7 @@ bool each_symbol(bool (*fn)(const struct symsearch *arr, struct module *owner,
73183 return true;
73184
73185 list_for_each_entry_rcu(mod, &modules, list) {
73186 - struct symsearch arr[] = {
73187 + struct symsearch modarr[] = {
73188 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
73189 NOT_GPL_ONLY, false },
73190 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
73191 @@ -267,7 +269,7 @@ bool each_symbol(bool (*fn)(const struct symsearch *arr, struct module *owner,
73192 #endif
73193 };
73194
73195 - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
73196 + if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
73197 return true;
73198 }
73199 return false;
73200 @@ -442,7 +444,7 @@ static void *percpu_modalloc(unsigned long size, unsigned long align,
73201 void *ptr;
73202 int cpu;
73203
73204 - if (align > PAGE_SIZE) {
73205 + if (align-1 >= PAGE_SIZE) {
73206 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
73207 name, align, PAGE_SIZE);
73208 align = PAGE_SIZE;
73209 @@ -1158,7 +1160,7 @@ static const struct kernel_symbol *resolve_symbol(Elf_Shdr *sechdrs,
73210 * /sys/module/foo/sections stuff
73211 * J. Corbet <corbet@lwn.net>
73212 */
73213 -#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS)
73214 +#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
73215
73216 static inline bool sect_empty(const Elf_Shdr *sect)
73217 {
73218 @@ -1545,7 +1547,8 @@ static void free_module(struct module *mod)
73219 destroy_params(mod->kp, mod->num_kp);
73220
73221 /* This may be NULL, but that's OK */
73222 - module_free(mod, mod->module_init);
73223 + module_free(mod, mod->module_init_rw);
73224 + module_free_exec(mod, mod->module_init_rx);
73225 kfree(mod->args);
73226 if (mod->percpu)
73227 percpu_modfree(mod->percpu);
73228 @@ -1554,10 +1557,12 @@ static void free_module(struct module *mod)
73229 percpu_modfree(mod->refptr);
73230 #endif
73231 /* Free lock-classes: */
73232 - lockdep_free_key_range(mod->module_core, mod->core_size);
73233 + lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
73234 + lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
73235
73236 /* Finally, free the core (containing the module structure) */
73237 - module_free(mod, mod->module_core);
73238 + module_free_exec(mod, mod->module_core_rx);
73239 + module_free(mod, mod->module_core_rw);
73240
73241 #ifdef CONFIG_MPU
73242 update_protections(current->mm);
73243 @@ -1628,8 +1633,32 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
73244 unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
73245 int ret = 0;
73246 const struct kernel_symbol *ksym;
73247 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
73248 + int is_fs_load = 0;
73249 + int register_filesystem_found = 0;
73250 + char *p;
73251 +
73252 + p = strstr(mod->args, "grsec_modharden_fs");
73253 +
73254 + if (p) {
73255 + char *endptr = p + strlen("grsec_modharden_fs");
73256 + /* copy \0 as well */
73257 + memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
73258 + is_fs_load = 1;
73259 + }
73260 +#endif
73261 +
73262
73263 for (i = 1; i < n; i++) {
73264 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
73265 + const char *name = strtab + sym[i].st_name;
73266 +
73267 + /* it's a real shame this will never get ripped and copied
73268 + upstream! ;(
73269 + */
73270 + if (is_fs_load && !strcmp(name, "register_filesystem"))
73271 + register_filesystem_found = 1;
73272 +#endif
73273 switch (sym[i].st_shndx) {
73274 case SHN_COMMON:
73275 /* We compiled with -fno-common. These are not
73276 @@ -1651,7 +1680,9 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
73277 strtab + sym[i].st_name, mod);
73278 /* Ok if resolved. */
73279 if (ksym) {
73280 + pax_open_kernel();
73281 sym[i].st_value = ksym->value;
73282 + pax_close_kernel();
73283 break;
73284 }
73285
73286 @@ -1670,11 +1701,20 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
73287 secbase = (unsigned long)mod->percpu;
73288 else
73289 secbase = sechdrs[sym[i].st_shndx].sh_addr;
73290 + pax_open_kernel();
73291 sym[i].st_value += secbase;
73292 + pax_close_kernel();
73293 break;
73294 }
73295 }
73296
73297 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
73298 + if (is_fs_load && !register_filesystem_found) {
73299 + printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
73300 + ret = -EPERM;
73301 + }
73302 +#endif
73303 +
73304 return ret;
73305 }
73306
73307 @@ -1731,11 +1771,12 @@ static void layout_sections(struct module *mod,
73308 || s->sh_entsize != ~0UL
73309 || strstarts(secstrings + s->sh_name, ".init"))
73310 continue;
73311 - s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
73312 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
73313 + s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
73314 + else
73315 + s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
73316 DEBUGP("\t%s\n", secstrings + s->sh_name);
73317 }
73318 - if (m == 0)
73319 - mod->core_text_size = mod->core_size;
73320 }
73321
73322 DEBUGP("Init section allocation order:\n");
73323 @@ -1748,12 +1789,13 @@ static void layout_sections(struct module *mod,
73324 || s->sh_entsize != ~0UL
73325 || !strstarts(secstrings + s->sh_name, ".init"))
73326 continue;
73327 - s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
73328 - | INIT_OFFSET_MASK);
73329 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
73330 + s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
73331 + else
73332 + s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
73333 + s->sh_entsize |= INIT_OFFSET_MASK;
73334 DEBUGP("\t%s\n", secstrings + s->sh_name);
73335 }
73336 - if (m == 0)
73337 - mod->init_text_size = mod->init_size;
73338 }
73339 }
73340
73341 @@ -1857,9 +1899,8 @@ static int is_exported(const char *name, unsigned long value,
73342
73343 /* As per nm */
73344 static char elf_type(const Elf_Sym *sym,
73345 - Elf_Shdr *sechdrs,
73346 - const char *secstrings,
73347 - struct module *mod)
73348 + const Elf_Shdr *sechdrs,
73349 + const char *secstrings)
73350 {
73351 if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
73352 if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
73353 @@ -1934,7 +1975,7 @@ static unsigned long layout_symtab(struct module *mod,
73354
73355 /* Put symbol section at end of init part of module. */
73356 symsect->sh_flags |= SHF_ALLOC;
73357 - symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
73358 + symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
73359 symindex) | INIT_OFFSET_MASK;
73360 DEBUGP("\t%s\n", secstrings + symsect->sh_name);
73361
73362 @@ -1951,19 +1992,19 @@ static unsigned long layout_symtab(struct module *mod,
73363 }
73364
73365 /* Append room for core symbols at end of core part. */
73366 - symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
73367 - mod->core_size = symoffs + ndst * sizeof(Elf_Sym);
73368 + symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
73369 + mod->core_size_rx = symoffs + ndst * sizeof(Elf_Sym);
73370
73371 /* Put string table section at end of init part of module. */
73372 strsect->sh_flags |= SHF_ALLOC;
73373 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
73374 + strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
73375 strindex) | INIT_OFFSET_MASK;
73376 DEBUGP("\t%s\n", secstrings + strsect->sh_name);
73377
73378 /* Append room for core symbols' strings at end of core part. */
73379 - *pstroffs = mod->core_size;
73380 + *pstroffs = mod->core_size_rx;
73381 __set_bit(0, strmap);
73382 - mod->core_size += bitmap_weight(strmap, strsect->sh_size);
73383 + mod->core_size_rx += bitmap_weight(strmap, strsect->sh_size);
73384
73385 return symoffs;
73386 }
73387 @@ -1987,12 +2028,14 @@ static void add_kallsyms(struct module *mod,
73388 mod->num_symtab = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
73389 mod->strtab = (void *)sechdrs[strindex].sh_addr;
73390
73391 + pax_open_kernel();
73392 +
73393 /* Set types up while we still have access to sections. */
73394 for (i = 0; i < mod->num_symtab; i++)
73395 mod->symtab[i].st_info
73396 - = elf_type(&mod->symtab[i], sechdrs, secstrings, mod);
73397 + = elf_type(&mod->symtab[i], sechdrs, secstrings);
73398
73399 - mod->core_symtab = dst = mod->module_core + symoffs;
73400 + mod->core_symtab = dst = mod->module_core_rx + symoffs;
73401 src = mod->symtab;
73402 *dst = *src;
73403 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
73404 @@ -2004,10 +2047,12 @@ static void add_kallsyms(struct module *mod,
73405 }
73406 mod->core_num_syms = ndst;
73407
73408 - mod->core_strtab = s = mod->module_core + stroffs;
73409 + mod->core_strtab = s = mod->module_core_rx + stroffs;
73410 for (*s = 0, i = 1; i < sechdrs[strindex].sh_size; ++i)
73411 if (test_bit(i, strmap))
73412 *++s = mod->strtab[i];
73413 +
73414 + pax_close_kernel();
73415 }
73416 #else
73417 static inline unsigned long layout_symtab(struct module *mod,
73418 @@ -2044,16 +2089,30 @@ static void dynamic_debug_setup(struct _ddebug *debug, unsigned int num)
73419 #endif
73420 }
73421
73422 -static void *module_alloc_update_bounds(unsigned long size)
73423 +static void *module_alloc_update_bounds_rw(unsigned long size)
73424 {
73425 void *ret = module_alloc(size);
73426
73427 if (ret) {
73428 /* Update module bounds. */
73429 - if ((unsigned long)ret < module_addr_min)
73430 - module_addr_min = (unsigned long)ret;
73431 - if ((unsigned long)ret + size > module_addr_max)
73432 - module_addr_max = (unsigned long)ret + size;
73433 + if ((unsigned long)ret < module_addr_min_rw)
73434 + module_addr_min_rw = (unsigned long)ret;
73435 + if ((unsigned long)ret + size > module_addr_max_rw)
73436 + module_addr_max_rw = (unsigned long)ret + size;
73437 + }
73438 + return ret;
73439 +}
73440 +
73441 +static void *module_alloc_update_bounds_rx(unsigned long size)
73442 +{
73443 + void *ret = module_alloc_exec(size);
73444 +
73445 + if (ret) {
73446 + /* Update module bounds. */
73447 + if ((unsigned long)ret < module_addr_min_rx)
73448 + module_addr_min_rx = (unsigned long)ret;
73449 + if ((unsigned long)ret + size > module_addr_max_rx)
73450 + module_addr_max_rx = (unsigned long)ret + size;
73451 }
73452 return ret;
73453 }
73454 @@ -2065,8 +2124,8 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
73455 unsigned int i;
73456
73457 /* only scan the sections containing data */
73458 - kmemleak_scan_area(mod->module_core, (unsigned long)mod -
73459 - (unsigned long)mod->module_core,
73460 + kmemleak_scan_area(mod->module_core_rw, (unsigned long)mod -
73461 + (unsigned long)mod->module_core_rw,
73462 sizeof(struct module), GFP_KERNEL);
73463
73464 for (i = 1; i < hdr->e_shnum; i++) {
73465 @@ -2076,8 +2135,8 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
73466 && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0)
73467 continue;
73468
73469 - kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr -
73470 - (unsigned long)mod->module_core,
73471 + kmemleak_scan_area(mod->module_core_rw, sechdrs[i].sh_addr -
73472 + (unsigned long)mod->module_core_rw,
73473 sechdrs[i].sh_size, GFP_KERNEL);
73474 }
73475 }
73476 @@ -2097,7 +2156,7 @@ static noinline struct module *load_module(void __user *umod,
73477 Elf_Ehdr *hdr;
73478 Elf_Shdr *sechdrs;
73479 char *secstrings, *args, *modmagic, *strtab = NULL;
73480 - char *staging;
73481 + char *staging, *license;
73482 unsigned int i;
73483 unsigned int symindex = 0;
73484 unsigned int strindex = 0;
73485 @@ -2195,6 +2254,14 @@ static noinline struct module *load_module(void __user *umod,
73486 goto free_hdr;
73487 }
73488
73489 + license = get_modinfo(sechdrs, infoindex, "license");
73490 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
73491 + if (!license || !license_is_gpl_compatible(license)) {
72492 + err = -ENOEXEC;
73493 + goto free_hdr;
73494 + }
73495 +#endif
73496 +
73497 modmagic = get_modinfo(sechdrs, infoindex, "vermagic");
73498 /* This is allowed: modprobe --force will invalidate it. */
73499 if (!modmagic) {
73500 @@ -2263,7 +2330,7 @@ static noinline struct module *load_module(void __user *umod,
73501 secstrings, &stroffs, strmap);
73502
73503 /* Do the allocs. */
73504 - ptr = module_alloc_update_bounds(mod->core_size);
73505 + ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
73506 /*
73507 * The pointer to this block is stored in the module structure
73508 * which is inside the block. Just mark it as not being a
73509 @@ -2274,23 +2341,47 @@ static noinline struct module *load_module(void __user *umod,
73510 err = -ENOMEM;
73511 goto free_percpu;
73512 }
73513 - memset(ptr, 0, mod->core_size);
73514 - mod->module_core = ptr;
73515 + memset(ptr, 0, mod->core_size_rw);
73516 + mod->module_core_rw = ptr;
73517
73518 - ptr = module_alloc_update_bounds(mod->init_size);
73519 + ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
73520 /*
73521 * The pointer to this block is stored in the module structure
73522 * which is inside the block. This block doesn't need to be
73523 * scanned as it contains data and code that will be freed
73524 * after the module is initialized.
73525 */
73526 - kmemleak_ignore(ptr);
73527 - if (!ptr && mod->init_size) {
73528 + kmemleak_not_leak(ptr);
73529 + if (!ptr && mod->init_size_rw) {
73530 err = -ENOMEM;
73531 - goto free_core;
73532 + goto free_core_rw;
73533 }
73534 - memset(ptr, 0, mod->init_size);
73535 - mod->module_init = ptr;
73536 + memset(ptr, 0, mod->init_size_rw);
73537 + mod->module_init_rw = ptr;
73538 +
73539 + ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
73540 + kmemleak_not_leak(ptr);
73541 + if (!ptr) {
73542 + err = -ENOMEM;
73543 + goto free_init_rw;
73544 + }
73545 +
73546 + pax_open_kernel();
73547 + memset(ptr, 0, mod->core_size_rx);
73548 + pax_close_kernel();
73549 + mod->module_core_rx = ptr;
73550 +
73551 + ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
73552 + kmemleak_not_leak(ptr);
73553 + if (!ptr && mod->init_size_rx) {
73554 + err = -ENOMEM;
73555 + goto free_core_rx;
73556 + }
73557 +
73558 + pax_open_kernel();
73559 + memset(ptr, 0, mod->init_size_rx);
73560 + pax_close_kernel();
73561 + mod->module_init_rx = ptr;
73562
73563 /* Transfer each section which specifies SHF_ALLOC */
73564 DEBUGP("final section addresses:\n");
73565 @@ -2300,17 +2391,45 @@ static noinline struct module *load_module(void __user *umod,
73566 if (!(sechdrs[i].sh_flags & SHF_ALLOC))
73567 continue;
73568
73569 - if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK)
73570 - dest = mod->module_init
73571 - + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
73572 - else
73573 - dest = mod->module_core + sechdrs[i].sh_entsize;
73574 + if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK) {
73575 + if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
73576 + dest = mod->module_init_rw
73577 + + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
73578 + else
73579 + dest = mod->module_init_rx
73580 + + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
73581 + } else {
73582 + if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
73583 + dest = mod->module_core_rw + sechdrs[i].sh_entsize;
73584 + else
73585 + dest = mod->module_core_rx + sechdrs[i].sh_entsize;
73586 + }
73587
73588 - if (sechdrs[i].sh_type != SHT_NOBITS)
73589 - memcpy(dest, (void *)sechdrs[i].sh_addr,
73590 - sechdrs[i].sh_size);
73591 + if (sechdrs[i].sh_type != SHT_NOBITS) {
73592 +
73593 +#ifdef CONFIG_PAX_KERNEXEC
73594 +#ifdef CONFIG_X86_64
73595 + if ((sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_EXECINSTR))
73596 + set_memory_x((unsigned long)dest, (sechdrs[i].sh_size + PAGE_SIZE) >> PAGE_SHIFT);
73597 +#endif
73598 + if (!(sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_ALLOC)) {
73599 + pax_open_kernel();
73600 + memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
73601 + pax_close_kernel();
73602 + } else
73603 +#endif
73604 +
73605 + memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
73606 + }
73607 /* Update sh_addr to point to copy in image. */
73608 - sechdrs[i].sh_addr = (unsigned long)dest;
73609 +
73610 +#ifdef CONFIG_PAX_KERNEXEC
73611 + if (sechdrs[i].sh_flags & SHF_EXECINSTR)
73612 + sechdrs[i].sh_addr = ktva_ktla((unsigned long)dest);
73613 + else
73614 +#endif
73615 +
73616 + sechdrs[i].sh_addr = (unsigned long)dest;
73617 DEBUGP("\t0x%lx %s\n", sechdrs[i].sh_addr, secstrings + sechdrs[i].sh_name);
73618 }
73619 /* Module has been moved. */
73620 @@ -2322,7 +2441,7 @@ static noinline struct module *load_module(void __user *umod,
73621 mod->name);
73622 if (!mod->refptr) {
73623 err = -ENOMEM;
73624 - goto free_init;
73625 + goto free_init_rx;
73626 }
73627 #endif
73628 /* Now we've moved module, initialize linked lists, etc. */
73629 @@ -2334,7 +2453,7 @@ static noinline struct module *load_module(void __user *umod,
73630 goto free_unload;
73631
73632 /* Set up license info based on the info section */
73633 - set_license(mod, get_modinfo(sechdrs, infoindex, "license"));
73634 + set_license(mod, license);
73635
73636 /*
73637 * ndiswrapper is under GPL by itself, but loads proprietary modules.
73638 @@ -2351,6 +2470,31 @@ static noinline struct module *load_module(void __user *umod,
73639 /* Set up MODINFO_ATTR fields */
73640 setup_modinfo(mod, sechdrs, infoindex);
73641
73642 + mod->args = args;
73643 +
73644 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
73645 + {
73646 + char *p, *p2;
73647 +
73648 + if (strstr(mod->args, "grsec_modharden_netdev")) {
73649 + printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.\n", mod->name);
73650 + err = -EPERM;
73651 + goto cleanup;
73652 + } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
73653 + p += strlen("grsec_modharden_normal");
73654 + p2 = strstr(p, "_");
73655 + if (p2) {
73656 + *p2 = '\0';
73657 + printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
73658 + *p2 = '_';
73659 + }
73660 + err = -EPERM;
73661 + goto cleanup;
73662 + }
73663 + }
73664 +#endif
73665 +
73666 +
73667 /* Fix up syms, so that st_value is a pointer to location. */
73668 err = simplify_symbols(sechdrs, symindex, strtab, versindex, pcpuindex,
73669 mod);
73670 @@ -2431,8 +2575,8 @@ static noinline struct module *load_module(void __user *umod,
73671
73672 /* Now do relocations. */
73673 for (i = 1; i < hdr->e_shnum; i++) {
73674 - const char *strtab = (char *)sechdrs[strindex].sh_addr;
73675 unsigned int info = sechdrs[i].sh_info;
73676 + strtab = (char *)sechdrs[strindex].sh_addr;
73677
73678 /* Not a valid relocation section? */
73679 if (info >= hdr->e_shnum)
73680 @@ -2493,16 +2637,15 @@ static noinline struct module *load_module(void __user *umod,
73681 * Do it before processing of module parameters, so the module
73682 * can provide parameter accessor functions of its own.
73683 */
73684 - if (mod->module_init)
73685 - flush_icache_range((unsigned long)mod->module_init,
73686 - (unsigned long)mod->module_init
73687 - + mod->init_size);
73688 - flush_icache_range((unsigned long)mod->module_core,
73689 - (unsigned long)mod->module_core + mod->core_size);
73690 + if (mod->module_init_rx)
73691 + flush_icache_range((unsigned long)mod->module_init_rx,
73692 + (unsigned long)mod->module_init_rx
73693 + + mod->init_size_rx);
73694 + flush_icache_range((unsigned long)mod->module_core_rx,
73695 + (unsigned long)mod->module_core_rx + mod->core_size_rx);
73696
73697 set_fs(old_fs);
73698
73699 - mod->args = args;
73700 if (section_addr(hdr, sechdrs, secstrings, "__obsparm"))
73701 printk(KERN_WARNING "%s: Ignoring obsolete parameters\n",
73702 mod->name);
73703 @@ -2546,12 +2689,16 @@ static noinline struct module *load_module(void __user *umod,
73704 free_unload:
73705 module_unload_free(mod);
73706 #if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
73707 + free_init_rx:
73708 percpu_modfree(mod->refptr);
73709 - free_init:
73710 #endif
73711 - module_free(mod, mod->module_init);
73712 - free_core:
73713 - module_free(mod, mod->module_core);
73714 + module_free_exec(mod, mod->module_init_rx);
73715 + free_core_rx:
73716 + module_free_exec(mod, mod->module_core_rx);
73717 + free_init_rw:
73718 + module_free(mod, mod->module_init_rw);
73719 + free_core_rw:
73720 + module_free(mod, mod->module_core_rw);
73721 /* mod will be freed with core. Don't access it beyond this line! */
73722 free_percpu:
73723 if (percpu)
73724 @@ -2653,10 +2800,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
73725 mod->symtab = mod->core_symtab;
73726 mod->strtab = mod->core_strtab;
73727 #endif
73728 - module_free(mod, mod->module_init);
73729 - mod->module_init = NULL;
73730 - mod->init_size = 0;
73731 - mod->init_text_size = 0;
73732 + module_free(mod, mod->module_init_rw);
73733 + module_free_exec(mod, mod->module_init_rx);
73734 + mod->module_init_rw = NULL;
73735 + mod->module_init_rx = NULL;
73736 + mod->init_size_rw = 0;
73737 + mod->init_size_rx = 0;
73738 mutex_unlock(&module_mutex);
73739
73740 return 0;
73741 @@ -2687,10 +2836,16 @@ static const char *get_ksymbol(struct module *mod,
73742 unsigned long nextval;
73743
73744 /* At worse, next value is at end of module */
73745 - if (within_module_init(addr, mod))
73746 - nextval = (unsigned long)mod->module_init+mod->init_text_size;
73747 + if (within_module_init_rx(addr, mod))
73748 + nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
73749 + else if (within_module_init_rw(addr, mod))
73750 + nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
73751 + else if (within_module_core_rx(addr, mod))
73752 + nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
73753 + else if (within_module_core_rw(addr, mod))
73754 + nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
73755 else
73756 - nextval = (unsigned long)mod->module_core+mod->core_text_size;
73757 + return NULL;
73758
73759 /* Scan for closest preceeding symbol, and next symbol. (ELF
73760 starts real symbols at 1). */
73761 @@ -2936,7 +3091,7 @@ static int m_show(struct seq_file *m, void *p)
73762 char buf[8];
73763
73764 seq_printf(m, "%s %u",
73765 - mod->name, mod->init_size + mod->core_size);
73766 + mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
73767 print_unload_info(m, mod);
73768
73769 /* Informative for users. */
73770 @@ -2945,7 +3100,7 @@ static int m_show(struct seq_file *m, void *p)
73771 mod->state == MODULE_STATE_COMING ? "Loading":
73772 "Live");
73773 /* Used by oprofile and other similar tools. */
73774 - seq_printf(m, " 0x%p", mod->module_core);
73775 + seq_printf(m, " 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
73776
73777 /* Taints info */
73778 if (mod->taints)
73779 @@ -2981,7 +3136,17 @@ static const struct file_operations proc_modules_operations = {
73780
73781 static int __init proc_modules_init(void)
73782 {
73783 +#ifndef CONFIG_GRKERNSEC_HIDESYM
73784 +#ifdef CONFIG_GRKERNSEC_PROC_USER
73785 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
73786 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
73787 + proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
73788 +#else
73789 proc_create("modules", 0, NULL, &proc_modules_operations);
73790 +#endif
73791 +#else
73792 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
73793 +#endif
73794 return 0;
73795 }
73796 module_init(proc_modules_init);
73797 @@ -3040,12 +3205,12 @@ struct module *__module_address(unsigned long addr)
73798 {
73799 struct module *mod;
73800
73801 - if (addr < module_addr_min || addr > module_addr_max)
73802 + if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
73803 + (addr < module_addr_min_rw || addr > module_addr_max_rw))
73804 return NULL;
73805
73806 list_for_each_entry_rcu(mod, &modules, list)
73807 - if (within_module_core(addr, mod)
73808 - || within_module_init(addr, mod))
73809 + if (within_module_init(addr, mod) || within_module_core(addr, mod))
73810 return mod;
73811 return NULL;
73812 }
73813 @@ -3079,11 +3244,20 @@ bool is_module_text_address(unsigned long addr)
73814 */
73815 struct module *__module_text_address(unsigned long addr)
73816 {
73817 - struct module *mod = __module_address(addr);
73818 + struct module *mod;
73819 +
73820 +#ifdef CONFIG_X86_32
73821 + addr = ktla_ktva(addr);
73822 +#endif
73823 +
73824 + if (addr < module_addr_min_rx || addr > module_addr_max_rx)
73825 + return NULL;
73826 +
73827 + mod = __module_address(addr);
73828 +
73829 if (mod) {
73830 /* Make sure it's within the text section. */
73831 - if (!within(addr, mod->module_init, mod->init_text_size)
73832 - && !within(addr, mod->module_core, mod->core_text_size))
73833 + if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
73834 mod = NULL;
73835 }
73836 return mod;
73837 diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
73838 index ec815a9..fe46e99 100644
73839 --- a/kernel/mutex-debug.c
73840 +++ b/kernel/mutex-debug.c
73841 @@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
73842 }
73843
73844 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
73845 - struct thread_info *ti)
73846 + struct task_struct *task)
73847 {
73848 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
73849
73850 /* Mark the current thread as blocked on the lock: */
73851 - ti->task->blocked_on = waiter;
73852 + task->blocked_on = waiter;
73853 }
73854
73855 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
73856 - struct thread_info *ti)
73857 + struct task_struct *task)
73858 {
73859 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
73860 - DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
73861 - DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
73862 - ti->task->blocked_on = NULL;
73863 + DEBUG_LOCKS_WARN_ON(waiter->task != task);
73864 + DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
73865 + task->blocked_on = NULL;
73866
73867 list_del_init(&waiter->list);
73868 waiter->task = NULL;
73869 @@ -75,7 +75,7 @@ void debug_mutex_unlock(struct mutex *lock)
73870 return;
73871
73872 DEBUG_LOCKS_WARN_ON(lock->magic != lock);
73873 - DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
73874 + DEBUG_LOCKS_WARN_ON(lock->owner != current);
73875 DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
73876 mutex_clear_owner(lock);
73877 }
73878 diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
73879 index 6b2d735..372d3c4 100644
73880 --- a/kernel/mutex-debug.h
73881 +++ b/kernel/mutex-debug.h
73882 @@ -20,16 +20,16 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
73883 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
73884 extern void debug_mutex_add_waiter(struct mutex *lock,
73885 struct mutex_waiter *waiter,
73886 - struct thread_info *ti);
73887 + struct task_struct *task);
73888 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
73889 - struct thread_info *ti);
73890 + struct task_struct *task);
73891 extern void debug_mutex_unlock(struct mutex *lock);
73892 extern void debug_mutex_init(struct mutex *lock, const char *name,
73893 struct lock_class_key *key);
73894
73895 static inline void mutex_set_owner(struct mutex *lock)
73896 {
73897 - lock->owner = current_thread_info();
73898 + lock->owner = current;
73899 }
73900
73901 static inline void mutex_clear_owner(struct mutex *lock)
73902 diff --git a/kernel/mutex.c b/kernel/mutex.c
73903 index f85644c..5ee9f77 100644
73904 --- a/kernel/mutex.c
73905 +++ b/kernel/mutex.c
73906 @@ -169,7 +169,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
73907 */
73908
73909 for (;;) {
73910 - struct thread_info *owner;
73911 + struct task_struct *owner;
73912
73913 /*
73914 * If we own the BKL, then don't spin. The owner of
73915 @@ -214,7 +214,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
73916 spin_lock_mutex(&lock->wait_lock, flags);
73917
73918 debug_mutex_lock_common(lock, &waiter);
73919 - debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
73920 + debug_mutex_add_waiter(lock, &waiter, task);
73921
73922 /* add waiting tasks to the end of the waitqueue (FIFO): */
73923 list_add_tail(&waiter.list, &lock->wait_list);
73924 @@ -243,8 +243,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
73925 * TASK_UNINTERRUPTIBLE case.)
73926 */
73927 if (unlikely(signal_pending_state(state, task))) {
73928 - mutex_remove_waiter(lock, &waiter,
73929 - task_thread_info(task));
73930 + mutex_remove_waiter(lock, &waiter, task);
73931 mutex_release(&lock->dep_map, 1, ip);
73932 spin_unlock_mutex(&lock->wait_lock, flags);
73933
73934 @@ -265,7 +264,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
73935 done:
73936 lock_acquired(&lock->dep_map, ip);
73937 /* got the lock - rejoice! */
73938 - mutex_remove_waiter(lock, &waiter, current_thread_info());
73939 + mutex_remove_waiter(lock, &waiter, task);
73940 mutex_set_owner(lock);
73941
73942 /* set it to 0 if there are no waiters left: */
73943 diff --git a/kernel/mutex.h b/kernel/mutex.h
73944 index 67578ca..4115fbf 100644
73945 --- a/kernel/mutex.h
73946 +++ b/kernel/mutex.h
73947 @@ -19,7 +19,7 @@
73948 #ifdef CONFIG_SMP
73949 static inline void mutex_set_owner(struct mutex *lock)
73950 {
73951 - lock->owner = current_thread_info();
73952 + lock->owner = current;
73953 }
73954
73955 static inline void mutex_clear_owner(struct mutex *lock)
73956 diff --git a/kernel/panic.c b/kernel/panic.c
73957 index 96b45d0..ff70a46 100644
73958 --- a/kernel/panic.c
73959 +++ b/kernel/panic.c
73960 @@ -71,7 +71,11 @@ NORET_TYPE void panic(const char * fmt, ...)
73961 va_end(args);
73962 printk(KERN_EMERG "Kernel panic - not syncing: %s\n",buf);
73963 #ifdef CONFIG_DEBUG_BUGVERBOSE
73964 - dump_stack();
73965 + /*
73966 + * Avoid nested stack-dumping if a panic occurs during oops processing
73967 + */
73968 + if (!oops_in_progress)
73969 + dump_stack();
73970 #endif
73971
73972 /*
73973 @@ -352,7 +356,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller, struc
73974 const char *board;
73975
73976 printk(KERN_WARNING "------------[ cut here ]------------\n");
73977 - printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
73978 + printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
73979 board = dmi_get_system_info(DMI_PRODUCT_NAME);
73980 if (board)
73981 printk(KERN_WARNING "Hardware name: %s\n", board);
73982 @@ -392,7 +396,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
73983 */
73984 void __stack_chk_fail(void)
73985 {
73986 - panic("stack-protector: Kernel stack is corrupted in: %p\n",
73987 + dump_stack();
73988 + panic("stack-protector: Kernel stack is corrupted in: %pA\n",
73989 __builtin_return_address(0));
73990 }
73991 EXPORT_SYMBOL(__stack_chk_fail);
73992 diff --git a/kernel/params.c b/kernel/params.c
73993 index d656c27..21e452c 100644
73994 --- a/kernel/params.c
73995 +++ b/kernel/params.c
73996 @@ -725,7 +725,7 @@ static ssize_t module_attr_store(struct kobject *kobj,
73997 return ret;
73998 }
73999
74000 -static struct sysfs_ops module_sysfs_ops = {
74001 +static const struct sysfs_ops module_sysfs_ops = {
74002 .show = module_attr_show,
74003 .store = module_attr_store,
74004 };
74005 @@ -739,7 +739,7 @@ static int uevent_filter(struct kset *kset, struct kobject *kobj)
74006 return 0;
74007 }
74008
74009 -static struct kset_uevent_ops module_uevent_ops = {
74010 +static const struct kset_uevent_ops module_uevent_ops = {
74011 .filter = uevent_filter,
74012 };
74013
74014 diff --git a/kernel/perf_event.c b/kernel/perf_event.c
74015 index 37ebc14..9c121d9 100644
74016 --- a/kernel/perf_event.c
74017 +++ b/kernel/perf_event.c
74018 @@ -77,7 +77,7 @@ int sysctl_perf_event_mlock __read_mostly = 516; /* 'free' kb per user */
74019 */
74020 int sysctl_perf_event_sample_rate __read_mostly = 100000;
74021
74022 -static atomic64_t perf_event_id;
74023 +static atomic64_unchecked_t perf_event_id;
74024
74025 /*
74026 * Lock for (sysadmin-configurable) event reservations:
74027 @@ -1094,9 +1094,9 @@ static void __perf_event_sync_stat(struct perf_event *event,
74028 * In order to keep per-task stats reliable we need to flip the event
74029 * values when we flip the contexts.
74030 */
74031 - value = atomic64_read(&next_event->count);
74032 - value = atomic64_xchg(&event->count, value);
74033 - atomic64_set(&next_event->count, value);
74034 + value = atomic64_read_unchecked(&next_event->count);
74035 + value = atomic64_xchg_unchecked(&event->count, value);
74036 + atomic64_set_unchecked(&next_event->count, value);
74037
74038 swap(event->total_time_enabled, next_event->total_time_enabled);
74039 swap(event->total_time_running, next_event->total_time_running);
74040 @@ -1552,7 +1552,7 @@ static u64 perf_event_read(struct perf_event *event)
74041 update_event_times(event);
74042 }
74043
74044 - return atomic64_read(&event->count);
74045 + return atomic64_read_unchecked(&event->count);
74046 }
74047
74048 /*
74049 @@ -1790,11 +1790,11 @@ static int perf_event_read_group(struct perf_event *event,
74050 values[n++] = 1 + leader->nr_siblings;
74051 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
74052 values[n++] = leader->total_time_enabled +
74053 - atomic64_read(&leader->child_total_time_enabled);
74054 + atomic64_read_unchecked(&leader->child_total_time_enabled);
74055 }
74056 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
74057 values[n++] = leader->total_time_running +
74058 - atomic64_read(&leader->child_total_time_running);
74059 + atomic64_read_unchecked(&leader->child_total_time_running);
74060 }
74061
74062 size = n * sizeof(u64);
74063 @@ -1829,11 +1829,11 @@ static int perf_event_read_one(struct perf_event *event,
74064 values[n++] = perf_event_read_value(event);
74065 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
74066 values[n++] = event->total_time_enabled +
74067 - atomic64_read(&event->child_total_time_enabled);
74068 + atomic64_read_unchecked(&event->child_total_time_enabled);
74069 }
74070 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
74071 values[n++] = event->total_time_running +
74072 - atomic64_read(&event->child_total_time_running);
74073 + atomic64_read_unchecked(&event->child_total_time_running);
74074 }
74075 if (read_format & PERF_FORMAT_ID)
74076 values[n++] = primary_event_id(event);
74077 @@ -1903,7 +1903,7 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
74078 static void perf_event_reset(struct perf_event *event)
74079 {
74080 (void)perf_event_read(event);
74081 - atomic64_set(&event->count, 0);
74082 + atomic64_set_unchecked(&event->count, 0);
74083 perf_event_update_userpage(event);
74084 }
74085
74086 @@ -2079,15 +2079,15 @@ void perf_event_update_userpage(struct perf_event *event)
74087 ++userpg->lock;
74088 barrier();
74089 userpg->index = perf_event_index(event);
74090 - userpg->offset = atomic64_read(&event->count);
74091 + userpg->offset = atomic64_read_unchecked(&event->count);
74092 if (event->state == PERF_EVENT_STATE_ACTIVE)
74093 - userpg->offset -= atomic64_read(&event->hw.prev_count);
74094 + userpg->offset -= atomic64_read_unchecked(&event->hw.prev_count);
74095
74096 userpg->time_enabled = event->total_time_enabled +
74097 - atomic64_read(&event->child_total_time_enabled);
74098 + atomic64_read_unchecked(&event->child_total_time_enabled);
74099
74100 userpg->time_running = event->total_time_running +
74101 - atomic64_read(&event->child_total_time_running);
74102 + atomic64_read_unchecked(&event->child_total_time_running);
74103
74104 barrier();
74105 ++userpg->lock;
74106 @@ -2903,14 +2903,14 @@ static void perf_output_read_one(struct perf_output_handle *handle,
74107 u64 values[4];
74108 int n = 0;
74109
74110 - values[n++] = atomic64_read(&event->count);
74111 + values[n++] = atomic64_read_unchecked(&event->count);
74112 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
74113 values[n++] = event->total_time_enabled +
74114 - atomic64_read(&event->child_total_time_enabled);
74115 + atomic64_read_unchecked(&event->child_total_time_enabled);
74116 }
74117 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
74118 values[n++] = event->total_time_running +
74119 - atomic64_read(&event->child_total_time_running);
74120 + atomic64_read_unchecked(&event->child_total_time_running);
74121 }
74122 if (read_format & PERF_FORMAT_ID)
74123 values[n++] = primary_event_id(event);
74124 @@ -2940,7 +2940,7 @@ static void perf_output_read_group(struct perf_output_handle *handle,
74125 if (leader != event)
74126 leader->pmu->read(leader);
74127
74128 - values[n++] = atomic64_read(&leader->count);
74129 + values[n++] = atomic64_read_unchecked(&leader->count);
74130 if (read_format & PERF_FORMAT_ID)
74131 values[n++] = primary_event_id(leader);
74132
74133 @@ -2952,7 +2952,7 @@ static void perf_output_read_group(struct perf_output_handle *handle,
74134 if (sub != event)
74135 sub->pmu->read(sub);
74136
74137 - values[n++] = atomic64_read(&sub->count);
74138 + values[n++] = atomic64_read_unchecked(&sub->count);
74139 if (read_format & PERF_FORMAT_ID)
74140 values[n++] = primary_event_id(sub);
74141
74142 @@ -3525,12 +3525,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
74143 * need to add enough zero bytes after the string to handle
74144 * the 64bit alignment we do later.
74145 */
74146 - buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
74147 + buf = kzalloc(PATH_MAX, GFP_KERNEL);
74148 if (!buf) {
74149 name = strncpy(tmp, "//enomem", sizeof(tmp));
74150 goto got_name;
74151 }
74152 - name = d_path(&file->f_path, buf, PATH_MAX);
74153 + name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
74154 if (IS_ERR(name)) {
74155 name = strncpy(tmp, "//toolong", sizeof(tmp));
74156 goto got_name;
74157 @@ -3783,7 +3783,7 @@ static void perf_swevent_add(struct perf_event *event, u64 nr,
74158 {
74159 struct hw_perf_event *hwc = &event->hw;
74160
74161 - atomic64_add(nr, &event->count);
74162 + atomic64_add_unchecked(nr, &event->count);
74163
74164 if (!hwc->sample_period)
74165 return;
74166 @@ -4040,9 +4040,9 @@ static void cpu_clock_perf_event_update(struct perf_event *event)
74167 u64 now;
74168
74169 now = cpu_clock(cpu);
74170 - prev = atomic64_read(&event->hw.prev_count);
74171 - atomic64_set(&event->hw.prev_count, now);
74172 - atomic64_add(now - prev, &event->count);
74173 + prev = atomic64_read_unchecked(&event->hw.prev_count);
74174 + atomic64_set_unchecked(&event->hw.prev_count, now);
74175 + atomic64_add_unchecked(now - prev, &event->count);
74176 }
74177
74178 static int cpu_clock_perf_event_enable(struct perf_event *event)
74179 @@ -4050,7 +4050,7 @@ static int cpu_clock_perf_event_enable(struct perf_event *event)
74180 struct hw_perf_event *hwc = &event->hw;
74181 int cpu = raw_smp_processor_id();
74182
74183 - atomic64_set(&hwc->prev_count, cpu_clock(cpu));
74184 + atomic64_set_unchecked(&hwc->prev_count, cpu_clock(cpu));
74185 perf_swevent_start_hrtimer(event);
74186
74187 return 0;
74188 @@ -4082,9 +4082,9 @@ static void task_clock_perf_event_update(struct perf_event *event, u64 now)
74189 u64 prev;
74190 s64 delta;
74191
74192 - prev = atomic64_xchg(&event->hw.prev_count, now);
74193 + prev = atomic64_xchg_unchecked(&event->hw.prev_count, now);
74194 delta = now - prev;
74195 - atomic64_add(delta, &event->count);
74196 + atomic64_add_unchecked(delta, &event->count);
74197 }
74198
74199 static int task_clock_perf_event_enable(struct perf_event *event)
74200 @@ -4094,7 +4094,7 @@ static int task_clock_perf_event_enable(struct perf_event *event)
74201
74202 now = event->ctx->time;
74203
74204 - atomic64_set(&hwc->prev_count, now);
74205 + atomic64_set_unchecked(&hwc->prev_count, now);
74206
74207 perf_swevent_start_hrtimer(event);
74208
74209 @@ -4289,7 +4289,7 @@ perf_event_alloc(struct perf_event_attr *attr,
74210 event->parent = parent_event;
74211
74212 event->ns = get_pid_ns(current->nsproxy->pid_ns);
74213 - event->id = atomic64_inc_return(&perf_event_id);
74214 + event->id = atomic64_inc_return_unchecked(&perf_event_id);
74215
74216 event->state = PERF_EVENT_STATE_INACTIVE;
74217
74218 @@ -4720,15 +4720,15 @@ static void sync_child_event(struct perf_event *child_event,
74219 if (child_event->attr.inherit_stat)
74220 perf_event_read_event(child_event, child);
74221
74222 - child_val = atomic64_read(&child_event->count);
74223 + child_val = atomic64_read_unchecked(&child_event->count);
74224
74225 /*
74226 * Add back the child's count to the parent's count:
74227 */
74228 - atomic64_add(child_val, &parent_event->count);
74229 - atomic64_add(child_event->total_time_enabled,
74230 + atomic64_add_unchecked(child_val, &parent_event->count);
74231 + atomic64_add_unchecked(child_event->total_time_enabled,
74232 &parent_event->child_total_time_enabled);
74233 - atomic64_add(child_event->total_time_running,
74234 + atomic64_add_unchecked(child_event->total_time_running,
74235 &parent_event->child_total_time_running);
74236
74237 /*
74238 diff --git a/kernel/pid.c b/kernel/pid.c
74239 index fce7198..4f23a7e 100644
74240 --- a/kernel/pid.c
74241 +++ b/kernel/pid.c
74242 @@ -33,6 +33,7 @@
74243 #include <linux/rculist.h>
74244 #include <linux/bootmem.h>
74245 #include <linux/hash.h>
74246 +#include <linux/security.h>
74247 #include <linux/pid_namespace.h>
74248 #include <linux/init_task.h>
74249 #include <linux/syscalls.h>
74250 @@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
74251
74252 int pid_max = PID_MAX_DEFAULT;
74253
74254 -#define RESERVED_PIDS 300
74255 +#define RESERVED_PIDS 500
74256
74257 int pid_max_min = RESERVED_PIDS + 1;
74258 int pid_max_max = PID_MAX_LIMIT;
74259 @@ -383,7 +384,14 @@ EXPORT_SYMBOL(pid_task);
74260 */
74261 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
74262 {
74263 - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
74264 + struct task_struct *task;
74265 +
74266 + task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
74267 +
74268 + if (gr_pid_is_chrooted(task))
74269 + return NULL;
74270 +
74271 + return task;
74272 }
74273
74274 struct task_struct *find_task_by_vpid(pid_t vnr)
74275 @@ -391,6 +399,11 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
74276 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
74277 }
74278
74279 +struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
74280 +{
74281 + return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
74282 +}
74283 +
74284 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
74285 {
74286 struct pid *pid;
74287 diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
74288 index 5c9dc22..d271117 100644
74289 --- a/kernel/posix-cpu-timers.c
74290 +++ b/kernel/posix-cpu-timers.c
74291 @@ -6,6 +6,7 @@
74292 #include <linux/posix-timers.h>
74293 #include <linux/errno.h>
74294 #include <linux/math64.h>
74295 +#include <linux/security.h>
74296 #include <asm/uaccess.h>
74297 #include <linux/kernel_stat.h>
74298 #include <trace/events/timer.h>
74299 @@ -1697,7 +1698,7 @@ static long thread_cpu_nsleep_restart(struct restart_block *restart_block)
74300
74301 static __init int init_posix_cpu_timers(void)
74302 {
74303 - struct k_clock process = {
74304 + static struct k_clock process = {
74305 .clock_getres = process_cpu_clock_getres,
74306 .clock_get = process_cpu_clock_get,
74307 .clock_set = do_posix_clock_nosettime,
74308 @@ -1705,7 +1706,7 @@ static __init int init_posix_cpu_timers(void)
74309 .nsleep = process_cpu_nsleep,
74310 .nsleep_restart = process_cpu_nsleep_restart,
74311 };
74312 - struct k_clock thread = {
74313 + static struct k_clock thread = {
74314 .clock_getres = thread_cpu_clock_getres,
74315 .clock_get = thread_cpu_clock_get,
74316 .clock_set = do_posix_clock_nosettime,
74317 diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
74318 index 5e76d22..cf1baeb 100644
74319 --- a/kernel/posix-timers.c
74320 +++ b/kernel/posix-timers.c
74321 @@ -42,6 +42,7 @@
74322 #include <linux/compiler.h>
74323 #include <linux/idr.h>
74324 #include <linux/posix-timers.h>
74325 +#include <linux/grsecurity.h>
74326 #include <linux/syscalls.h>
74327 #include <linux/wait.h>
74328 #include <linux/workqueue.h>
74329 @@ -131,7 +132,7 @@ static DEFINE_SPINLOCK(idr_lock);
74330 * which we beg off on and pass to do_sys_settimeofday().
74331 */
74332
74333 -static struct k_clock posix_clocks[MAX_CLOCKS];
74334 +static struct k_clock *posix_clocks[MAX_CLOCKS];
74335
74336 /*
74337 * These ones are defined below.
74338 @@ -157,8 +158,8 @@ static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)
74339 */
74340 #define CLOCK_DISPATCH(clock, call, arglist) \
74341 ((clock) < 0 ? posix_cpu_##call arglist : \
74342 - (posix_clocks[clock].call != NULL \
74343 - ? (*posix_clocks[clock].call) arglist : common_##call arglist))
74344 + (posix_clocks[clock]->call != NULL \
74345 + ? (*posix_clocks[clock]->call) arglist : common_##call arglist))
74346
74347 /*
74348 * Default clock hook functions when the struct k_clock passed
74349 @@ -172,7 +173,7 @@ static inline int common_clock_getres(const clockid_t which_clock,
74350 struct timespec *tp)
74351 {
74352 tp->tv_sec = 0;
74353 - tp->tv_nsec = posix_clocks[which_clock].res;
74354 + tp->tv_nsec = posix_clocks[which_clock]->res;
74355 return 0;
74356 }
74357
74358 @@ -217,9 +218,11 @@ static inline int invalid_clockid(const clockid_t which_clock)
74359 return 0;
74360 if ((unsigned) which_clock >= MAX_CLOCKS)
74361 return 1;
74362 - if (posix_clocks[which_clock].clock_getres != NULL)
74363 + if (posix_clocks[which_clock] == NULL)
74364 return 0;
74365 - if (posix_clocks[which_clock].res != 0)
74366 + if (posix_clocks[which_clock]->clock_getres != NULL)
74367 + return 0;
74368 + if (posix_clocks[which_clock]->res != 0)
74369 return 0;
74370 return 1;
74371 }
74372 @@ -266,29 +269,29 @@ int posix_get_coarse_res(const clockid_t which_clock, struct timespec *tp)
74373 */
74374 static __init int init_posix_timers(void)
74375 {
74376 - struct k_clock clock_realtime = {
74377 + static struct k_clock clock_realtime = {
74378 .clock_getres = hrtimer_get_res,
74379 };
74380 - struct k_clock clock_monotonic = {
74381 + static struct k_clock clock_monotonic = {
74382 .clock_getres = hrtimer_get_res,
74383 .clock_get = posix_ktime_get_ts,
74384 .clock_set = do_posix_clock_nosettime,
74385 };
74386 - struct k_clock clock_monotonic_raw = {
74387 + static struct k_clock clock_monotonic_raw = {
74388 .clock_getres = hrtimer_get_res,
74389 .clock_get = posix_get_monotonic_raw,
74390 .clock_set = do_posix_clock_nosettime,
74391 .timer_create = no_timer_create,
74392 .nsleep = no_nsleep,
74393 };
74394 - struct k_clock clock_realtime_coarse = {
74395 + static struct k_clock clock_realtime_coarse = {
74396 .clock_getres = posix_get_coarse_res,
74397 .clock_get = posix_get_realtime_coarse,
74398 .clock_set = do_posix_clock_nosettime,
74399 .timer_create = no_timer_create,
74400 .nsleep = no_nsleep,
74401 };
74402 - struct k_clock clock_monotonic_coarse = {
74403 + static struct k_clock clock_monotonic_coarse = {
74404 .clock_getres = posix_get_coarse_res,
74405 .clock_get = posix_get_monotonic_coarse,
74406 .clock_set = do_posix_clock_nosettime,
74407 @@ -296,6 +299,8 @@ static __init int init_posix_timers(void)
74408 .nsleep = no_nsleep,
74409 };
74410
74411 + pax_track_stack();
74412 +
74413 register_posix_clock(CLOCK_REALTIME, &clock_realtime);
74414 register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
74415 register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
74416 @@ -484,7 +489,7 @@ void register_posix_clock(const clockid_t clock_id, struct k_clock *new_clock)
74417 return;
74418 }
74419
74420 - posix_clocks[clock_id] = *new_clock;
74421 + posix_clocks[clock_id] = new_clock;
74422 }
74423 EXPORT_SYMBOL_GPL(register_posix_clock);
74424
74425 @@ -948,6 +953,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
74426 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
74427 return -EFAULT;
74428
74429 + /* only the CLOCK_REALTIME clock can be set, all other clocks
74430 + have their clock_set fptr set to a nosettime dummy function
74431 + CLOCK_REALTIME has a NULL clock_set fptr which causes it to
74432 + call common_clock_set, which calls do_sys_settimeofday, which
74433 + we hook
74434 + */
74435 +
74436 return CLOCK_DISPATCH(which_clock, clock_set, (which_clock, &new_tp));
74437 }
74438
74439 diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
74440 index 04a9e90..bc355aa 100644
74441 --- a/kernel/power/hibernate.c
74442 +++ b/kernel/power/hibernate.c
74443 @@ -48,14 +48,14 @@ enum {
74444
74445 static int hibernation_mode = HIBERNATION_SHUTDOWN;
74446
74447 -static struct platform_hibernation_ops *hibernation_ops;
74448 +static const struct platform_hibernation_ops *hibernation_ops;
74449
74450 /**
74451 * hibernation_set_ops - set the global hibernate operations
74452 * @ops: the hibernation operations to use in subsequent hibernation transitions
74453 */
74454
74455 -void hibernation_set_ops(struct platform_hibernation_ops *ops)
74456 +void hibernation_set_ops(const struct platform_hibernation_ops *ops)
74457 {
74458 if (ops && !(ops->begin && ops->end && ops->pre_snapshot
74459 && ops->prepare && ops->finish && ops->enter && ops->pre_restore
74460 diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
74461 index e8b3370..484c2e4 100644
74462 --- a/kernel/power/poweroff.c
74463 +++ b/kernel/power/poweroff.c
74464 @@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
74465 .enable_mask = SYSRQ_ENABLE_BOOT,
74466 };
74467
74468 -static int pm_sysrq_init(void)
74469 +static int __init pm_sysrq_init(void)
74470 {
74471 register_sysrq_key('o', &sysrq_poweroff_op);
74472 return 0;
74473 diff --git a/kernel/power/process.c b/kernel/power/process.c
74474 index e7cd671..56d5f459 100644
74475 --- a/kernel/power/process.c
74476 +++ b/kernel/power/process.c
74477 @@ -37,12 +37,15 @@ static int try_to_freeze_tasks(bool sig_only)
74478 struct timeval start, end;
74479 u64 elapsed_csecs64;
74480 unsigned int elapsed_csecs;
74481 + bool timedout = false;
74482
74483 do_gettimeofday(&start);
74484
74485 end_time = jiffies + TIMEOUT;
74486 do {
74487 todo = 0;
74488 + if (time_after(jiffies, end_time))
74489 + timedout = true;
74490 read_lock(&tasklist_lock);
74491 do_each_thread(g, p) {
74492 if (frozen(p) || !freezeable(p))
74493 @@ -57,15 +60,17 @@ static int try_to_freeze_tasks(bool sig_only)
74494 * It is "frozen enough". If the task does wake
74495 * up, it will immediately call try_to_freeze.
74496 */
74497 - if (!task_is_stopped_or_traced(p) &&
74498 - !freezer_should_skip(p))
74499 + if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
74500 todo++;
74501 + if (timedout) {
74502 + printk(KERN_ERR "Task refusing to freeze:\n");
74503 + sched_show_task(p);
74504 + }
74505 + }
74506 } while_each_thread(g, p);
74507 read_unlock(&tasklist_lock);
74508 yield(); /* Yield is okay here */
74509 - if (time_after(jiffies, end_time))
74510 - break;
74511 - } while (todo);
74512 + } while (todo && !timedout);
74513
74514 do_gettimeofday(&end);
74515 elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
74516 diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
74517 index 40dd021..fb30ceb 100644
74518 --- a/kernel/power/suspend.c
74519 +++ b/kernel/power/suspend.c
74520 @@ -23,13 +23,13 @@ const char *const pm_states[PM_SUSPEND_MAX] = {
74521 [PM_SUSPEND_MEM] = "mem",
74522 };
74523
74524 -static struct platform_suspend_ops *suspend_ops;
74525 +static const struct platform_suspend_ops *suspend_ops;
74526
74527 /**
74528 * suspend_set_ops - Set the global suspend method table.
74529 * @ops: Pointer to ops structure.
74530 */
74531 -void suspend_set_ops(struct platform_suspend_ops *ops)
74532 +void suspend_set_ops(const struct platform_suspend_ops *ops)
74533 {
74534 mutex_lock(&pm_mutex);
74535 suspend_ops = ops;
74536 diff --git a/kernel/printk.c b/kernel/printk.c
74537 index 4cade47..4d17900 100644
74538 --- a/kernel/printk.c
74539 +++ b/kernel/printk.c
74540 @@ -33,6 +33,7 @@
74541 #include <linux/bootmem.h>
74542 #include <linux/syscalls.h>
74543 #include <linux/kexec.h>
74544 +#include <linux/syslog.h>
74545
74546 #include <asm/uaccess.h>
74547
74548 @@ -256,38 +257,30 @@ static inline void boot_delay_msec(void)
74549 }
74550 #endif
74551
74552 -/*
74553 - * Commands to do_syslog:
74554 - *
74555 - * 0 -- Close the log. Currently a NOP.
74556 - * 1 -- Open the log. Currently a NOP.
74557 - * 2 -- Read from the log.
74558 - * 3 -- Read all messages remaining in the ring buffer.
74559 - * 4 -- Read and clear all messages remaining in the ring buffer
74560 - * 5 -- Clear ring buffer.
74561 - * 6 -- Disable printk's to console
74562 - * 7 -- Enable printk's to console
74563 - * 8 -- Set level of messages printed to console
74564 - * 9 -- Return number of unread characters in the log buffer
74565 - * 10 -- Return size of the log buffer
74566 - */
74567 -int do_syslog(int type, char __user *buf, int len)
74568 +int do_syslog(int type, char __user *buf, int len, bool from_file)
74569 {
74570 unsigned i, j, limit, count;
74571 int do_clear = 0;
74572 char c;
74573 int error = 0;
74574
74575 - error = security_syslog(type);
74576 +#ifdef CONFIG_GRKERNSEC_DMESG
74577 + if (grsec_enable_dmesg &&
74578 + (!from_file || (from_file && type == SYSLOG_ACTION_OPEN)) &&
74579 + !capable(CAP_SYS_ADMIN))
74580 + return -EPERM;
74581 +#endif
74582 +
74583 + error = security_syslog(type, from_file);
74584 if (error)
74585 return error;
74586
74587 switch (type) {
74588 - case 0: /* Close log */
74589 + case SYSLOG_ACTION_CLOSE: /* Close log */
74590 break;
74591 - case 1: /* Open log */
74592 + case SYSLOG_ACTION_OPEN: /* Open log */
74593 break;
74594 - case 2: /* Read from log */
74595 + case SYSLOG_ACTION_READ: /* Read from log */
74596 error = -EINVAL;
74597 if (!buf || len < 0)
74598 goto out;
74599 @@ -318,10 +311,12 @@ int do_syslog(int type, char __user *buf, int len)
74600 if (!error)
74601 error = i;
74602 break;
74603 - case 4: /* Read/clear last kernel messages */
74604 + /* Read/clear last kernel messages */
74605 + case SYSLOG_ACTION_READ_CLEAR:
74606 do_clear = 1;
74607 /* FALL THRU */
74608 - case 3: /* Read last kernel messages */
74609 + /* Read last kernel messages */
74610 + case SYSLOG_ACTION_READ_ALL:
74611 error = -EINVAL;
74612 if (!buf || len < 0)
74613 goto out;
74614 @@ -374,21 +369,25 @@ int do_syslog(int type, char __user *buf, int len)
74615 }
74616 }
74617 break;
74618 - case 5: /* Clear ring buffer */
74619 + /* Clear ring buffer */
74620 + case SYSLOG_ACTION_CLEAR:
74621 logged_chars = 0;
74622 break;
74623 - case 6: /* Disable logging to console */
74624 + /* Disable logging to console */
74625 + case SYSLOG_ACTION_CONSOLE_OFF:
74626 if (saved_console_loglevel == -1)
74627 saved_console_loglevel = console_loglevel;
74628 console_loglevel = minimum_console_loglevel;
74629 break;
74630 - case 7: /* Enable logging to console */
74631 + /* Enable logging to console */
74632 + case SYSLOG_ACTION_CONSOLE_ON:
74633 if (saved_console_loglevel != -1) {
74634 console_loglevel = saved_console_loglevel;
74635 saved_console_loglevel = -1;
74636 }
74637 break;
74638 - case 8: /* Set level of messages printed to console */
74639 + /* Set level of messages printed to console */
74640 + case SYSLOG_ACTION_CONSOLE_LEVEL:
74641 error = -EINVAL;
74642 if (len < 1 || len > 8)
74643 goto out;
74644 @@ -399,10 +398,12 @@ int do_syslog(int type, char __user *buf, int len)
74645 saved_console_loglevel = -1;
74646 error = 0;
74647 break;
74648 - case 9: /* Number of chars in the log buffer */
74649 + /* Number of chars in the log buffer */
74650 + case SYSLOG_ACTION_SIZE_UNREAD:
74651 error = log_end - log_start;
74652 break;
74653 - case 10: /* Size of the log buffer */
74654 + /* Size of the log buffer */
74655 + case SYSLOG_ACTION_SIZE_BUFFER:
74656 error = log_buf_len;
74657 break;
74658 default:
74659 @@ -415,7 +416,7 @@ out:
74660
74661 SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
74662 {
74663 - return do_syslog(type, buf, len);
74664 + return do_syslog(type, buf, len, SYSLOG_FROM_CALL);
74665 }
74666
74667 /*
74668 diff --git a/kernel/profile.c b/kernel/profile.c
74669 index dfadc5b..7f59404 100644
74670 --- a/kernel/profile.c
74671 +++ b/kernel/profile.c
74672 @@ -39,7 +39,7 @@ struct profile_hit {
74673 /* Oprofile timer tick hook */
74674 static int (*timer_hook)(struct pt_regs *) __read_mostly;
74675
74676 -static atomic_t *prof_buffer;
74677 +static atomic_unchecked_t *prof_buffer;
74678 static unsigned long prof_len, prof_shift;
74679
74680 int prof_on __read_mostly;
74681 @@ -283,7 +283,7 @@ static void profile_flip_buffers(void)
74682 hits[i].pc = 0;
74683 continue;
74684 }
74685 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
74686 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
74687 hits[i].hits = hits[i].pc = 0;
74688 }
74689 }
74690 @@ -346,9 +346,9 @@ void profile_hits(int type, void *__pc, unsigned int nr_hits)
74691 * Add the current hit(s) and flush the write-queue out
74692 * to the global buffer:
74693 */
74694 - atomic_add(nr_hits, &prof_buffer[pc]);
74695 + atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
74696 for (i = 0; i < NR_PROFILE_HIT; ++i) {
74697 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
74698 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
74699 hits[i].pc = hits[i].hits = 0;
74700 }
74701 out:
74702 @@ -426,7 +426,7 @@ void profile_hits(int type, void *__pc, unsigned int nr_hits)
74703 if (prof_on != type || !prof_buffer)
74704 return;
74705 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
74706 - atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
74707 + atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
74708 }
74709 #endif /* !CONFIG_SMP */
74710 EXPORT_SYMBOL_GPL(profile_hits);
74711 @@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
74712 return -EFAULT;
74713 buf++; p++; count--; read++;
74714 }
74715 - pnt = (char *)prof_buffer + p - sizeof(atomic_t);
74716 + pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
74717 if (copy_to_user(buf, (void *)pnt, count))
74718 return -EFAULT;
74719 read += count;
74720 @@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
74721 }
74722 #endif
74723 profile_discard_flip_buffers();
74724 - memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
74725 + memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
74726 return count;
74727 }
74728
74729 diff --git a/kernel/ptrace.c b/kernel/ptrace.c
74730 index 05625f6..733bf70 100644
74731 --- a/kernel/ptrace.c
74732 +++ b/kernel/ptrace.c
74733 @@ -117,7 +117,8 @@ int ptrace_check_attach(struct task_struct *child, int kill)
74734 return ret;
74735 }
74736
74737 -int __ptrace_may_access(struct task_struct *task, unsigned int mode)
74738 +static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
74739 + unsigned int log)
74740 {
74741 const struct cred *cred = current_cred(), *tcred;
74742
74743 @@ -141,7 +142,9 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
74744 cred->gid != tcred->egid ||
74745 cred->gid != tcred->sgid ||
74746 cred->gid != tcred->gid) &&
74747 - !capable(CAP_SYS_PTRACE)) {
74748 + ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
74749 + (log && !capable(CAP_SYS_PTRACE)))
74750 + ) {
74751 rcu_read_unlock();
74752 return -EPERM;
74753 }
74754 @@ -149,7 +152,9 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
74755 smp_rmb();
74756 if (task->mm)
74757 dumpable = get_dumpable(task->mm);
74758 - if (!dumpable && !capable(CAP_SYS_PTRACE))
74759 + if (!dumpable &&
74760 + ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
74761 + (log && !capable(CAP_SYS_PTRACE))))
74762 return -EPERM;
74763
74764 return security_ptrace_access_check(task, mode);
74765 @@ -159,7 +164,16 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
74766 {
74767 int err;
74768 task_lock(task);
74769 - err = __ptrace_may_access(task, mode);
74770 + err = __ptrace_may_access(task, mode, 0);
74771 + task_unlock(task);
74772 + return !err;
74773 +}
74774 +
74775 +bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
74776 +{
74777 + int err;
74778 + task_lock(task);
74779 + err = __ptrace_may_access(task, mode, 1);
74780 task_unlock(task);
74781 return !err;
74782 }
74783 @@ -186,7 +200,7 @@ int ptrace_attach(struct task_struct *task)
74784 goto out;
74785
74786 task_lock(task);
74787 - retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
74788 + retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
74789 task_unlock(task);
74790 if (retval)
74791 goto unlock_creds;
74792 @@ -199,7 +213,7 @@ int ptrace_attach(struct task_struct *task)
74793 goto unlock_tasklist;
74794
74795 task->ptrace = PT_PTRACED;
74796 - if (capable(CAP_SYS_PTRACE))
74797 + if (capable_nolog(CAP_SYS_PTRACE))
74798 task->ptrace |= PT_PTRACE_CAP;
74799
74800 __ptrace_link(task, current);
74801 @@ -351,6 +365,8 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
74802 {
74803 int copied = 0;
74804
74805 + pax_track_stack();
74806 +
74807 while (len > 0) {
74808 char buf[128];
74809 int this_len, retval;
74810 @@ -376,6 +392,8 @@ int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long ds
74811 {
74812 int copied = 0;
74813
74814 + pax_track_stack();
74815 +
74816 while (len > 0) {
74817 char buf[128];
74818 int this_len, retval;
74819 @@ -517,6 +535,8 @@ int ptrace_request(struct task_struct *child, long request,
74820 int ret = -EIO;
74821 siginfo_t siginfo;
74822
74823 + pax_track_stack();
74824 +
74825 switch (request) {
74826 case PTRACE_PEEKTEXT:
74827 case PTRACE_PEEKDATA:
74828 @@ -532,18 +552,18 @@ int ptrace_request(struct task_struct *child, long request,
74829 ret = ptrace_setoptions(child, data);
74830 break;
74831 case PTRACE_GETEVENTMSG:
74832 - ret = put_user(child->ptrace_message, (unsigned long __user *) data);
74833 + ret = put_user(child->ptrace_message, (__force unsigned long __user *) data);
74834 break;
74835
74836 case PTRACE_GETSIGINFO:
74837 ret = ptrace_getsiginfo(child, &siginfo);
74838 if (!ret)
74839 - ret = copy_siginfo_to_user((siginfo_t __user *) data,
74840 + ret = copy_siginfo_to_user((__force siginfo_t __user *) data,
74841 &siginfo);
74842 break;
74843
74844 case PTRACE_SETSIGINFO:
74845 - if (copy_from_user(&siginfo, (siginfo_t __user *) data,
74846 + if (copy_from_user(&siginfo, (__force siginfo_t __user *) data,
74847 sizeof siginfo))
74848 ret = -EFAULT;
74849 else
74850 @@ -621,14 +641,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, long, addr, long, data)
74851 goto out;
74852 }
74853
74854 + if (gr_handle_ptrace(child, request)) {
74855 + ret = -EPERM;
74856 + goto out_put_task_struct;
74857 + }
74858 +
74859 if (request == PTRACE_ATTACH) {
74860 ret = ptrace_attach(child);
74861 /*
74862 * Some architectures need to do book-keeping after
74863 * a ptrace attach.
74864 */
74865 - if (!ret)
74866 + if (!ret) {
74867 arch_ptrace_attach(child);
74868 + gr_audit_ptrace(child);
74869 + }
74870 goto out_put_task_struct;
74871 }
74872
74873 @@ -653,7 +680,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data)
74874 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
74875 if (copied != sizeof(tmp))
74876 return -EIO;
74877 - return put_user(tmp, (unsigned long __user *)data);
74878 + return put_user(tmp, (__force unsigned long __user *)data);
74879 }
74880
74881 int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
74882 @@ -675,6 +702,8 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
74883 siginfo_t siginfo;
74884 int ret;
74885
74886 + pax_track_stack();
74887 +
74888 switch (request) {
74889 case PTRACE_PEEKTEXT:
74890 case PTRACE_PEEKDATA:
74891 @@ -740,14 +769,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
74892 goto out;
74893 }
74894
74895 + if (gr_handle_ptrace(child, request)) {
74896 + ret = -EPERM;
74897 + goto out_put_task_struct;
74898 + }
74899 +
74900 if (request == PTRACE_ATTACH) {
74901 ret = ptrace_attach(child);
74902 /*
74903 * Some architectures need to do book-keeping after
74904 * a ptrace attach.
74905 */
74906 - if (!ret)
74907 + if (!ret) {
74908 arch_ptrace_attach(child);
74909 + gr_audit_ptrace(child);
74910 + }
74911 goto out_put_task_struct;
74912 }
74913
74914 diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
74915 index 697c0a0..2402696 100644
74916 --- a/kernel/rcutorture.c
74917 +++ b/kernel/rcutorture.c
74918 @@ -118,12 +118,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
74919 { 0 };
74920 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
74921 { 0 };
74922 -static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
74923 -static atomic_t n_rcu_torture_alloc;
74924 -static atomic_t n_rcu_torture_alloc_fail;
74925 -static atomic_t n_rcu_torture_free;
74926 -static atomic_t n_rcu_torture_mberror;
74927 -static atomic_t n_rcu_torture_error;
74928 +static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
74929 +static atomic_unchecked_t n_rcu_torture_alloc;
74930 +static atomic_unchecked_t n_rcu_torture_alloc_fail;
74931 +static atomic_unchecked_t n_rcu_torture_free;
74932 +static atomic_unchecked_t n_rcu_torture_mberror;
74933 +static atomic_unchecked_t n_rcu_torture_error;
74934 static long n_rcu_torture_timers;
74935 static struct list_head rcu_torture_removed;
74936 static cpumask_var_t shuffle_tmp_mask;
74937 @@ -187,11 +187,11 @@ rcu_torture_alloc(void)
74938
74939 spin_lock_bh(&rcu_torture_lock);
74940 if (list_empty(&rcu_torture_freelist)) {
74941 - atomic_inc(&n_rcu_torture_alloc_fail);
74942 + atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
74943 spin_unlock_bh(&rcu_torture_lock);
74944 return NULL;
74945 }
74946 - atomic_inc(&n_rcu_torture_alloc);
74947 + atomic_inc_unchecked(&n_rcu_torture_alloc);
74948 p = rcu_torture_freelist.next;
74949 list_del_init(p);
74950 spin_unlock_bh(&rcu_torture_lock);
74951 @@ -204,7 +204,7 @@ rcu_torture_alloc(void)
74952 static void
74953 rcu_torture_free(struct rcu_torture *p)
74954 {
74955 - atomic_inc(&n_rcu_torture_free);
74956 + atomic_inc_unchecked(&n_rcu_torture_free);
74957 spin_lock_bh(&rcu_torture_lock);
74958 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
74959 spin_unlock_bh(&rcu_torture_lock);
74960 @@ -319,7 +319,7 @@ rcu_torture_cb(struct rcu_head *p)
74961 i = rp->rtort_pipe_count;
74962 if (i > RCU_TORTURE_PIPE_LEN)
74963 i = RCU_TORTURE_PIPE_LEN;
74964 - atomic_inc(&rcu_torture_wcount[i]);
74965 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
74966 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
74967 rp->rtort_mbtest = 0;
74968 rcu_torture_free(rp);
74969 @@ -359,7 +359,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
74970 i = rp->rtort_pipe_count;
74971 if (i > RCU_TORTURE_PIPE_LEN)
74972 i = RCU_TORTURE_PIPE_LEN;
74973 - atomic_inc(&rcu_torture_wcount[i]);
74974 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
74975 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
74976 rp->rtort_mbtest = 0;
74977 list_del(&rp->rtort_free);
74978 @@ -653,7 +653,7 @@ rcu_torture_writer(void *arg)
74979 i = old_rp->rtort_pipe_count;
74980 if (i > RCU_TORTURE_PIPE_LEN)
74981 i = RCU_TORTURE_PIPE_LEN;
74982 - atomic_inc(&rcu_torture_wcount[i]);
74983 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
74984 old_rp->rtort_pipe_count++;
74985 cur_ops->deferred_free(old_rp);
74986 }
74987 @@ -718,7 +718,7 @@ static void rcu_torture_timer(unsigned long unused)
74988 return;
74989 }
74990 if (p->rtort_mbtest == 0)
74991 - atomic_inc(&n_rcu_torture_mberror);
74992 + atomic_inc_unchecked(&n_rcu_torture_mberror);
74993 spin_lock(&rand_lock);
74994 cur_ops->read_delay(&rand);
74995 n_rcu_torture_timers++;
74996 @@ -776,7 +776,7 @@ rcu_torture_reader(void *arg)
74997 continue;
74998 }
74999 if (p->rtort_mbtest == 0)
75000 - atomic_inc(&n_rcu_torture_mberror);
75001 + atomic_inc_unchecked(&n_rcu_torture_mberror);
75002 cur_ops->read_delay(&rand);
75003 preempt_disable();
75004 pipe_count = p->rtort_pipe_count;
75005 @@ -834,17 +834,17 @@ rcu_torture_printk(char *page)
75006 rcu_torture_current,
75007 rcu_torture_current_version,
75008 list_empty(&rcu_torture_freelist),
75009 - atomic_read(&n_rcu_torture_alloc),
75010 - atomic_read(&n_rcu_torture_alloc_fail),
75011 - atomic_read(&n_rcu_torture_free),
75012 - atomic_read(&n_rcu_torture_mberror),
75013 + atomic_read_unchecked(&n_rcu_torture_alloc),
75014 + atomic_read_unchecked(&n_rcu_torture_alloc_fail),
75015 + atomic_read_unchecked(&n_rcu_torture_free),
75016 + atomic_read_unchecked(&n_rcu_torture_mberror),
75017 n_rcu_torture_timers);
75018 - if (atomic_read(&n_rcu_torture_mberror) != 0)
75019 + if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0)
75020 cnt += sprintf(&page[cnt], " !!!");
75021 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
75022 if (i > 1) {
75023 cnt += sprintf(&page[cnt], "!!! ");
75024 - atomic_inc(&n_rcu_torture_error);
75025 + atomic_inc_unchecked(&n_rcu_torture_error);
75026 WARN_ON_ONCE(1);
75027 }
75028 cnt += sprintf(&page[cnt], "Reader Pipe: ");
75029 @@ -858,7 +858,7 @@ rcu_torture_printk(char *page)
75030 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
75031 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
75032 cnt += sprintf(&page[cnt], " %d",
75033 - atomic_read(&rcu_torture_wcount[i]));
75034 + atomic_read_unchecked(&rcu_torture_wcount[i]));
75035 }
75036 cnt += sprintf(&page[cnt], "\n");
75037 if (cur_ops->stats)
75038 @@ -1084,7 +1084,7 @@ rcu_torture_cleanup(void)
75039
75040 if (cur_ops->cleanup)
75041 cur_ops->cleanup();
75042 - if (atomic_read(&n_rcu_torture_error))
75043 + if (atomic_read_unchecked(&n_rcu_torture_error))
75044 rcu_torture_print_module_parms("End of test: FAILURE");
75045 else
75046 rcu_torture_print_module_parms("End of test: SUCCESS");
75047 @@ -1138,13 +1138,13 @@ rcu_torture_init(void)
75048
75049 rcu_torture_current = NULL;
75050 rcu_torture_current_version = 0;
75051 - atomic_set(&n_rcu_torture_alloc, 0);
75052 - atomic_set(&n_rcu_torture_alloc_fail, 0);
75053 - atomic_set(&n_rcu_torture_free, 0);
75054 - atomic_set(&n_rcu_torture_mberror, 0);
75055 - atomic_set(&n_rcu_torture_error, 0);
75056 + atomic_set_unchecked(&n_rcu_torture_alloc, 0);
75057 + atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
75058 + atomic_set_unchecked(&n_rcu_torture_free, 0);
75059 + atomic_set_unchecked(&n_rcu_torture_mberror, 0);
75060 + atomic_set_unchecked(&n_rcu_torture_error, 0);
75061 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
75062 - atomic_set(&rcu_torture_wcount[i], 0);
75063 + atomic_set_unchecked(&rcu_torture_wcount[i], 0);
75064 for_each_possible_cpu(cpu) {
75065 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
75066 per_cpu(rcu_torture_count, cpu)[i] = 0;
75067 diff --git a/kernel/rcutree.c b/kernel/rcutree.c
75068 index 683c4f3..97f54c6 100644
75069 --- a/kernel/rcutree.c
75070 +++ b/kernel/rcutree.c
75071 @@ -1303,7 +1303,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
75072 /*
75073 * Do softirq processing for the current CPU.
75074 */
75075 -static void rcu_process_callbacks(struct softirq_action *unused)
75076 +static void rcu_process_callbacks(void)
75077 {
75078 /*
75079 * Memory references from any prior RCU read-side critical sections
75080 diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
75081 index c03edf7..ac1b341 100644
75082 --- a/kernel/rcutree_plugin.h
75083 +++ b/kernel/rcutree_plugin.h
75084 @@ -145,7 +145,7 @@ static void rcu_preempt_note_context_switch(int cpu)
75085 */
75086 void __rcu_read_lock(void)
75087 {
75088 - ACCESS_ONCE(current->rcu_read_lock_nesting)++;
75089 + ACCESS_ONCE_RW(current->rcu_read_lock_nesting)++;
75090 barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */
75091 }
75092 EXPORT_SYMBOL_GPL(__rcu_read_lock);
75093 @@ -251,7 +251,7 @@ void __rcu_read_unlock(void)
75094 struct task_struct *t = current;
75095
75096 barrier(); /* needed if we ever invoke rcu_read_unlock in rcutree.c */
75097 - if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 &&
75098 + if (--ACCESS_ONCE_RW(t->rcu_read_lock_nesting) == 0 &&
75099 unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
75100 rcu_read_unlock_special(t);
75101 }
75102 diff --git a/kernel/relay.c b/kernel/relay.c
75103 index 760c262..908e9ee 100644
75104 --- a/kernel/relay.c
75105 +++ b/kernel/relay.c
75106 @@ -171,10 +171,14 @@ depopulate:
75107 */
75108 static struct rchan_buf *relay_create_buf(struct rchan *chan)
75109 {
75110 - struct rchan_buf *buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL);
75111 + struct rchan_buf *buf;
75112 +
75113 + if (chan->n_subbufs > UINT_MAX / sizeof(size_t *))
75114 + return NULL;
75115 +
75116 + buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL);
75117 if (!buf)
75118 return NULL;
75119 -
75120 buf->padding = kmalloc(chan->n_subbufs * sizeof(size_t *), GFP_KERNEL);
75121 if (!buf->padding)
75122 goto free_buf;
75123 @@ -581,6 +585,8 @@ struct rchan *relay_open(const char *base_filename,
75124
75125 if (!(subbuf_size && n_subbufs))
75126 return NULL;
75127 + if (subbuf_size > UINT_MAX / n_subbufs)
75128 + return NULL;
75129
75130 chan = kzalloc(sizeof(struct rchan), GFP_KERNEL);
75131 if (!chan)
75132 @@ -1222,7 +1228,7 @@ static int subbuf_splice_actor(struct file *in,
75133 unsigned int flags,
75134 int *nonpad_ret)
75135 {
75136 - unsigned int pidx, poff, total_len, subbuf_pages, nr_pages, ret;
75137 + unsigned int pidx, poff, total_len, subbuf_pages, nr_pages;
75138 struct rchan_buf *rbuf = in->private_data;
75139 unsigned int subbuf_size = rbuf->chan->subbuf_size;
75140 uint64_t pos = (uint64_t) *ppos;
75141 @@ -1241,6 +1247,9 @@ static int subbuf_splice_actor(struct file *in,
75142 .ops = &relay_pipe_buf_ops,
75143 .spd_release = relay_page_release,
75144 };
75145 + ssize_t ret;
75146 +
75147 + pax_track_stack();
75148
75149 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
75150 return 0;
75151 diff --git a/kernel/resource.c b/kernel/resource.c
75152 index fb11a58..4e61ae1 100644
75153 --- a/kernel/resource.c
75154 +++ b/kernel/resource.c
75155 @@ -132,8 +132,18 @@ static const struct file_operations proc_iomem_operations = {
75156
75157 static int __init ioresources_init(void)
75158 {
75159 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
75160 +#ifdef CONFIG_GRKERNSEC_PROC_USER
75161 + proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
75162 + proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
75163 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
75164 + proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
75165 + proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
75166 +#endif
75167 +#else
75168 proc_create("ioports", 0, NULL, &proc_ioports_operations);
75169 proc_create("iomem", 0, NULL, &proc_iomem_operations);
75170 +#endif
75171 return 0;
75172 }
75173 __initcall(ioresources_init);
75174 diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
75175 index a56f629..1fc4989 100644
75176 --- a/kernel/rtmutex-tester.c
75177 +++ b/kernel/rtmutex-tester.c
75178 @@ -21,7 +21,7 @@
75179 #define MAX_RT_TEST_MUTEXES 8
75180
75181 static spinlock_t rttest_lock;
75182 -static atomic_t rttest_event;
75183 +static atomic_unchecked_t rttest_event;
75184
75185 struct test_thread_data {
75186 int opcode;
75187 @@ -64,7 +64,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
75188
75189 case RTTEST_LOCKCONT:
75190 td->mutexes[td->opdata] = 1;
75191 - td->event = atomic_add_return(1, &rttest_event);
75192 + td->event = atomic_add_return_unchecked(1, &rttest_event);
75193 return 0;
75194
75195 case RTTEST_RESET:
75196 @@ -82,7 +82,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
75197 return 0;
75198
75199 case RTTEST_RESETEVENT:
75200 - atomic_set(&rttest_event, 0);
75201 + atomic_set_unchecked(&rttest_event, 0);
75202 return 0;
75203
75204 default:
75205 @@ -99,9 +99,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
75206 return ret;
75207
75208 td->mutexes[id] = 1;
75209 - td->event = atomic_add_return(1, &rttest_event);
75210 + td->event = atomic_add_return_unchecked(1, &rttest_event);
75211 rt_mutex_lock(&mutexes[id]);
75212 - td->event = atomic_add_return(1, &rttest_event);
75213 + td->event = atomic_add_return_unchecked(1, &rttest_event);
75214 td->mutexes[id] = 4;
75215 return 0;
75216
75217 @@ -112,9 +112,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
75218 return ret;
75219
75220 td->mutexes[id] = 1;
75221 - td->event = atomic_add_return(1, &rttest_event);
75222 + td->event = atomic_add_return_unchecked(1, &rttest_event);
75223 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
75224 - td->event = atomic_add_return(1, &rttest_event);
75225 + td->event = atomic_add_return_unchecked(1, &rttest_event);
75226 td->mutexes[id] = ret ? 0 : 4;
75227 return ret ? -EINTR : 0;
75228
75229 @@ -123,9 +123,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
75230 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
75231 return ret;
75232
75233 - td->event = atomic_add_return(1, &rttest_event);
75234 + td->event = atomic_add_return_unchecked(1, &rttest_event);
75235 rt_mutex_unlock(&mutexes[id]);
75236 - td->event = atomic_add_return(1, &rttest_event);
75237 + td->event = atomic_add_return_unchecked(1, &rttest_event);
75238 td->mutexes[id] = 0;
75239 return 0;
75240
75241 @@ -187,7 +187,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
75242 break;
75243
75244 td->mutexes[dat] = 2;
75245 - td->event = atomic_add_return(1, &rttest_event);
75246 + td->event = atomic_add_return_unchecked(1, &rttest_event);
75247 break;
75248
75249 case RTTEST_LOCKBKL:
75250 @@ -208,7 +208,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
75251 return;
75252
75253 td->mutexes[dat] = 3;
75254 - td->event = atomic_add_return(1, &rttest_event);
75255 + td->event = atomic_add_return_unchecked(1, &rttest_event);
75256 break;
75257
75258 case RTTEST_LOCKNOWAIT:
75259 @@ -220,7 +220,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
75260 return;
75261
75262 td->mutexes[dat] = 1;
75263 - td->event = atomic_add_return(1, &rttest_event);
75264 + td->event = atomic_add_return_unchecked(1, &rttest_event);
75265 return;
75266
75267 case RTTEST_LOCKBKL:
75268 diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
75269 index 29bd4ba..8c5de90 100644
75270 --- a/kernel/rtmutex.c
75271 +++ b/kernel/rtmutex.c
75272 @@ -511,7 +511,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
75273 */
75274 spin_lock_irqsave(&pendowner->pi_lock, flags);
75275
75276 - WARN_ON(!pendowner->pi_blocked_on);
75277 + BUG_ON(!pendowner->pi_blocked_on);
75278 WARN_ON(pendowner->pi_blocked_on != waiter);
75279 WARN_ON(pendowner->pi_blocked_on->lock != lock);
75280
75281 diff --git a/kernel/sched.c b/kernel/sched.c
75282 index 0591df8..e3af3a4 100644
75283 --- a/kernel/sched.c
75284 +++ b/kernel/sched.c
75285 @@ -5043,7 +5043,7 @@ out:
75286 * In CONFIG_NO_HZ case, the idle load balance owner will do the
75287 * rebalancing for all the cpus for whom scheduler ticks are stopped.
75288 */
75289 -static void run_rebalance_domains(struct softirq_action *h)
75290 +static void run_rebalance_domains(void)
75291 {
75292 int this_cpu = smp_processor_id();
75293 struct rq *this_rq = cpu_rq(this_cpu);
75294 @@ -5690,6 +5690,19 @@ pick_next_task(struct rq *rq)
75295 }
75296 }
75297
75298 +#ifdef CONFIG_GRKERNSEC_SETXID
75299 +extern void gr_delayed_cred_worker(void);
75300 +static inline void gr_cred_schedule(void)
75301 +{
75302 + if (unlikely(current->delayed_cred))
75303 + gr_delayed_cred_worker();
75304 +}
75305 +#else
75306 +static inline void gr_cred_schedule(void)
75307 +{
75308 +}
75309 +#endif
75310 +
75311 /*
75312 * schedule() is the main scheduler function.
75313 */
75314 @@ -5700,6 +5713,8 @@ asmlinkage void __sched schedule(void)
75315 struct rq *rq;
75316 int cpu;
75317
75318 + pax_track_stack();
75319 +
75320 need_resched:
75321 preempt_disable();
75322 cpu = smp_processor_id();
75323 @@ -5713,6 +5728,8 @@ need_resched_nonpreemptible:
75324
75325 schedule_debug(prev);
75326
75327 + gr_cred_schedule();
75328 +
75329 if (sched_feat(HRTICK))
75330 hrtick_clear(rq);
75331
75332 @@ -5770,7 +5787,7 @@ EXPORT_SYMBOL(schedule);
75333 * Look out! "owner" is an entirely speculative pointer
75334 * access and not reliable.
75335 */
75336 -int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
75337 +int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
75338 {
75339 unsigned int cpu;
75340 struct rq *rq;
75341 @@ -5784,10 +5801,10 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
75342 * DEBUG_PAGEALLOC could have unmapped it if
75343 * the mutex owner just released it and exited.
75344 */
75345 - if (probe_kernel_address(&owner->cpu, cpu))
75346 + if (probe_kernel_address(&task_thread_info(owner)->cpu, cpu))
75347 return 0;
75348 #else
75349 - cpu = owner->cpu;
75350 + cpu = task_thread_info(owner)->cpu;
75351 #endif
75352
75353 /*
75354 @@ -5816,7 +5833,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
75355 /*
75356 * Is that owner really running on that cpu?
75357 */
75358 - if (task_thread_info(rq->curr) != owner || need_resched())
75359 + if (rq->curr != owner || need_resched())
75360 return 0;
75361
75362 cpu_relax();
75363 @@ -6359,6 +6376,8 @@ int can_nice(const struct task_struct *p, const int nice)
75364 /* convert nice value [19,-20] to rlimit style value [1,40] */
75365 int nice_rlim = 20 - nice;
75366
75367 + gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
75368 +
75369 return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
75370 capable(CAP_SYS_NICE));
75371 }
75372 @@ -6392,7 +6411,8 @@ SYSCALL_DEFINE1(nice, int, increment)
75373 if (nice > 19)
75374 nice = 19;
75375
75376 - if (increment < 0 && !can_nice(current, nice))
75377 + if (increment < 0 && (!can_nice(current, nice) ||
75378 + gr_handle_chroot_nice()))
75379 return -EPERM;
75380
75381 retval = security_task_setnice(current, nice);
75382 @@ -8774,7 +8794,7 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
75383 long power;
75384 int weight;
75385
75386 - WARN_ON(!sd || !sd->groups);
75387 + BUG_ON(!sd || !sd->groups);
75388
75389 if (cpu != group_first_cpu(sd->groups))
75390 return;
75391 diff --git a/kernel/signal.c b/kernel/signal.c
75392 index 2494827..cda80a0 100644
75393 --- a/kernel/signal.c
75394 +++ b/kernel/signal.c
75395 @@ -41,12 +41,12 @@
75396
75397 static struct kmem_cache *sigqueue_cachep;
75398
75399 -static void __user *sig_handler(struct task_struct *t, int sig)
75400 +static __sighandler_t sig_handler(struct task_struct *t, int sig)
75401 {
75402 return t->sighand->action[sig - 1].sa.sa_handler;
75403 }
75404
75405 -static int sig_handler_ignored(void __user *handler, int sig)
75406 +static int sig_handler_ignored(__sighandler_t handler, int sig)
75407 {
75408 /* Is it explicitly or implicitly ignored? */
75409 return handler == SIG_IGN ||
75410 @@ -56,7 +56,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
75411 static int sig_task_ignored(struct task_struct *t, int sig,
75412 int from_ancestor_ns)
75413 {
75414 - void __user *handler;
75415 + __sighandler_t handler;
75416
75417 handler = sig_handler(t, sig);
75418
75419 @@ -207,6 +207,9 @@ static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
75420 */
75421 user = get_uid(__task_cred(t)->user);
75422 atomic_inc(&user->sigpending);
75423 +
75424 + if (!override_rlimit)
75425 + gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
75426 if (override_rlimit ||
75427 atomic_read(&user->sigpending) <=
75428 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
75429 @@ -327,7 +330,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
75430
75431 int unhandled_signal(struct task_struct *tsk, int sig)
75432 {
75433 - void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
75434 + __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
75435 if (is_global_init(tsk))
75436 return 1;
75437 if (handler != SIG_IGN && handler != SIG_DFL)
75438 @@ -627,6 +630,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
75439 }
75440 }
75441
75442 + /* allow glibc communication via tgkill to other threads in our
75443 + thread group */
75444 + if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
75445 + sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
75446 + && gr_handle_signal(t, sig))
75447 + return -EPERM;
75448 +
75449 return security_task_kill(t, info, sig, 0);
75450 }
75451
75452 @@ -968,7 +978,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
75453 return send_signal(sig, info, p, 1);
75454 }
75455
75456 -static int
75457 +int
75458 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
75459 {
75460 return send_signal(sig, info, t, 0);
75461 @@ -1005,6 +1015,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
75462 unsigned long int flags;
75463 int ret, blocked, ignored;
75464 struct k_sigaction *action;
75465 + int is_unhandled = 0;
75466
75467 spin_lock_irqsave(&t->sighand->siglock, flags);
75468 action = &t->sighand->action[sig-1];
75469 @@ -1019,9 +1030,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
75470 }
75471 if (action->sa.sa_handler == SIG_DFL)
75472 t->signal->flags &= ~SIGNAL_UNKILLABLE;
75473 + if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
75474 + is_unhandled = 1;
75475 ret = specific_send_sig_info(sig, info, t);
75476 spin_unlock_irqrestore(&t->sighand->siglock, flags);
75477
75478 + /* only deal with unhandled signals, java etc trigger SIGSEGV during
75479 + normal operation */
75480 + if (is_unhandled) {
75481 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
75482 + gr_handle_crash(t, sig);
75483 + }
75484 +
75485 return ret;
75486 }
75487
75488 @@ -1081,8 +1101,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
75489 {
75490 int ret = check_kill_permission(sig, info, p);
75491
75492 - if (!ret && sig)
75493 + if (!ret && sig) {
75494 ret = do_send_sig_info(sig, info, p, true);
75495 + if (!ret)
75496 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
75497 + }
75498
75499 return ret;
75500 }
75501 @@ -1644,6 +1667,8 @@ void ptrace_notify(int exit_code)
75502 {
75503 siginfo_t info;
75504
75505 + pax_track_stack();
75506 +
75507 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
75508
75509 memset(&info, 0, sizeof info);
75510 @@ -2275,7 +2300,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
75511 int error = -ESRCH;
75512
75513 rcu_read_lock();
75514 - p = find_task_by_vpid(pid);
75515 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
75516 + /* allow glibc communication via tgkill to other threads in our
75517 + thread group */
75518 + if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
75519 + sig == (SIGRTMIN+1) && tgid == info->si_pid)
75520 + p = find_task_by_vpid_unrestricted(pid);
75521 + else
75522 +#endif
75523 + p = find_task_by_vpid(pid);
75524 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
75525 error = check_kill_permission(sig, info, p);
75526 /*
75527 diff --git a/kernel/smp.c b/kernel/smp.c
75528 index aa9cff3..631a0de 100644
75529 --- a/kernel/smp.c
75530 +++ b/kernel/smp.c
75531 @@ -522,22 +522,22 @@ int smp_call_function(void (*func)(void *), void *info, int wait)
75532 }
75533 EXPORT_SYMBOL(smp_call_function);
75534
75535 -void ipi_call_lock(void)
75536 +void ipi_call_lock(void) __acquires(call_function.lock)
75537 {
75538 spin_lock(&call_function.lock);
75539 }
75540
75541 -void ipi_call_unlock(void)
75542 +void ipi_call_unlock(void) __releases(call_function.lock)
75543 {
75544 spin_unlock(&call_function.lock);
75545 }
75546
75547 -void ipi_call_lock_irq(void)
75548 +void ipi_call_lock_irq(void) __acquires(call_function.lock)
75549 {
75550 spin_lock_irq(&call_function.lock);
75551 }
75552
75553 -void ipi_call_unlock_irq(void)
75554 +void ipi_call_unlock_irq(void) __releases(call_function.lock)
75555 {
75556 spin_unlock_irq(&call_function.lock);
75557 }
75558 diff --git a/kernel/softirq.c b/kernel/softirq.c
75559 index 04a0252..580c512 100644
75560 --- a/kernel/softirq.c
75561 +++ b/kernel/softirq.c
75562 @@ -56,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
75563
75564 static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
75565
75566 -char *softirq_to_name[NR_SOFTIRQS] = {
75567 +const char * const softirq_to_name[NR_SOFTIRQS] = {
75568 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
75569 "TASKLET", "SCHED", "HRTIMER", "RCU"
75570 };
75571 @@ -206,7 +206,7 @@ EXPORT_SYMBOL(local_bh_enable_ip);
75572
75573 asmlinkage void __do_softirq(void)
75574 {
75575 - struct softirq_action *h;
75576 + const struct softirq_action *h;
75577 __u32 pending;
75578 int max_restart = MAX_SOFTIRQ_RESTART;
75579 int cpu;
75580 @@ -233,7 +233,7 @@ restart:
75581 kstat_incr_softirqs_this_cpu(h - softirq_vec);
75582
75583 trace_softirq_entry(h, softirq_vec);
75584 - h->action(h);
75585 + h->action();
75586 trace_softirq_exit(h, softirq_vec);
75587 if (unlikely(prev_count != preempt_count())) {
75588 printk(KERN_ERR "huh, entered softirq %td %s %p"
75589 @@ -363,9 +363,11 @@ void raise_softirq(unsigned int nr)
75590 local_irq_restore(flags);
75591 }
75592
75593 -void open_softirq(int nr, void (*action)(struct softirq_action *))
75594 +void open_softirq(int nr, void (*action)(void))
75595 {
75596 - softirq_vec[nr].action = action;
75597 + pax_open_kernel();
75598 + *(void **)&softirq_vec[nr].action = action;
75599 + pax_close_kernel();
75600 }
75601
75602 /*
75603 @@ -419,7 +421,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
75604
75605 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
75606
75607 -static void tasklet_action(struct softirq_action *a)
75608 +static void tasklet_action(void)
75609 {
75610 struct tasklet_struct *list;
75611
75612 @@ -454,7 +456,7 @@ static void tasklet_action(struct softirq_action *a)
75613 }
75614 }
75615
75616 -static void tasklet_hi_action(struct softirq_action *a)
75617 +static void tasklet_hi_action(void)
75618 {
75619 struct tasklet_struct *list;
75620
75621 diff --git a/kernel/sys.c b/kernel/sys.c
75622 index e9512b1..f07185f 100644
75623 --- a/kernel/sys.c
75624 +++ b/kernel/sys.c
75625 @@ -133,6 +133,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
75626 error = -EACCES;
75627 goto out;
75628 }
75629 +
75630 + if (gr_handle_chroot_setpriority(p, niceval)) {
75631 + error = -EACCES;
75632 + goto out;
75633 + }
75634 +
75635 no_nice = security_task_setnice(p, niceval);
75636 if (no_nice) {
75637 error = no_nice;
75638 @@ -190,10 +196,10 @@ SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
75639 !(user = find_user(who)))
75640 goto out_unlock; /* No processes for this user */
75641
75642 - do_each_thread(g, p)
75643 + do_each_thread(g, p) {
75644 if (__task_cred(p)->uid == who)
75645 error = set_one_prio(p, niceval, error);
75646 - while_each_thread(g, p);
75647 + } while_each_thread(g, p);
75648 if (who != cred->uid)
75649 free_uid(user); /* For find_user() */
75650 break;
75651 @@ -253,13 +259,13 @@ SYSCALL_DEFINE2(getpriority, int, which, int, who)
75652 !(user = find_user(who)))
75653 goto out_unlock; /* No processes for this user */
75654
75655 - do_each_thread(g, p)
75656 + do_each_thread(g, p) {
75657 if (__task_cred(p)->uid == who) {
75658 niceval = 20 - task_nice(p);
75659 if (niceval > retval)
75660 retval = niceval;
75661 }
75662 - while_each_thread(g, p);
75663 + } while_each_thread(g, p);
75664 if (who != cred->uid)
75665 free_uid(user); /* for find_user() */
75666 break;
75667 @@ -509,6 +515,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
75668 goto error;
75669 }
75670
75671 + if (gr_check_group_change(new->gid, new->egid, -1))
75672 + goto error;
75673 +
75674 if (rgid != (gid_t) -1 ||
75675 (egid != (gid_t) -1 && egid != old->gid))
75676 new->sgid = new->egid;
75677 @@ -542,6 +551,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
75678 goto error;
75679
75680 retval = -EPERM;
75681 +
75682 + if (gr_check_group_change(gid, gid, gid))
75683 + goto error;
75684 +
75685 if (capable(CAP_SETGID))
75686 new->gid = new->egid = new->sgid = new->fsgid = gid;
75687 else if (gid == old->gid || gid == old->sgid)
75688 @@ -559,7 +572,7 @@ error:
75689 /*
75690 * change the user struct in a credentials set to match the new UID
75691 */
75692 -static int set_user(struct cred *new)
75693 +int set_user(struct cred *new)
75694 {
75695 struct user_struct *new_user;
75696
75697 @@ -567,12 +580,19 @@ static int set_user(struct cred *new)
75698 if (!new_user)
75699 return -EAGAIN;
75700
75701 + /*
75702 + * We don't fail in case of NPROC limit excess here because too many
75703 + * poorly written programs don't check set*uid() return code, assuming
75704 + * it never fails if called by root. We may still enforce NPROC limit
75705 + * for programs doing set*uid()+execve() by harmlessly deferring the
75706 + * failure to the execve() stage.
75707 + */
75708 if (atomic_read(&new_user->processes) >=
75709 current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
75710 - new_user != INIT_USER) {
75711 - free_uid(new_user);
75712 - return -EAGAIN;
75713 - }
75714 + new_user != INIT_USER)
75715 + current->flags |= PF_NPROC_EXCEEDED;
75716 + else
75717 + current->flags &= ~PF_NPROC_EXCEEDED;
75718
75719 free_uid(new->user);
75720 new->user = new_user;
75721 @@ -627,6 +647,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
75722 goto error;
75723 }
75724
75725 + if (gr_check_user_change(new->uid, new->euid, -1))
75726 + goto error;
75727 +
75728 if (new->uid != old->uid) {
75729 retval = set_user(new);
75730 if (retval < 0)
75731 @@ -675,6 +698,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
75732 goto error;
75733
75734 retval = -EPERM;
75735 +
75736 + if (gr_check_crash_uid(uid))
75737 + goto error;
75738 + if (gr_check_user_change(uid, uid, uid))
75739 + goto error;
75740 +
75741 if (capable(CAP_SETUID)) {
75742 new->suid = new->uid = uid;
75743 if (uid != old->uid) {
75744 @@ -732,6 +761,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
75745 goto error;
75746 }
75747
75748 + if (gr_check_user_change(ruid, euid, -1))
75749 + goto error;
75750 +
75751 if (ruid != (uid_t) -1) {
75752 new->uid = ruid;
75753 if (ruid != old->uid) {
75754 @@ -800,6 +832,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
75755 goto error;
75756 }
75757
75758 + if (gr_check_group_change(rgid, egid, -1))
75759 + goto error;
75760 +
75761 if (rgid != (gid_t) -1)
75762 new->gid = rgid;
75763 if (egid != (gid_t) -1)
75764 @@ -849,6 +884,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
75765 if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS) < 0)
75766 goto error;
75767
75768 + if (gr_check_user_change(-1, -1, uid))
75769 + goto error;
75770 +
75771 if (uid == old->uid || uid == old->euid ||
75772 uid == old->suid || uid == old->fsuid ||
75773 capable(CAP_SETUID)) {
75774 @@ -889,6 +927,9 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
75775 if (gid == old->gid || gid == old->egid ||
75776 gid == old->sgid || gid == old->fsgid ||
75777 capable(CAP_SETGID)) {
75778 + if (gr_check_group_change(-1, -1, gid))
75779 + goto error;
75780 +
75781 if (gid != old_fsgid) {
75782 new->fsgid = gid;
75783 goto change_okay;
75784 @@ -1454,7 +1495,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
75785 error = get_dumpable(me->mm);
75786 break;
75787 case PR_SET_DUMPABLE:
75788 - if (arg2 < 0 || arg2 > 1) {
75789 + if (arg2 > 1) {
75790 error = -EINVAL;
75791 break;
75792 }
75793 diff --git a/kernel/sysctl.c b/kernel/sysctl.c
75794 index b8bd058..ab6a76be 100644
75795 --- a/kernel/sysctl.c
75796 +++ b/kernel/sysctl.c
75797 @@ -63,6 +63,13 @@
75798 static int deprecated_sysctl_warning(struct __sysctl_args *args);
75799
75800 #if defined(CONFIG_SYSCTL)
75801 +#include <linux/grsecurity.h>
75802 +#include <linux/grinternal.h>
75803 +
75804 +extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
75805 +extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
75806 + const int op);
75807 +extern int gr_handle_chroot_sysctl(const int op);
75808
75809 /* External variables not in a header file. */
75810 extern int C_A_D;
75811 @@ -168,6 +175,7 @@ static int proc_do_cad_pid(struct ctl_table *table, int write,
75812 static int proc_taint(struct ctl_table *table, int write,
75813 void __user *buffer, size_t *lenp, loff_t *ppos);
75814 #endif
75815 +extern ctl_table grsecurity_table[];
75816
75817 static struct ctl_table root_table[];
75818 static struct ctl_table_root sysctl_table_root;
75819 @@ -200,6 +208,21 @@ extern struct ctl_table epoll_table[];
75820 int sysctl_legacy_va_layout;
75821 #endif
75822
75823 +#ifdef CONFIG_PAX_SOFTMODE
75824 +static ctl_table pax_table[] = {
75825 + {
75826 + .ctl_name = CTL_UNNUMBERED,
75827 + .procname = "softmode",
75828 + .data = &pax_softmode,
75829 + .maxlen = sizeof(unsigned int),
75830 + .mode = 0600,
75831 + .proc_handler = &proc_dointvec,
75832 + },
75833 +
75834 + { .ctl_name = 0 }
75835 +};
75836 +#endif
75837 +
75838 extern int prove_locking;
75839 extern int lock_stat;
75840
75841 @@ -251,6 +274,24 @@ static int max_wakeup_granularity_ns = NSEC_PER_SEC; /* 1 second */
75842 #endif
75843
75844 static struct ctl_table kern_table[] = {
75845 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
75846 + {
75847 + .ctl_name = CTL_UNNUMBERED,
75848 + .procname = "grsecurity",
75849 + .mode = 0500,
75850 + .child = grsecurity_table,
75851 + },
75852 +#endif
75853 +
75854 +#ifdef CONFIG_PAX_SOFTMODE
75855 + {
75856 + .ctl_name = CTL_UNNUMBERED,
75857 + .procname = "pax",
75858 + .mode = 0500,
75859 + .child = pax_table,
75860 + },
75861 +#endif
75862 +
75863 {
75864 .ctl_name = CTL_UNNUMBERED,
75865 .procname = "sched_child_runs_first",
75866 @@ -567,8 +608,8 @@ static struct ctl_table kern_table[] = {
75867 .data = &modprobe_path,
75868 .maxlen = KMOD_PATH_LEN,
75869 .mode = 0644,
75870 - .proc_handler = &proc_dostring,
75871 - .strategy = &sysctl_string,
75872 + .proc_handler = &proc_dostring_modpriv,
75873 + .strategy = &sysctl_string_modpriv,
75874 },
75875 {
75876 .ctl_name = CTL_UNNUMBERED,
75877 @@ -1247,6 +1288,13 @@ static struct ctl_table vm_table[] = {
75878 .mode = 0644,
75879 .proc_handler = &proc_dointvec
75880 },
75881 + {
75882 + .procname = "heap_stack_gap",
75883 + .data = &sysctl_heap_stack_gap,
75884 + .maxlen = sizeof(sysctl_heap_stack_gap),
75885 + .mode = 0644,
75886 + .proc_handler = proc_doulongvec_minmax,
75887 + },
75888 #else
75889 {
75890 .ctl_name = CTL_UNNUMBERED,
75891 @@ -1803,6 +1851,8 @@ static int do_sysctl_strategy(struct ctl_table_root *root,
75892 return 0;
75893 }
75894
75895 +static int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op);
75896 +
75897 static int parse_table(int __user *name, int nlen,
75898 void __user *oldval, size_t __user *oldlenp,
75899 void __user *newval, size_t newlen,
75900 @@ -1821,7 +1871,7 @@ repeat:
75901 if (n == table->ctl_name) {
75902 int error;
75903 if (table->child) {
75904 - if (sysctl_perm(root, table, MAY_EXEC))
75905 + if (sysctl_perm_nochk(root, table, MAY_EXEC))
75906 return -EPERM;
75907 name++;
75908 nlen--;
75909 @@ -1906,6 +1956,33 @@ int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
75910 int error;
75911 int mode;
75912
75913 + if (table->parent != NULL && table->parent->procname != NULL &&
75914 + table->procname != NULL &&
75915 + gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
75916 + return -EACCES;
75917 + if (gr_handle_chroot_sysctl(op))
75918 + return -EACCES;
75919 + error = gr_handle_sysctl(table, op);
75920 + if (error)
75921 + return error;
75922 +
75923 + error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
75924 + if (error)
75925 + return error;
75926 +
75927 + if (root->permissions)
75928 + mode = root->permissions(root, current->nsproxy, table);
75929 + else
75930 + mode = table->mode;
75931 +
75932 + return test_perm(mode, op);
75933 +}
75934 +
75935 +int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op)
75936 +{
75937 + int error;
75938 + int mode;
75939 +
75940 error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
75941 if (error)
75942 return error;
75943 @@ -2335,6 +2412,16 @@ int proc_dostring(struct ctl_table *table, int write,
75944 buffer, lenp, ppos);
75945 }
75946
75947 +int proc_dostring_modpriv(struct ctl_table *table, int write,
75948 + void __user *buffer, size_t *lenp, loff_t *ppos)
75949 +{
75950 + if (write && !capable(CAP_SYS_MODULE))
75951 + return -EPERM;
75952 +
75953 + return _proc_do_string(table->data, table->maxlen, write,
75954 + buffer, lenp, ppos);
75955 +}
75956 +
75957
75958 static int do_proc_dointvec_conv(int *negp, unsigned long *lvalp,
75959 int *valp,
75960 @@ -2609,7 +2696,7 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
75961 vleft = table->maxlen / sizeof(unsigned long);
75962 left = *lenp;
75963
75964 - for (; left && vleft--; i++, min++, max++, first=0) {
75965 + for (; left && vleft--; i++, first=0) {
75966 if (write) {
75967 while (left) {
75968 char c;
75969 @@ -2910,6 +2997,12 @@ int proc_dostring(struct ctl_table *table, int write,
75970 return -ENOSYS;
75971 }
75972
75973 +int proc_dostring_modpriv(struct ctl_table *table, int write,
75974 + void __user *buffer, size_t *lenp, loff_t *ppos)
75975 +{
75976 + return -ENOSYS;
75977 +}
75978 +
75979 int proc_dointvec(struct ctl_table *table, int write,
75980 void __user *buffer, size_t *lenp, loff_t *ppos)
75981 {
75982 @@ -3038,6 +3131,16 @@ int sysctl_string(struct ctl_table *table,
75983 return 1;
75984 }
75985
75986 +int sysctl_string_modpriv(struct ctl_table *table,
75987 + void __user *oldval, size_t __user *oldlenp,
75988 + void __user *newval, size_t newlen)
75989 +{
75990 + if (newval && newlen && !capable(CAP_SYS_MODULE))
75991 + return -EPERM;
75992 +
75993 + return sysctl_string(table, oldval, oldlenp, newval, newlen);
75994 +}
75995 +
75996 /*
75997 * This function makes sure that all of the integers in the vector
75998 * are between the minimum and maximum values given in the arrays
75999 @@ -3182,6 +3285,13 @@ int sysctl_string(struct ctl_table *table,
76000 return -ENOSYS;
76001 }
76002
76003 +int sysctl_string_modpriv(struct ctl_table *table,
76004 + void __user *oldval, size_t __user *oldlenp,
76005 + void __user *newval, size_t newlen)
76006 +{
76007 + return -ENOSYS;
76008 +}
76009 +
76010 int sysctl_intvec(struct ctl_table *table,
76011 void __user *oldval, size_t __user *oldlenp,
76012 void __user *newval, size_t newlen)
76013 @@ -3246,6 +3356,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
76014 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
76015 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
76016 EXPORT_SYMBOL(proc_dostring);
76017 +EXPORT_SYMBOL(proc_dostring_modpriv);
76018 EXPORT_SYMBOL(proc_doulongvec_minmax);
76019 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
76020 EXPORT_SYMBOL(register_sysctl_table);
76021 @@ -3254,5 +3365,6 @@ EXPORT_SYMBOL(sysctl_intvec);
76022 EXPORT_SYMBOL(sysctl_jiffies);
76023 EXPORT_SYMBOL(sysctl_ms_jiffies);
76024 EXPORT_SYMBOL(sysctl_string);
76025 +EXPORT_SYMBOL(sysctl_string_modpriv);
76026 EXPORT_SYMBOL(sysctl_data);
76027 EXPORT_SYMBOL(unregister_sysctl_table);
76028 diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
76029 index 469193c..ea3ecb2 100644
76030 --- a/kernel/sysctl_check.c
76031 +++ b/kernel/sysctl_check.c
76032 @@ -1489,10 +1489,12 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
76033 } else {
76034 if ((table->strategy == sysctl_data) ||
76035 (table->strategy == sysctl_string) ||
76036 + (table->strategy == sysctl_string_modpriv) ||
76037 (table->strategy == sysctl_intvec) ||
76038 (table->strategy == sysctl_jiffies) ||
76039 (table->strategy == sysctl_ms_jiffies) ||
76040 (table->proc_handler == proc_dostring) ||
76041 + (table->proc_handler == proc_dostring_modpriv) ||
76042 (table->proc_handler == proc_dointvec) ||
76043 (table->proc_handler == proc_dointvec_minmax) ||
76044 (table->proc_handler == proc_dointvec_jiffies) ||
76045 diff --git a/kernel/taskstats.c b/kernel/taskstats.c
76046 index a4ef542..798bcd7 100644
76047 --- a/kernel/taskstats.c
76048 +++ b/kernel/taskstats.c
76049 @@ -26,9 +26,12 @@
76050 #include <linux/cgroup.h>
76051 #include <linux/fs.h>
76052 #include <linux/file.h>
76053 +#include <linux/grsecurity.h>
76054 #include <net/genetlink.h>
76055 #include <asm/atomic.h>
76056
76057 +extern int gr_is_taskstats_denied(int pid);
76058 +
76059 /*
76060 * Maximum length of a cpumask that can be specified in
76061 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
76062 @@ -442,6 +445,9 @@ static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
76063 size_t size;
76064 cpumask_var_t mask;
76065
76066 + if (gr_is_taskstats_denied(current->pid))
76067 + return -EACCES;
76068 +
76069 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
76070 return -ENOMEM;
76071
76072 diff --git a/kernel/time.c b/kernel/time.c
76073 index 33df60e..ca768bd 100644
76074 --- a/kernel/time.c
76075 +++ b/kernel/time.c
76076 @@ -165,6 +165,11 @@ int do_sys_settimeofday(struct timespec *tv, struct timezone *tz)
76077 return error;
76078
76079 if (tz) {
76080 + /* we log in do_settimeofday called below, so don't log twice
76081 + */
76082 + if (!tv)
76083 + gr_log_timechange();
76084 +
76085 /* SMP safe, global irq locking makes it work. */
76086 sys_tz = *tz;
76087 update_vsyscall_tz();
76088 @@ -240,7 +245,7 @@ EXPORT_SYMBOL(current_fs_time);
76089 * Avoid unnecessary multiplications/divisions in the
76090 * two most common HZ cases:
76091 */
76092 -unsigned int inline jiffies_to_msecs(const unsigned long j)
76093 +inline unsigned int jiffies_to_msecs(const unsigned long j)
76094 {
76095 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
76096 return (MSEC_PER_SEC / HZ) * j;
76097 @@ -256,7 +261,7 @@ unsigned int inline jiffies_to_msecs(const unsigned long j)
76098 }
76099 EXPORT_SYMBOL(jiffies_to_msecs);
76100
76101 -unsigned int inline jiffies_to_usecs(const unsigned long j)
76102 +inline unsigned int jiffies_to_usecs(const unsigned long j)
76103 {
76104 #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
76105 return (USEC_PER_SEC / HZ) * j;
76106 diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
76107 index 57b953f..06f149f 100644
76108 --- a/kernel/time/tick-broadcast.c
76109 +++ b/kernel/time/tick-broadcast.c
76110 @@ -116,7 +116,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
76111 * then clear the broadcast bit.
76112 */
76113 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
76114 - int cpu = smp_processor_id();
76115 + cpu = smp_processor_id();
76116
76117 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
76118 tick_broadcast_clear_oneshot(cpu);
76119 diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
76120 index 4a71cff..ffb5548 100644
76121 --- a/kernel/time/timekeeping.c
76122 +++ b/kernel/time/timekeeping.c
76123 @@ -14,6 +14,7 @@
76124 #include <linux/init.h>
76125 #include <linux/mm.h>
76126 #include <linux/sched.h>
76127 +#include <linux/grsecurity.h>
76128 #include <linux/sysdev.h>
76129 #include <linux/clocksource.h>
76130 #include <linux/jiffies.h>
76131 @@ -180,7 +181,7 @@ void update_xtime_cache(u64 nsec)
76132 */
76133 struct timespec ts = xtime;
76134 timespec_add_ns(&ts, nsec);
76135 - ACCESS_ONCE(xtime_cache) = ts;
76136 + ACCESS_ONCE_RW(xtime_cache) = ts;
76137 }
76138
76139 /* must hold xtime_lock */
76140 @@ -337,6 +338,8 @@ int do_settimeofday(struct timespec *tv)
76141 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
76142 return -EINVAL;
76143
76144 + gr_log_timechange();
76145 +
76146 write_seqlock_irqsave(&xtime_lock, flags);
76147
76148 timekeeping_forward_now();
76149 diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
76150 index 54c0dda..e9095d9 100644
76151 --- a/kernel/time/timer_list.c
76152 +++ b/kernel/time/timer_list.c
76153 @@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
76154
76155 static void print_name_offset(struct seq_file *m, void *sym)
76156 {
76157 +#ifdef CONFIG_GRKERNSEC_HIDESYM
76158 + SEQ_printf(m, "<%p>", NULL);
76159 +#else
76160 char symname[KSYM_NAME_LEN];
76161
76162 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
76163 SEQ_printf(m, "<%p>", sym);
76164 else
76165 SEQ_printf(m, "%s", symname);
76166 +#endif
76167 }
76168
76169 static void
76170 @@ -112,7 +116,11 @@ next_one:
76171 static void
76172 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
76173 {
76174 +#ifdef CONFIG_GRKERNSEC_HIDESYM
76175 + SEQ_printf(m, " .base: %p\n", NULL);
76176 +#else
76177 SEQ_printf(m, " .base: %p\n", base);
76178 +#endif
76179 SEQ_printf(m, " .index: %d\n",
76180 base->index);
76181 SEQ_printf(m, " .resolution: %Lu nsecs\n",
76182 @@ -289,7 +297,11 @@ static int __init init_timer_list_procfs(void)
76183 {
76184 struct proc_dir_entry *pe;
76185
76186 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
76187 + pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
76188 +#else
76189 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
76190 +#endif
76191 if (!pe)
76192 return -ENOMEM;
76193 return 0;
76194 diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
76195 index ee5681f..634089b 100644
76196 --- a/kernel/time/timer_stats.c
76197 +++ b/kernel/time/timer_stats.c
76198 @@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
76199 static unsigned long nr_entries;
76200 static struct entry entries[MAX_ENTRIES];
76201
76202 -static atomic_t overflow_count;
76203 +static atomic_unchecked_t overflow_count;
76204
76205 /*
76206 * The entries are in a hash-table, for fast lookup:
76207 @@ -140,7 +140,7 @@ static void reset_entries(void)
76208 nr_entries = 0;
76209 memset(entries, 0, sizeof(entries));
76210 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
76211 - atomic_set(&overflow_count, 0);
76212 + atomic_set_unchecked(&overflow_count, 0);
76213 }
76214
76215 static struct entry *alloc_entry(void)
76216 @@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
76217 if (likely(entry))
76218 entry->count++;
76219 else
76220 - atomic_inc(&overflow_count);
76221 + atomic_inc_unchecked(&overflow_count);
76222
76223 out_unlock:
76224 spin_unlock_irqrestore(lock, flags);
76225 @@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
76226
76227 static void print_name_offset(struct seq_file *m, unsigned long addr)
76228 {
76229 +#ifdef CONFIG_GRKERNSEC_HIDESYM
76230 + seq_printf(m, "<%p>", NULL);
76231 +#else
76232 char symname[KSYM_NAME_LEN];
76233
76234 if (lookup_symbol_name(addr, symname) < 0)
76235 seq_printf(m, "<%p>", (void *)addr);
76236 else
76237 seq_printf(m, "%s", symname);
76238 +#endif
76239 }
76240
76241 static int tstats_show(struct seq_file *m, void *v)
76242 @@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
76243
76244 seq_puts(m, "Timer Stats Version: v0.2\n");
76245 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
76246 - if (atomic_read(&overflow_count))
76247 + if (atomic_read_unchecked(&overflow_count))
76248 seq_printf(m, "Overflow: %d entries\n",
76249 - atomic_read(&overflow_count));
76250 + atomic_read_unchecked(&overflow_count));
76251
76252 for (i = 0; i < nr_entries; i++) {
76253 entry = entries + i;
76254 @@ -415,7 +419,11 @@ static int __init init_tstats_procfs(void)
76255 {
76256 struct proc_dir_entry *pe;
76257
76258 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
76259 + pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
76260 +#else
76261 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
76262 +#endif
76263 if (!pe)
76264 return -ENOMEM;
76265 return 0;
76266 diff --git a/kernel/timer.c b/kernel/timer.c
76267 index cb3c1f1..8bf5526 100644
76268 --- a/kernel/timer.c
76269 +++ b/kernel/timer.c
76270 @@ -1213,7 +1213,7 @@ void update_process_times(int user_tick)
76271 /*
76272 * This function runs timers and the timer-tq in bottom half context.
76273 */
76274 -static void run_timer_softirq(struct softirq_action *h)
76275 +static void run_timer_softirq(void)
76276 {
76277 struct tvec_base *base = __get_cpu_var(tvec_bases);
76278
76279 diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
76280 index d9d6206..f19467e 100644
76281 --- a/kernel/trace/blktrace.c
76282 +++ b/kernel/trace/blktrace.c
76283 @@ -313,7 +313,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
76284 struct blk_trace *bt = filp->private_data;
76285 char buf[16];
76286
76287 - snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
76288 + snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
76289
76290 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
76291 }
76292 @@ -376,7 +376,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
76293 return 1;
76294
76295 bt = buf->chan->private_data;
76296 - atomic_inc(&bt->dropped);
76297 + atomic_inc_unchecked(&bt->dropped);
76298 return 0;
76299 }
76300
76301 @@ -477,7 +477,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
76302
76303 bt->dir = dir;
76304 bt->dev = dev;
76305 - atomic_set(&bt->dropped, 0);
76306 + atomic_set_unchecked(&bt->dropped, 0);
76307
76308 ret = -EIO;
76309 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
76310 diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
76311 index 4872937..c794d40 100644
76312 --- a/kernel/trace/ftrace.c
76313 +++ b/kernel/trace/ftrace.c
76314 @@ -1100,13 +1100,18 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
76315
76316 ip = rec->ip;
76317
76318 + ret = ftrace_arch_code_modify_prepare();
76319 + FTRACE_WARN_ON(ret);
76320 + if (ret)
76321 + return 0;
76322 +
76323 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
76324 + FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
76325 if (ret) {
76326 ftrace_bug(ret, ip);
76327 rec->flags |= FTRACE_FL_FAILED;
76328 - return 0;
76329 }
76330 - return 1;
76331 + return ret ? 0 : 1;
76332 }
76333
76334 /*
76335 diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
76336 index e749a05..19c6e94 100644
76337 --- a/kernel/trace/ring_buffer.c
76338 +++ b/kernel/trace/ring_buffer.c
76339 @@ -606,7 +606,7 @@ static struct list_head *rb_list_head(struct list_head *list)
76340 * the reader page). But if the next page is a header page,
76341 * its flags will be non zero.
76342 */
76343 -static int inline
76344 +static inline int
76345 rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
76346 struct buffer_page *page, struct list_head *list)
76347 {
76348 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
76349 index a2a2d1f..7f32b09 100644
76350 --- a/kernel/trace/trace.c
76351 +++ b/kernel/trace/trace.c
76352 @@ -3193,6 +3193,8 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
76353 size_t rem;
76354 unsigned int i;
76355
76356 + pax_track_stack();
76357 +
76358 /* copy the tracer to avoid using a global lock all around */
76359 mutex_lock(&trace_types_lock);
76360 if (unlikely(old_tracer != current_trace && current_trace)) {
76361 @@ -3659,6 +3661,8 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
76362 int entries, size, i;
76363 size_t ret;
76364
76365 + pax_track_stack();
76366 +
76367 if (*ppos & (PAGE_SIZE - 1)) {
76368 WARN_ONCE(1, "Ftrace: previous read must page-align\n");
76369 return -EINVAL;
76370 @@ -3816,10 +3820,9 @@ static const struct file_operations tracing_dyn_info_fops = {
76371 };
76372 #endif
76373
76374 -static struct dentry *d_tracer;
76375 -
76376 struct dentry *tracing_init_dentry(void)
76377 {
76378 + static struct dentry *d_tracer;
76379 static int once;
76380
76381 if (d_tracer)
76382 @@ -3839,10 +3842,9 @@ struct dentry *tracing_init_dentry(void)
76383 return d_tracer;
76384 }
76385
76386 -static struct dentry *d_percpu;
76387 -
76388 struct dentry *tracing_dentry_percpu(void)
76389 {
76390 + static struct dentry *d_percpu;
76391 static int once;
76392 struct dentry *d_tracer;
76393
76394 diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
76395 index d128f65..f37b4af 100644
76396 --- a/kernel/trace/trace_events.c
76397 +++ b/kernel/trace/trace_events.c
76398 @@ -951,13 +951,10 @@ static LIST_HEAD(ftrace_module_file_list);
76399 * Modules must own their file_operations to keep up with
76400 * reference counting.
76401 */
76402 +
76403 struct ftrace_module_file_ops {
76404 struct list_head list;
76405 struct module *mod;
76406 - struct file_operations id;
76407 - struct file_operations enable;
76408 - struct file_operations format;
76409 - struct file_operations filter;
76410 };
76411
76412 static void remove_subsystem_dir(const char *name)
76413 @@ -1004,17 +1001,12 @@ trace_create_file_ops(struct module *mod)
76414
76415 file_ops->mod = mod;
76416
76417 - file_ops->id = ftrace_event_id_fops;
76418 - file_ops->id.owner = mod;
76419 -
76420 - file_ops->enable = ftrace_enable_fops;
76421 - file_ops->enable.owner = mod;
76422 -
76423 - file_ops->filter = ftrace_event_filter_fops;
76424 - file_ops->filter.owner = mod;
76425 -
76426 - file_ops->format = ftrace_event_format_fops;
76427 - file_ops->format.owner = mod;
76428 + pax_open_kernel();
76429 + *(void **)&mod->trace_id.owner = mod;
76430 + *(void **)&mod->trace_enable.owner = mod;
76431 + *(void **)&mod->trace_filter.owner = mod;
76432 + *(void **)&mod->trace_format.owner = mod;
76433 + pax_close_kernel();
76434
76435 list_add(&file_ops->list, &ftrace_module_file_list);
76436
76437 @@ -1063,8 +1055,8 @@ static void trace_module_add_events(struct module *mod)
76438 call->mod = mod;
76439 list_add(&call->list, &ftrace_events);
76440 event_create_dir(call, d_events,
76441 - &file_ops->id, &file_ops->enable,
76442 - &file_ops->filter, &file_ops->format);
76443 + &mod->trace_id, &mod->trace_enable,
76444 + &mod->trace_filter, &mod->trace_format);
76445 }
76446 }
76447
76448 diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
76449 index 0acd834..b800b56 100644
76450 --- a/kernel/trace/trace_mmiotrace.c
76451 +++ b/kernel/trace/trace_mmiotrace.c
76452 @@ -23,7 +23,7 @@ struct header_iter {
76453 static struct trace_array *mmio_trace_array;
76454 static bool overrun_detected;
76455 static unsigned long prev_overruns;
76456 -static atomic_t dropped_count;
76457 +static atomic_unchecked_t dropped_count;
76458
76459 static void mmio_reset_data(struct trace_array *tr)
76460 {
76461 @@ -126,7 +126,7 @@ static void mmio_close(struct trace_iterator *iter)
76462
76463 static unsigned long count_overruns(struct trace_iterator *iter)
76464 {
76465 - unsigned long cnt = atomic_xchg(&dropped_count, 0);
76466 + unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
76467 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
76468
76469 if (over > prev_overruns)
76470 @@ -316,7 +316,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
76471 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
76472 sizeof(*entry), 0, pc);
76473 if (!event) {
76474 - atomic_inc(&dropped_count);
76475 + atomic_inc_unchecked(&dropped_count);
76476 return;
76477 }
76478 entry = ring_buffer_event_data(event);
76479 @@ -346,7 +346,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
76480 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
76481 sizeof(*entry), 0, pc);
76482 if (!event) {
76483 - atomic_inc(&dropped_count);
76484 + atomic_inc_unchecked(&dropped_count);
76485 return;
76486 }
76487 entry = ring_buffer_event_data(event);
76488 diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
76489 index b6c12c6..41fdc53 100644
76490 --- a/kernel/trace/trace_output.c
76491 +++ b/kernel/trace/trace_output.c
76492 @@ -237,7 +237,7 @@ int trace_seq_path(struct trace_seq *s, struct path *path)
76493 return 0;
76494 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
76495 if (!IS_ERR(p)) {
76496 - p = mangle_path(s->buffer + s->len, p, "\n");
76497 + p = mangle_path(s->buffer + s->len, p, "\n\\");
76498 if (p) {
76499 s->len = p - s->buffer;
76500 return 1;
76501 diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
76502 index 8504ac7..ecf0adb 100644
76503 --- a/kernel/trace/trace_stack.c
76504 +++ b/kernel/trace/trace_stack.c
76505 @@ -50,7 +50,7 @@ static inline void check_stack(void)
76506 return;
76507
76508 /* we do not handle interrupt stacks yet */
76509 - if (!object_is_on_stack(&this_size))
76510 + if (!object_starts_on_stack(&this_size))
76511 return;
76512
76513 local_irq_save(flags);
76514 diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
76515 index 40cafb0..d5ead43 100644
76516 --- a/kernel/trace/trace_workqueue.c
76517 +++ b/kernel/trace/trace_workqueue.c
76518 @@ -21,7 +21,7 @@ struct cpu_workqueue_stats {
76519 int cpu;
76520 pid_t pid;
76521 /* Can be inserted from interrupt or user context, need to be atomic */
76522 - atomic_t inserted;
76523 + atomic_unchecked_t inserted;
76524 /*
76525 * Don't need to be atomic, works are serialized in a single workqueue thread
76526 * on a single CPU.
76527 @@ -58,7 +58,7 @@ probe_workqueue_insertion(struct task_struct *wq_thread,
76528 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
76529 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
76530 if (node->pid == wq_thread->pid) {
76531 - atomic_inc(&node->inserted);
76532 + atomic_inc_unchecked(&node->inserted);
76533 goto found;
76534 }
76535 }
76536 @@ -205,7 +205,7 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
76537 tsk = get_pid_task(pid, PIDTYPE_PID);
76538 if (tsk) {
76539 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
76540 - atomic_read(&cws->inserted), cws->executed,
76541 + atomic_read_unchecked(&cws->inserted), cws->executed,
76542 tsk->comm);
76543 put_task_struct(tsk);
76544 }
76545 diff --git a/kernel/user.c b/kernel/user.c
76546 index 1b91701..8795237 100644
76547 --- a/kernel/user.c
76548 +++ b/kernel/user.c
76549 @@ -159,6 +159,7 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
76550 spin_lock_irq(&uidhash_lock);
76551 up = uid_hash_find(uid, hashent);
76552 if (up) {
76553 + put_user_ns(ns);
76554 key_put(new->uid_keyring);
76555 key_put(new->session_keyring);
76556 kmem_cache_free(uid_cachep, new);
76557 diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
76558 index 234ceb1..ad74049 100644
76559 --- a/lib/Kconfig.debug
76560 +++ b/lib/Kconfig.debug
76561 @@ -905,7 +905,7 @@ config LATENCYTOP
76562 select STACKTRACE
76563 select SCHEDSTATS
76564 select SCHED_DEBUG
76565 - depends on HAVE_LATENCYTOP_SUPPORT
76566 + depends on HAVE_LATENCYTOP_SUPPORT && !GRKERNSEC_HIDESYM
76567 help
76568 Enable this option if you want to use the LatencyTOP tool
76569 to find out which userspace is blocking on what kernel operations.
76570 diff --git a/lib/bitmap.c b/lib/bitmap.c
76571 index 7025658..8d14cab 100644
76572 --- a/lib/bitmap.c
76573 +++ b/lib/bitmap.c
76574 @@ -341,7 +341,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
76575 {
76576 int c, old_c, totaldigits, ndigits, nchunks, nbits;
76577 u32 chunk;
76578 - const char __user *ubuf = buf;
76579 + const char __user *ubuf = (const char __force_user *)buf;
76580
76581 bitmap_zero(maskp, nmaskbits);
76582
76583 @@ -426,7 +426,7 @@ int bitmap_parse_user(const char __user *ubuf,
76584 {
76585 if (!access_ok(VERIFY_READ, ubuf, ulen))
76586 return -EFAULT;
76587 - return __bitmap_parse((const char *)ubuf, ulen, 1, maskp, nmaskbits);
76588 + return __bitmap_parse((const char __force_kernel *)ubuf, ulen, 1, maskp, nmaskbits);
76589 }
76590 EXPORT_SYMBOL(bitmap_parse_user);
76591
76592 diff --git a/lib/bug.c b/lib/bug.c
76593 index 300e41a..2779eb0 100644
76594 --- a/lib/bug.c
76595 +++ b/lib/bug.c
76596 @@ -135,6 +135,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
76597 return BUG_TRAP_TYPE_NONE;
76598
76599 bug = find_bug(bugaddr);
76600 + if (!bug)
76601 + return BUG_TRAP_TYPE_NONE;
76602
76603 printk(KERN_EMERG "------------[ cut here ]------------\n");
76604
76605 diff --git a/lib/debugobjects.c b/lib/debugobjects.c
76606 index 2b413db..e21d207 100644
76607 --- a/lib/debugobjects.c
76608 +++ b/lib/debugobjects.c
76609 @@ -277,7 +277,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
76610 if (limit > 4)
76611 return;
76612
76613 - is_on_stack = object_is_on_stack(addr);
76614 + is_on_stack = object_starts_on_stack(addr);
76615 if (is_on_stack == onstack)
76616 return;
76617
76618 diff --git a/lib/devres.c b/lib/devres.c
76619 index 72c8909..7543868 100644
76620 --- a/lib/devres.c
76621 +++ b/lib/devres.c
76622 @@ -80,7 +80,7 @@ void devm_iounmap(struct device *dev, void __iomem *addr)
76623 {
76624 iounmap(addr);
76625 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
76626 - (void *)addr));
76627 + (void __force *)addr));
76628 }
76629 EXPORT_SYMBOL(devm_iounmap);
76630
76631 @@ -140,7 +140,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
76632 {
76633 ioport_unmap(addr);
76634 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
76635 - devm_ioport_map_match, (void *)addr));
76636 + devm_ioport_map_match, (void __force *)addr));
76637 }
76638 EXPORT_SYMBOL(devm_ioport_unmap);
76639
76640 diff --git a/lib/dma-debug.c b/lib/dma-debug.c
76641 index 084e879..0674448 100644
76642 --- a/lib/dma-debug.c
76643 +++ b/lib/dma-debug.c
76644 @@ -861,7 +861,7 @@ out:
76645
76646 static void check_for_stack(struct device *dev, void *addr)
76647 {
76648 - if (object_is_on_stack(addr))
76649 + if (object_starts_on_stack(addr))
76650 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
76651 "stack [addr=%p]\n", addr);
76652 }
76653 diff --git a/lib/idr.c b/lib/idr.c
76654 index eda7ba3..915dfae 100644
76655 --- a/lib/idr.c
76656 +++ b/lib/idr.c
76657 @@ -156,7 +156,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
76658 id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
76659
76660 /* if already at the top layer, we need to grow */
76661 - if (id >= 1 << (idp->layers * IDR_BITS)) {
76662 + if (id >= (1 << (idp->layers * IDR_BITS))) {
76663 *starting_id = id;
76664 return IDR_NEED_TO_GROW;
76665 }
76666 diff --git a/lib/inflate.c b/lib/inflate.c
76667 index d102559..4215f31 100644
76668 --- a/lib/inflate.c
76669 +++ b/lib/inflate.c
76670 @@ -266,7 +266,7 @@ static void free(void *where)
76671 malloc_ptr = free_mem_ptr;
76672 }
76673 #else
76674 -#define malloc(a) kmalloc(a, GFP_KERNEL)
76675 +#define malloc(a) kmalloc((a), GFP_KERNEL)
76676 #define free(a) kfree(a)
76677 #endif
76678
76679 diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
76680 index bd2bea9..6b3c95e 100644
76681 --- a/lib/is_single_threaded.c
76682 +++ b/lib/is_single_threaded.c
76683 @@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
76684 struct task_struct *p, *t;
76685 bool ret;
76686
76687 + if (!mm)
76688 + return true;
76689 +
76690 if (atomic_read(&task->signal->live) != 1)
76691 return false;
76692
76693 diff --git a/lib/kobject.c b/lib/kobject.c
76694 index b512b74..8115eb1 100644
76695 --- a/lib/kobject.c
76696 +++ b/lib/kobject.c
76697 @@ -700,7 +700,7 @@ static ssize_t kobj_attr_store(struct kobject *kobj, struct attribute *attr,
76698 return ret;
76699 }
76700
76701 -struct sysfs_ops kobj_sysfs_ops = {
76702 +const struct sysfs_ops kobj_sysfs_ops = {
76703 .show = kobj_attr_show,
76704 .store = kobj_attr_store,
76705 };
76706 @@ -789,7 +789,7 @@ static struct kobj_type kset_ktype = {
76707 * If the kset was not able to be created, NULL will be returned.
76708 */
76709 static struct kset *kset_create(const char *name,
76710 - struct kset_uevent_ops *uevent_ops,
76711 + const struct kset_uevent_ops *uevent_ops,
76712 struct kobject *parent_kobj)
76713 {
76714 struct kset *kset;
76715 @@ -832,7 +832,7 @@ static struct kset *kset_create(const char *name,
76716 * If the kset was not able to be created, NULL will be returned.
76717 */
76718 struct kset *kset_create_and_add(const char *name,
76719 - struct kset_uevent_ops *uevent_ops,
76720 + const struct kset_uevent_ops *uevent_ops,
76721 struct kobject *parent_kobj)
76722 {
76723 struct kset *kset;
76724 diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
76725 index 507b821..0bf8ed0 100644
76726 --- a/lib/kobject_uevent.c
76727 +++ b/lib/kobject_uevent.c
76728 @@ -95,7 +95,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
76729 const char *subsystem;
76730 struct kobject *top_kobj;
76731 struct kset *kset;
76732 - struct kset_uevent_ops *uevent_ops;
76733 + const struct kset_uevent_ops *uevent_ops;
76734 u64 seq;
76735 int i = 0;
76736 int retval = 0;
76737 diff --git a/lib/kref.c b/lib/kref.c
76738 index 9ecd6e8..12c94c1 100644
76739 --- a/lib/kref.c
76740 +++ b/lib/kref.c
76741 @@ -61,7 +61,7 @@ void kref_get(struct kref *kref)
76742 */
76743 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
76744 {
76745 - WARN_ON(release == NULL);
76746 + BUG_ON(release == NULL);
76747 WARN_ON(release == (void (*)(struct kref *))kfree);
76748
76749 if (atomic_dec_and_test(&kref->refcount)) {
76750 diff --git a/lib/parser.c b/lib/parser.c
76751 index b00d020..1b34325 100644
76752 --- a/lib/parser.c
76753 +++ b/lib/parser.c
76754 @@ -126,7 +126,7 @@ static int match_number(substring_t *s, int *result, int base)
76755 char *buf;
76756 int ret;
76757
76758 - buf = kmalloc(s->to - s->from + 1, GFP_KERNEL);
76759 + buf = kmalloc((s->to - s->from) + 1, GFP_KERNEL);
76760 if (!buf)
76761 return -ENOMEM;
76762 memcpy(buf, s->from, s->to - s->from);
76763 diff --git a/lib/radix-tree.c b/lib/radix-tree.c
76764 index 92cdd99..a8149d7 100644
76765 --- a/lib/radix-tree.c
76766 +++ b/lib/radix-tree.c
76767 @@ -81,7 +81,7 @@ struct radix_tree_preload {
76768 int nr;
76769 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
76770 };
76771 -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
76772 +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
76773
76774 static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
76775 {
76776 diff --git a/lib/random32.c b/lib/random32.c
76777 index 217d5c4..45aba8a 100644
76778 --- a/lib/random32.c
76779 +++ b/lib/random32.c
76780 @@ -61,7 +61,7 @@ static u32 __random32(struct rnd_state *state)
76781 */
76782 static inline u32 __seed(u32 x, u32 m)
76783 {
76784 - return (x < m) ? x + m : x;
76785 + return (x <= m) ? x + m + 1 : x;
76786 }
76787
76788 /**
76789 diff --git a/lib/vsprintf.c b/lib/vsprintf.c
76790 index 33bed5e..1477e46 100644
76791 --- a/lib/vsprintf.c
76792 +++ b/lib/vsprintf.c
76793 @@ -16,6 +16,9 @@
76794 * - scnprintf and vscnprintf
76795 */
76796
76797 +#ifdef CONFIG_GRKERNSEC_HIDESYM
76798 +#define __INCLUDED_BY_HIDESYM 1
76799 +#endif
76800 #include <stdarg.h>
76801 #include <linux/module.h>
76802 #include <linux/types.h>
76803 @@ -546,12 +549,12 @@ static char *number(char *buf, char *end, unsigned long long num,
76804 return buf;
76805 }
76806
76807 -static char *string(char *buf, char *end, char *s, struct printf_spec spec)
76808 +static char *string(char *buf, char *end, const char *s, struct printf_spec spec)
76809 {
76810 int len, i;
76811
76812 if ((unsigned long)s < PAGE_SIZE)
76813 - s = "<NULL>";
76814 + s = "(null)";
76815
76816 len = strnlen(s, spec.precision);
76817
76818 @@ -581,7 +584,7 @@ static char *symbol_string(char *buf, char *end, void *ptr,
76819 unsigned long value = (unsigned long) ptr;
76820 #ifdef CONFIG_KALLSYMS
76821 char sym[KSYM_SYMBOL_LEN];
76822 - if (ext != 'f' && ext != 's')
76823 + if (ext != 'f' && ext != 's' && ext != 'a')
76824 sprint_symbol(sym, value);
76825 else
76826 kallsyms_lookup(value, NULL, NULL, NULL, sym);
76827 @@ -801,6 +804,8 @@ static char *ip4_addr_string(char *buf, char *end, const u8 *addr,
76828 * - 'f' For simple symbolic function names without offset
76829 * - 'S' For symbolic direct pointers with offset
76830 * - 's' For symbolic direct pointers without offset
76831 + * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
76832 + * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
76833 * - 'R' For a struct resource pointer, it prints the range of
76834 * addresses (not the name nor the flags)
76835 * - 'M' For a 6-byte MAC address, it prints the address in the
76836 @@ -822,7 +827,7 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
76837 struct printf_spec spec)
76838 {
76839 if (!ptr)
76840 - return string(buf, end, "(null)", spec);
76841 + return string(buf, end, "(nil)", spec);
76842
76843 switch (*fmt) {
76844 case 'F':
76845 @@ -831,6 +836,14 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
76846 case 's':
76847 /* Fallthrough */
76848 case 'S':
76849 +#ifdef CONFIG_GRKERNSEC_HIDESYM
76850 + break;
76851 +#else
76852 + return symbol_string(buf, end, ptr, spec, *fmt);
76853 +#endif
76854 + case 'a':
76855 + /* Fallthrough */
76856 + case 'A':
76857 return symbol_string(buf, end, ptr, spec, *fmt);
76858 case 'R':
76859 return resource_string(buf, end, ptr, spec);
76860 @@ -1445,7 +1458,7 @@ do { \
76861 size_t len;
76862 if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE
76863 || (unsigned long)save_str < PAGE_SIZE)
76864 - save_str = "<NULL>";
76865 + save_str = "(null)";
76866 len = strlen(save_str);
76867 if (str + len + 1 < end)
76868 memcpy(str, save_str, len + 1);
76869 @@ -1555,11 +1568,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
76870 typeof(type) value; \
76871 if (sizeof(type) == 8) { \
76872 args = PTR_ALIGN(args, sizeof(u32)); \
76873 - *(u32 *)&value = *(u32 *)args; \
76874 - *((u32 *)&value + 1) = *(u32 *)(args + 4); \
76875 + *(u32 *)&value = *(const u32 *)args; \
76876 + *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
76877 } else { \
76878 args = PTR_ALIGN(args, sizeof(type)); \
76879 - value = *(typeof(type) *)args; \
76880 + value = *(const typeof(type) *)args; \
76881 } \
76882 args += sizeof(type); \
76883 value; \
76884 @@ -1622,7 +1635,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
76885 const char *str_arg = args;
76886 size_t len = strlen(str_arg);
76887 args += len + 1;
76888 - str = string(str, end, (char *)str_arg, spec);
76889 + str = string(str, end, str_arg, spec);
76890 break;
76891 }
76892
76893 diff --git a/localversion-grsec b/localversion-grsec
76894 new file mode 100644
76895 index 0000000..7cd6065
76896 --- /dev/null
76897 +++ b/localversion-grsec
76898 @@ -0,0 +1 @@
76899 +-grsec
76900 diff --git a/mm/Kconfig b/mm/Kconfig
76901 index 2c19c0b..f3c3f83 100644
76902 --- a/mm/Kconfig
76903 +++ b/mm/Kconfig
76904 @@ -228,7 +228,7 @@ config KSM
76905 config DEFAULT_MMAP_MIN_ADDR
76906 int "Low address space to protect from user allocation"
76907 depends on MMU
76908 - default 4096
76909 + default 65536
76910 help
76911 This is the portion of low virtual memory which should be protected
76912 from userspace allocation. Keeping a user from writing to low pages
76913 diff --git a/mm/backing-dev.c b/mm/backing-dev.c
76914 index 67a33a5..094dcf1 100644
76915 --- a/mm/backing-dev.c
76916 +++ b/mm/backing-dev.c
76917 @@ -272,7 +272,7 @@ static void bdi_task_init(struct backing_dev_info *bdi,
76918 list_add_tail_rcu(&wb->list, &bdi->wb_list);
76919 spin_unlock(&bdi->wb_lock);
76920
76921 - tsk->flags |= PF_FLUSHER | PF_SWAPWRITE;
76922 + tsk->flags |= PF_SWAPWRITE;
76923 set_freezable();
76924
76925 /*
76926 @@ -484,7 +484,7 @@ static void bdi_add_to_pending(struct rcu_head *head)
76927 * Add the default flusher task that gets created for any bdi
76928 * that has dirty data pending writeout
76929 */
76930 -void static bdi_add_default_flusher_task(struct backing_dev_info *bdi)
76931 +static void bdi_add_default_flusher_task(struct backing_dev_info *bdi)
76932 {
76933 if (!bdi_cap_writeback_dirty(bdi))
76934 return;
76935 diff --git a/mm/filemap.c b/mm/filemap.c
76936 index a1fe378..e26702f 100644
76937 --- a/mm/filemap.c
76938 +++ b/mm/filemap.c
76939 @@ -1631,7 +1631,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
76940 struct address_space *mapping = file->f_mapping;
76941
76942 if (!mapping->a_ops->readpage)
76943 - return -ENOEXEC;
76944 + return -ENODEV;
76945 file_accessed(file);
76946 vma->vm_ops = &generic_file_vm_ops;
76947 vma->vm_flags |= VM_CAN_NONLINEAR;
76948 @@ -2024,6 +2024,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
76949 *pos = i_size_read(inode);
76950
76951 if (limit != RLIM_INFINITY) {
76952 + gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
76953 if (*pos >= limit) {
76954 send_sig(SIGXFSZ, current, 0);
76955 return -EFBIG;
76956 diff --git a/mm/fremap.c b/mm/fremap.c
76957 index b6ec85a..a24ac22 100644
76958 --- a/mm/fremap.c
76959 +++ b/mm/fremap.c
76960 @@ -153,6 +153,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
76961 retry:
76962 vma = find_vma(mm, start);
76963
76964 +#ifdef CONFIG_PAX_SEGMEXEC
76965 + if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
76966 + goto out;
76967 +#endif
76968 +
76969 /*
76970 * Make sure the vma is shared, that it supports prefaulting,
76971 * and that the remapped range is valid and fully within
76972 @@ -221,7 +226,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
76973 /*
76974 * drop PG_Mlocked flag for over-mapped range
76975 */
76976 - unsigned int saved_flags = vma->vm_flags;
76977 + unsigned long saved_flags = vma->vm_flags;
76978 munlock_vma_pages_range(vma, start, start + size);
76979 vma->vm_flags = saved_flags;
76980 }
76981 diff --git a/mm/highmem.c b/mm/highmem.c
76982 index 9c1e627..5ca9447 100644
76983 --- a/mm/highmem.c
76984 +++ b/mm/highmem.c
76985 @@ -116,9 +116,10 @@ static void flush_all_zero_pkmaps(void)
76986 * So no dangers, even with speculative execution.
76987 */
76988 page = pte_page(pkmap_page_table[i]);
76989 + pax_open_kernel();
76990 pte_clear(&init_mm, (unsigned long)page_address(page),
76991 &pkmap_page_table[i]);
76992 -
76993 + pax_close_kernel();
76994 set_page_address(page, NULL);
76995 need_flush = 1;
76996 }
76997 @@ -177,9 +178,11 @@ start:
76998 }
76999 }
77000 vaddr = PKMAP_ADDR(last_pkmap_nr);
77001 +
77002 + pax_open_kernel();
77003 set_pte_at(&init_mm, vaddr,
77004 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
77005 -
77006 + pax_close_kernel();
77007 pkmap_count[last_pkmap_nr] = 1;
77008 set_page_address(page, (void *)vaddr);
77009
77010 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
77011 index 5e1e508..ac70275 100644
77012 --- a/mm/hugetlb.c
77013 +++ b/mm/hugetlb.c
77014 @@ -869,6 +869,7 @@ free:
77015 list_del(&page->lru);
77016 enqueue_huge_page(h, page);
77017 }
77018 + spin_unlock(&hugetlb_lock);
77019
77020 /* Free unnecessary surplus pages to the buddy allocator */
77021 if (!list_empty(&surplus_list)) {
77022 @@ -1933,6 +1934,26 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
77023 return 1;
77024 }
77025
77026 +#ifdef CONFIG_PAX_SEGMEXEC
77027 +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
77028 +{
77029 + struct mm_struct *mm = vma->vm_mm;
77030 + struct vm_area_struct *vma_m;
77031 + unsigned long address_m;
77032 + pte_t *ptep_m;
77033 +
77034 + vma_m = pax_find_mirror_vma(vma);
77035 + if (!vma_m)
77036 + return;
77037 +
77038 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
77039 + address_m = address + SEGMEXEC_TASK_SIZE;
77040 + ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
77041 + get_page(page_m);
77042 + set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
77043 +}
77044 +#endif
77045 +
77046 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
77047 unsigned long address, pte_t *ptep, pte_t pte,
77048 struct page *pagecache_page)
77049 @@ -2004,6 +2025,11 @@ retry_avoidcopy:
77050 huge_ptep_clear_flush(vma, address, ptep);
77051 set_huge_pte_at(mm, address, ptep,
77052 make_huge_pte(vma, new_page, 1));
77053 +
77054 +#ifdef CONFIG_PAX_SEGMEXEC
77055 + pax_mirror_huge_pte(vma, address, new_page);
77056 +#endif
77057 +
77058 /* Make the old page be freed below */
77059 new_page = old_page;
77060 }
77061 @@ -2135,6 +2161,10 @@ retry:
77062 && (vma->vm_flags & VM_SHARED)));
77063 set_huge_pte_at(mm, address, ptep, new_pte);
77064
77065 +#ifdef CONFIG_PAX_SEGMEXEC
77066 + pax_mirror_huge_pte(vma, address, page);
77067 +#endif
77068 +
77069 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
77070 /* Optimization, do the COW without a second fault */
77071 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
77072 @@ -2163,6 +2193,28 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
77073 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
77074 struct hstate *h = hstate_vma(vma);
77075
77076 +#ifdef CONFIG_PAX_SEGMEXEC
77077 + struct vm_area_struct *vma_m;
77078 +
77079 + vma_m = pax_find_mirror_vma(vma);
77080 + if (vma_m) {
77081 + unsigned long address_m;
77082 +
77083 + if (vma->vm_start > vma_m->vm_start) {
77084 + address_m = address;
77085 + address -= SEGMEXEC_TASK_SIZE;
77086 + vma = vma_m;
77087 + h = hstate_vma(vma);
77088 + } else
77089 + address_m = address + SEGMEXEC_TASK_SIZE;
77090 +
77091 + if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
77092 + return VM_FAULT_OOM;
77093 + address_m &= HPAGE_MASK;
77094 + unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
77095 + }
77096 +#endif
77097 +
77098 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
77099 if (!ptep)
77100 return VM_FAULT_OOM;
77101 diff --git a/mm/internal.h b/mm/internal.h
77102 index f03e8e2..7354343 100644
77103 --- a/mm/internal.h
77104 +++ b/mm/internal.h
77105 @@ -49,6 +49,7 @@ extern void putback_lru_page(struct page *page);
77106 * in mm/page_alloc.c
77107 */
77108 extern void __free_pages_bootmem(struct page *page, unsigned int order);
77109 +extern void free_compound_page(struct page *page);
77110 extern void prep_compound_page(struct page *page, unsigned long order);
77111
77112
77113 diff --git a/mm/kmemleak.c b/mm/kmemleak.c
77114 index c346660..b47382f 100644
77115 --- a/mm/kmemleak.c
77116 +++ b/mm/kmemleak.c
77117 @@ -358,7 +358,7 @@ static void print_unreferenced(struct seq_file *seq,
77118
77119 for (i = 0; i < object->trace_len; i++) {
77120 void *ptr = (void *)object->trace[i];
77121 - seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
77122 + seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
77123 }
77124 }
77125
77126 diff --git a/mm/maccess.c b/mm/maccess.c
77127 index 9073695..1127f348 100644
77128 --- a/mm/maccess.c
77129 +++ b/mm/maccess.c
77130 @@ -14,7 +14,7 @@
77131 * Safely read from address @src to the buffer at @dst. If a kernel fault
77132 * happens, handle that and return -EFAULT.
77133 */
77134 -long probe_kernel_read(void *dst, void *src, size_t size)
77135 +long probe_kernel_read(void *dst, const void *src, size_t size)
77136 {
77137 long ret;
77138 mm_segment_t old_fs = get_fs();
77139 @@ -22,7 +22,7 @@ long probe_kernel_read(void *dst, void *src, size_t size)
77140 set_fs(KERNEL_DS);
77141 pagefault_disable();
77142 ret = __copy_from_user_inatomic(dst,
77143 - (__force const void __user *)src, size);
77144 + (const void __force_user *)src, size);
77145 pagefault_enable();
77146 set_fs(old_fs);
77147
77148 @@ -39,14 +39,14 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
77149 * Safely write to address @dst from the buffer at @src. If a kernel fault
77150 * happens, handle that and return -EFAULT.
77151 */
77152 -long notrace __weak probe_kernel_write(void *dst, void *src, size_t size)
77153 +long notrace __weak probe_kernel_write(void *dst, const void *src, size_t size)
77154 {
77155 long ret;
77156 mm_segment_t old_fs = get_fs();
77157
77158 set_fs(KERNEL_DS);
77159 pagefault_disable();
77160 - ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
77161 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
77162 pagefault_enable();
77163 set_fs(old_fs);
77164
77165 diff --git a/mm/madvise.c b/mm/madvise.c
77166 index 35b1479..499f7d4 100644
77167 --- a/mm/madvise.c
77168 +++ b/mm/madvise.c
77169 @@ -44,6 +44,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
77170 pgoff_t pgoff;
77171 unsigned long new_flags = vma->vm_flags;
77172
77173 +#ifdef CONFIG_PAX_SEGMEXEC
77174 + struct vm_area_struct *vma_m;
77175 +#endif
77176 +
77177 switch (behavior) {
77178 case MADV_NORMAL:
77179 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
77180 @@ -103,6 +107,13 @@ success:
77181 /*
77182 * vm_flags is protected by the mmap_sem held in write mode.
77183 */
77184 +
77185 +#ifdef CONFIG_PAX_SEGMEXEC
77186 + vma_m = pax_find_mirror_vma(vma);
77187 + if (vma_m)
77188 + vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
77189 +#endif
77190 +
77191 vma->vm_flags = new_flags;
77192
77193 out:
77194 @@ -161,6 +172,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
77195 struct vm_area_struct ** prev,
77196 unsigned long start, unsigned long end)
77197 {
77198 +
77199 +#ifdef CONFIG_PAX_SEGMEXEC
77200 + struct vm_area_struct *vma_m;
77201 +#endif
77202 +
77203 *prev = vma;
77204 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
77205 return -EINVAL;
77206 @@ -173,6 +189,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
77207 zap_page_range(vma, start, end - start, &details);
77208 } else
77209 zap_page_range(vma, start, end - start, NULL);
77210 +
77211 +#ifdef CONFIG_PAX_SEGMEXEC
77212 + vma_m = pax_find_mirror_vma(vma);
77213 + if (vma_m) {
77214 + if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
77215 + struct zap_details details = {
77216 + .nonlinear_vma = vma_m,
77217 + .last_index = ULONG_MAX,
77218 + };
77219 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
77220 + } else
77221 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
77222 + }
77223 +#endif
77224 +
77225 return 0;
77226 }
77227
77228 @@ -359,6 +390,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
77229 if (end < start)
77230 goto out;
77231
77232 +#ifdef CONFIG_PAX_SEGMEXEC
77233 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
77234 + if (end > SEGMEXEC_TASK_SIZE)
77235 + goto out;
77236 + } else
77237 +#endif
77238 +
77239 + if (end > TASK_SIZE)
77240 + goto out;
77241 +
77242 error = 0;
77243 if (end == start)
77244 goto out;
77245 diff --git a/mm/memory-failure.c b/mm/memory-failure.c
77246 index 8aeba53..b4a4198 100644
77247 --- a/mm/memory-failure.c
77248 +++ b/mm/memory-failure.c
77249 @@ -46,7 +46,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
77250
77251 int sysctl_memory_failure_recovery __read_mostly = 1;
77252
77253 -atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
77254 +atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
77255
77256 /*
77257 * Send all the processes who have the page mapped an ``action optional''
77258 @@ -64,7 +64,7 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
77259 si.si_signo = SIGBUS;
77260 si.si_errno = 0;
77261 si.si_code = BUS_MCEERR_AO;
77262 - si.si_addr = (void *)addr;
77263 + si.si_addr = (void __user *)addr;
77264 #ifdef __ARCH_SI_TRAPNO
77265 si.si_trapno = trapno;
77266 #endif
77267 @@ -745,7 +745,7 @@ int __memory_failure(unsigned long pfn, int trapno, int ref)
77268 return 0;
77269 }
77270
77271 - atomic_long_add(1, &mce_bad_pages);
77272 + atomic_long_add_unchecked(1, &mce_bad_pages);
77273
77274 /*
77275 * We need/can do nothing about count=0 pages.
77276 diff --git a/mm/memory.c b/mm/memory.c
77277 index 6c836d3..48f3264 100644
77278 --- a/mm/memory.c
77279 +++ b/mm/memory.c
77280 @@ -187,8 +187,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
77281 return;
77282
77283 pmd = pmd_offset(pud, start);
77284 +
77285 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
77286 pud_clear(pud);
77287 pmd_free_tlb(tlb, pmd, start);
77288 +#endif
77289 +
77290 }
77291
77292 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
77293 @@ -219,9 +223,12 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
77294 if (end - 1 > ceiling - 1)
77295 return;
77296
77297 +#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
77298 pud = pud_offset(pgd, start);
77299 pgd_clear(pgd);
77300 pud_free_tlb(tlb, pud, start);
77301 +#endif
77302 +
77303 }
77304
77305 /*
77306 @@ -1251,10 +1258,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
77307 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
77308 i = 0;
77309
77310 - do {
77311 + while (nr_pages) {
77312 struct vm_area_struct *vma;
77313
77314 - vma = find_extend_vma(mm, start);
77315 + vma = find_vma(mm, start);
77316 if (!vma && in_gate_area(tsk, start)) {
77317 unsigned long pg = start & PAGE_MASK;
77318 struct vm_area_struct *gate_vma = get_gate_vma(tsk);
77319 @@ -1306,7 +1313,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
77320 continue;
77321 }
77322
77323 - if (!vma ||
77324 + if (!vma || start < vma->vm_start ||
77325 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
77326 !(vm_flags & vma->vm_flags))
77327 return i ? : -EFAULT;
77328 @@ -1381,7 +1388,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
77329 start += PAGE_SIZE;
77330 nr_pages--;
77331 } while (nr_pages && start < vma->vm_end);
77332 - } while (nr_pages);
77333 + }
77334 return i;
77335 }
77336
77337 @@ -1526,6 +1533,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
77338 page_add_file_rmap(page);
77339 set_pte_at(mm, addr, pte, mk_pte(page, prot));
77340
77341 +#ifdef CONFIG_PAX_SEGMEXEC
77342 + pax_mirror_file_pte(vma, addr, page, ptl);
77343 +#endif
77344 +
77345 retval = 0;
77346 pte_unmap_unlock(pte, ptl);
77347 return retval;
77348 @@ -1560,10 +1571,22 @@ out:
77349 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
77350 struct page *page)
77351 {
77352 +
77353 +#ifdef CONFIG_PAX_SEGMEXEC
77354 + struct vm_area_struct *vma_m;
77355 +#endif
77356 +
77357 if (addr < vma->vm_start || addr >= vma->vm_end)
77358 return -EFAULT;
77359 if (!page_count(page))
77360 return -EINVAL;
77361 +
77362 +#ifdef CONFIG_PAX_SEGMEXEC
77363 + vma_m = pax_find_mirror_vma(vma);
77364 + if (vma_m)
77365 + vma_m->vm_flags |= VM_INSERTPAGE;
77366 +#endif
77367 +
77368 vma->vm_flags |= VM_INSERTPAGE;
77369 return insert_page(vma, addr, page, vma->vm_page_prot);
77370 }
77371 @@ -1649,6 +1672,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
77372 unsigned long pfn)
77373 {
77374 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
77375 + BUG_ON(vma->vm_mirror);
77376
77377 if (addr < vma->vm_start || addr >= vma->vm_end)
77378 return -EFAULT;
77379 @@ -1977,6 +2001,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
77380 copy_user_highpage(dst, src, va, vma);
77381 }
77382
77383 +#ifdef CONFIG_PAX_SEGMEXEC
77384 +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
77385 +{
77386 + struct mm_struct *mm = vma->vm_mm;
77387 + spinlock_t *ptl;
77388 + pte_t *pte, entry;
77389 +
77390 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
77391 + entry = *pte;
77392 + if (!pte_present(entry)) {
77393 + if (!pte_none(entry)) {
77394 + BUG_ON(pte_file(entry));
77395 + free_swap_and_cache(pte_to_swp_entry(entry));
77396 + pte_clear_not_present_full(mm, address, pte, 0);
77397 + }
77398 + } else {
77399 + struct page *page;
77400 +
77401 + flush_cache_page(vma, address, pte_pfn(entry));
77402 + entry = ptep_clear_flush(vma, address, pte);
77403 + BUG_ON(pte_dirty(entry));
77404 + page = vm_normal_page(vma, address, entry);
77405 + if (page) {
77406 + update_hiwater_rss(mm);
77407 + if (PageAnon(page))
77408 + dec_mm_counter(mm, anon_rss);
77409 + else
77410 + dec_mm_counter(mm, file_rss);
77411 + page_remove_rmap(page);
77412 + page_cache_release(page);
77413 + }
77414 + }
77415 + pte_unmap_unlock(pte, ptl);
77416 +}
77417 +
77418 +/* PaX: if vma is mirrored, synchronize the mirror's PTE
77419 + *
77420 + * the ptl of the lower mapped page is held on entry and is not released on exit
77421 + * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
77422 + */
77423 +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
77424 +{
77425 + struct mm_struct *mm = vma->vm_mm;
77426 + unsigned long address_m;
77427 + spinlock_t *ptl_m;
77428 + struct vm_area_struct *vma_m;
77429 + pmd_t *pmd_m;
77430 + pte_t *pte_m, entry_m;
77431 +
77432 + BUG_ON(!page_m || !PageAnon(page_m));
77433 +
77434 + vma_m = pax_find_mirror_vma(vma);
77435 + if (!vma_m)
77436 + return;
77437 +
77438 + BUG_ON(!PageLocked(page_m));
77439 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
77440 + address_m = address + SEGMEXEC_TASK_SIZE;
77441 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
77442 + pte_m = pte_offset_map_nested(pmd_m, address_m);
77443 + ptl_m = pte_lockptr(mm, pmd_m);
77444 + if (ptl != ptl_m) {
77445 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
77446 + if (!pte_none(*pte_m))
77447 + goto out;
77448 + }
77449 +
77450 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
77451 + page_cache_get(page_m);
77452 + page_add_anon_rmap(page_m, vma_m, address_m);
77453 + inc_mm_counter(mm, anon_rss);
77454 + set_pte_at(mm, address_m, pte_m, entry_m);
77455 + update_mmu_cache(vma_m, address_m, entry_m);
77456 +out:
77457 + if (ptl != ptl_m)
77458 + spin_unlock(ptl_m);
77459 + pte_unmap_nested(pte_m);
77460 + unlock_page(page_m);
77461 +}
77462 +
77463 +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
77464 +{
77465 + struct mm_struct *mm = vma->vm_mm;
77466 + unsigned long address_m;
77467 + spinlock_t *ptl_m;
77468 + struct vm_area_struct *vma_m;
77469 + pmd_t *pmd_m;
77470 + pte_t *pte_m, entry_m;
77471 +
77472 + BUG_ON(!page_m || PageAnon(page_m));
77473 +
77474 + vma_m = pax_find_mirror_vma(vma);
77475 + if (!vma_m)
77476 + return;
77477 +
77478 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
77479 + address_m = address + SEGMEXEC_TASK_SIZE;
77480 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
77481 + pte_m = pte_offset_map_nested(pmd_m, address_m);
77482 + ptl_m = pte_lockptr(mm, pmd_m);
77483 + if (ptl != ptl_m) {
77484 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
77485 + if (!pte_none(*pte_m))
77486 + goto out;
77487 + }
77488 +
77489 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
77490 + page_cache_get(page_m);
77491 + page_add_file_rmap(page_m);
77492 + inc_mm_counter(mm, file_rss);
77493 + set_pte_at(mm, address_m, pte_m, entry_m);
77494 + update_mmu_cache(vma_m, address_m, entry_m);
77495 +out:
77496 + if (ptl != ptl_m)
77497 + spin_unlock(ptl_m);
77498 + pte_unmap_nested(pte_m);
77499 +}
77500 +
77501 +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
77502 +{
77503 + struct mm_struct *mm = vma->vm_mm;
77504 + unsigned long address_m;
77505 + spinlock_t *ptl_m;
77506 + struct vm_area_struct *vma_m;
77507 + pmd_t *pmd_m;
77508 + pte_t *pte_m, entry_m;
77509 +
77510 + vma_m = pax_find_mirror_vma(vma);
77511 + if (!vma_m)
77512 + return;
77513 +
77514 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
77515 + address_m = address + SEGMEXEC_TASK_SIZE;
77516 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
77517 + pte_m = pte_offset_map_nested(pmd_m, address_m);
77518 + ptl_m = pte_lockptr(mm, pmd_m);
77519 + if (ptl != ptl_m) {
77520 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
77521 + if (!pte_none(*pte_m))
77522 + goto out;
77523 + }
77524 +
77525 + entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
77526 + set_pte_at(mm, address_m, pte_m, entry_m);
77527 +out:
77528 + if (ptl != ptl_m)
77529 + spin_unlock(ptl_m);
77530 + pte_unmap_nested(pte_m);
77531 +}
77532 +
77533 +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
77534 +{
77535 + struct page *page_m;
77536 + pte_t entry;
77537 +
77538 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
77539 + goto out;
77540 +
77541 + entry = *pte;
77542 + page_m = vm_normal_page(vma, address, entry);
77543 + if (!page_m)
77544 + pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
77545 + else if (PageAnon(page_m)) {
77546 + if (pax_find_mirror_vma(vma)) {
77547 + pte_unmap_unlock(pte, ptl);
77548 + lock_page(page_m);
77549 + pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
77550 + if (pte_same(entry, *pte))
77551 + pax_mirror_anon_pte(vma, address, page_m, ptl);
77552 + else
77553 + unlock_page(page_m);
77554 + }
77555 + } else
77556 + pax_mirror_file_pte(vma, address, page_m, ptl);
77557 +
77558 +out:
77559 + pte_unmap_unlock(pte, ptl);
77560 +}
77561 +#endif
77562 +
77563 /*
77564 * This routine handles present pages, when users try to write
77565 * to a shared page. It is done by copying the page to a new address
77566 @@ -2156,6 +2360,12 @@ gotten:
77567 */
77568 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
77569 if (likely(pte_same(*page_table, orig_pte))) {
77570 +
77571 +#ifdef CONFIG_PAX_SEGMEXEC
77572 + if (pax_find_mirror_vma(vma))
77573 + BUG_ON(!trylock_page(new_page));
77574 +#endif
77575 +
77576 if (old_page) {
77577 if (!PageAnon(old_page)) {
77578 dec_mm_counter(mm, file_rss);
77579 @@ -2207,6 +2417,10 @@ gotten:
77580 page_remove_rmap(old_page);
77581 }
77582
77583 +#ifdef CONFIG_PAX_SEGMEXEC
77584 + pax_mirror_anon_pte(vma, address, new_page, ptl);
77585 +#endif
77586 +
77587 /* Free the old page.. */
77588 new_page = old_page;
77589 ret |= VM_FAULT_WRITE;
77590 @@ -2606,6 +2820,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
77591 swap_free(entry);
77592 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
77593 try_to_free_swap(page);
77594 +
77595 +#ifdef CONFIG_PAX_SEGMEXEC
77596 + if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
77597 +#endif
77598 +
77599 unlock_page(page);
77600
77601 if (flags & FAULT_FLAG_WRITE) {
77602 @@ -2617,6 +2836,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
77603
77604 /* No need to invalidate - it was non-present before */
77605 update_mmu_cache(vma, address, pte);
77606 +
77607 +#ifdef CONFIG_PAX_SEGMEXEC
77608 + pax_mirror_anon_pte(vma, address, page, ptl);
77609 +#endif
77610 +
77611 unlock:
77612 pte_unmap_unlock(page_table, ptl);
77613 out:
77614 @@ -2632,40 +2856,6 @@ out_release:
77615 }
77616
77617 /*
77618 - * This is like a special single-page "expand_{down|up}wards()",
77619 - * except we must first make sure that 'address{-|+}PAGE_SIZE'
77620 - * doesn't hit another vma.
77621 - */
77622 -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
77623 -{
77624 - address &= PAGE_MASK;
77625 - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
77626 - struct vm_area_struct *prev = vma->vm_prev;
77627 -
77628 - /*
77629 - * Is there a mapping abutting this one below?
77630 - *
77631 - * That's only ok if it's the same stack mapping
77632 - * that has gotten split..
77633 - */
77634 - if (prev && prev->vm_end == address)
77635 - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
77636 -
77637 - expand_stack(vma, address - PAGE_SIZE);
77638 - }
77639 - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
77640 - struct vm_area_struct *next = vma->vm_next;
77641 -
77642 - /* As VM_GROWSDOWN but s/below/above/ */
77643 - if (next && next->vm_start == address + PAGE_SIZE)
77644 - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
77645 -
77646 - expand_upwards(vma, address + PAGE_SIZE);
77647 - }
77648 - return 0;
77649 -}
77650 -
77651 -/*
77652 * We enter with non-exclusive mmap_sem (to exclude vma changes,
77653 * but allow concurrent faults), and pte mapped but not yet locked.
77654 * We return with mmap_sem still held, but pte unmapped and unlocked.
77655 @@ -2674,27 +2864,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
77656 unsigned long address, pte_t *page_table, pmd_t *pmd,
77657 unsigned int flags)
77658 {
77659 - struct page *page;
77660 + struct page *page = NULL;
77661 spinlock_t *ptl;
77662 pte_t entry;
77663
77664 - pte_unmap(page_table);
77665 -
77666 - /* Check if we need to add a guard page to the stack */
77667 - if (check_stack_guard_page(vma, address) < 0)
77668 - return VM_FAULT_SIGBUS;
77669 -
77670 - /* Use the zero-page for reads */
77671 if (!(flags & FAULT_FLAG_WRITE)) {
77672 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
77673 vma->vm_page_prot));
77674 - page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
77675 + ptl = pte_lockptr(mm, pmd);
77676 + spin_lock(ptl);
77677 if (!pte_none(*page_table))
77678 goto unlock;
77679 goto setpte;
77680 }
77681
77682 /* Allocate our own private page. */
77683 + pte_unmap(page_table);
77684 +
77685 if (unlikely(anon_vma_prepare(vma)))
77686 goto oom;
77687 page = alloc_zeroed_user_highpage_movable(vma, address);
77688 @@ -2713,6 +2899,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
77689 if (!pte_none(*page_table))
77690 goto release;
77691
77692 +#ifdef CONFIG_PAX_SEGMEXEC
77693 + if (pax_find_mirror_vma(vma))
77694 + BUG_ON(!trylock_page(page));
77695 +#endif
77696 +
77697 inc_mm_counter(mm, anon_rss);
77698 page_add_new_anon_rmap(page, vma, address);
77699 setpte:
77700 @@ -2720,6 +2911,12 @@ setpte:
77701
77702 /* No need to invalidate - it was non-present before */
77703 update_mmu_cache(vma, address, entry);
77704 +
77705 +#ifdef CONFIG_PAX_SEGMEXEC
77706 + if (page)
77707 + pax_mirror_anon_pte(vma, address, page, ptl);
77708 +#endif
77709 +
77710 unlock:
77711 pte_unmap_unlock(page_table, ptl);
77712 return 0;
77713 @@ -2862,6 +3059,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
77714 */
77715 /* Only go through if we didn't race with anybody else... */
77716 if (likely(pte_same(*page_table, orig_pte))) {
77717 +
77718 +#ifdef CONFIG_PAX_SEGMEXEC
77719 + if (anon && pax_find_mirror_vma(vma))
77720 + BUG_ON(!trylock_page(page));
77721 +#endif
77722 +
77723 flush_icache_page(vma, page);
77724 entry = mk_pte(page, vma->vm_page_prot);
77725 if (flags & FAULT_FLAG_WRITE)
77726 @@ -2881,6 +3084,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
77727
77728 /* no need to invalidate: a not-present page won't be cached */
77729 update_mmu_cache(vma, address, entry);
77730 +
77731 +#ifdef CONFIG_PAX_SEGMEXEC
77732 + if (anon)
77733 + pax_mirror_anon_pte(vma, address, page, ptl);
77734 + else
77735 + pax_mirror_file_pte(vma, address, page, ptl);
77736 +#endif
77737 +
77738 } else {
77739 if (charged)
77740 mem_cgroup_uncharge_page(page);
77741 @@ -3028,6 +3239,12 @@ static inline int handle_pte_fault(struct mm_struct *mm,
77742 if (flags & FAULT_FLAG_WRITE)
77743 flush_tlb_page(vma, address);
77744 }
77745 +
77746 +#ifdef CONFIG_PAX_SEGMEXEC
77747 + pax_mirror_pte(vma, address, pte, pmd, ptl);
77748 + return 0;
77749 +#endif
77750 +
77751 unlock:
77752 pte_unmap_unlock(pte, ptl);
77753 return 0;
77754 @@ -3044,6 +3261,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
77755 pmd_t *pmd;
77756 pte_t *pte;
77757
77758 +#ifdef CONFIG_PAX_SEGMEXEC
77759 + struct vm_area_struct *vma_m;
77760 +#endif
77761 +
77762 __set_current_state(TASK_RUNNING);
77763
77764 count_vm_event(PGFAULT);
77765 @@ -3051,6 +3272,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
77766 if (unlikely(is_vm_hugetlb_page(vma)))
77767 return hugetlb_fault(mm, vma, address, flags);
77768
77769 +#ifdef CONFIG_PAX_SEGMEXEC
77770 + vma_m = pax_find_mirror_vma(vma);
77771 + if (vma_m) {
77772 + unsigned long address_m;
77773 + pgd_t *pgd_m;
77774 + pud_t *pud_m;
77775 + pmd_t *pmd_m;
77776 +
77777 + if (vma->vm_start > vma_m->vm_start) {
77778 + address_m = address;
77779 + address -= SEGMEXEC_TASK_SIZE;
77780 + vma = vma_m;
77781 + } else
77782 + address_m = address + SEGMEXEC_TASK_SIZE;
77783 +
77784 + pgd_m = pgd_offset(mm, address_m);
77785 + pud_m = pud_alloc(mm, pgd_m, address_m);
77786 + if (!pud_m)
77787 + return VM_FAULT_OOM;
77788 + pmd_m = pmd_alloc(mm, pud_m, address_m);
77789 + if (!pmd_m)
77790 + return VM_FAULT_OOM;
77791 + if (!pmd_present(*pmd_m) && __pte_alloc(mm, pmd_m, address_m))
77792 + return VM_FAULT_OOM;
77793 + pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
77794 + }
77795 +#endif
77796 +
77797 pgd = pgd_offset(mm, address);
77798 pud = pud_alloc(mm, pgd, address);
77799 if (!pud)
77800 @@ -3148,7 +3397,7 @@ static int __init gate_vma_init(void)
77801 gate_vma.vm_start = FIXADDR_USER_START;
77802 gate_vma.vm_end = FIXADDR_USER_END;
77803 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
77804 - gate_vma.vm_page_prot = __P101;
77805 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
77806 /*
77807 * Make sure the vDSO gets into every core dump.
77808 * Dumping its contents makes post-mortem fully interpretable later
77809 diff --git a/mm/mempolicy.c b/mm/mempolicy.c
77810 index 3c6e3e2..b1ddbb8 100644
77811 --- a/mm/mempolicy.c
77812 +++ b/mm/mempolicy.c
77813 @@ -573,6 +573,10 @@ static int mbind_range(struct vm_area_struct *vma, unsigned long start,
77814 struct vm_area_struct *next;
77815 int err;
77816
77817 +#ifdef CONFIG_PAX_SEGMEXEC
77818 + struct vm_area_struct *vma_m;
77819 +#endif
77820 +
77821 err = 0;
77822 for (; vma && vma->vm_start < end; vma = next) {
77823 next = vma->vm_next;
77824 @@ -584,6 +588,16 @@ static int mbind_range(struct vm_area_struct *vma, unsigned long start,
77825 err = policy_vma(vma, new);
77826 if (err)
77827 break;
77828 +
77829 +#ifdef CONFIG_PAX_SEGMEXEC
77830 + vma_m = pax_find_mirror_vma(vma);
77831 + if (vma_m) {
77832 + err = policy_vma(vma_m, new);
77833 + if (err)
77834 + break;
77835 + }
77836 +#endif
77837 +
77838 }
77839 return err;
77840 }
77841 @@ -1002,6 +1016,17 @@ static long do_mbind(unsigned long start, unsigned long len,
77842
77843 if (end < start)
77844 return -EINVAL;
77845 +
77846 +#ifdef CONFIG_PAX_SEGMEXEC
77847 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
77848 + if (end > SEGMEXEC_TASK_SIZE)
77849 + return -EINVAL;
77850 + } else
77851 +#endif
77852 +
77853 + if (end > TASK_SIZE)
77854 + return -EINVAL;
77855 +
77856 if (end == start)
77857 return 0;
77858
77859 @@ -1207,6 +1232,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
77860 if (!mm)
77861 return -EINVAL;
77862
77863 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
77864 + if (mm != current->mm &&
77865 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
77866 + err = -EPERM;
77867 + goto out;
77868 + }
77869 +#endif
77870 +
77871 /*
77872 * Check if this process has the right to modify the specified
77873 * process. The right exists if the process has administrative
77874 @@ -1216,8 +1249,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
77875 rcu_read_lock();
77876 tcred = __task_cred(task);
77877 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
77878 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
77879 - !capable(CAP_SYS_NICE)) {
77880 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
77881 rcu_read_unlock();
77882 err = -EPERM;
77883 goto out;
77884 @@ -2367,6 +2399,12 @@ static inline void check_huge_range(struct vm_area_struct *vma,
77885 }
77886 #endif
77887
77888 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
77889 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
77890 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
77891 + _mm->pax_flags & MF_PAX_SEGMEXEC))
77892 +#endif
77893 +
77894 /*
77895 * Display pages allocated per node and memory policy via /proc.
77896 */
77897 @@ -2381,6 +2419,13 @@ int show_numa_map(struct seq_file *m, void *v)
77898 int n;
77899 char buffer[50];
77900
77901 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
77902 + if (current->exec_id != m->exec_id) {
77903 + gr_log_badprocpid("numa_maps");
77904 + return 0;
77905 + }
77906 +#endif
77907 +
77908 if (!mm)
77909 return 0;
77910
77911 @@ -2392,11 +2437,15 @@ int show_numa_map(struct seq_file *m, void *v)
77912 mpol_to_str(buffer, sizeof(buffer), pol, 0);
77913 mpol_cond_put(pol);
77914
77915 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
77916 + seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
77917 +#else
77918 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
77919 +#endif
77920
77921 if (file) {
77922 seq_printf(m, " file=");
77923 - seq_path(m, &file->f_path, "\n\t= ");
77924 + seq_path(m, &file->f_path, "\n\t\\= ");
77925 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
77926 seq_printf(m, " heap");
77927 } else if (vma->vm_start <= mm->start_stack &&
77928 diff --git a/mm/migrate.c b/mm/migrate.c
77929 index aaca868..2ebecdc 100644
77930 --- a/mm/migrate.c
77931 +++ b/mm/migrate.c
77932 @@ -916,6 +916,8 @@ static int do_pages_move(struct mm_struct *mm, struct task_struct *task,
77933 unsigned long chunk_start;
77934 int err;
77935
77936 + pax_track_stack();
77937 +
77938 task_nodes = cpuset_mems_allowed(task);
77939
77940 err = -ENOMEM;
77941 @@ -1106,6 +1108,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
77942 if (!mm)
77943 return -EINVAL;
77944
77945 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
77946 + if (mm != current->mm &&
77947 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
77948 + err = -EPERM;
77949 + goto out;
77950 + }
77951 +#endif
77952 +
77953 /*
77954 * Check if this process has the right to modify the specified
77955 * process. The right exists if the process has administrative
77956 @@ -1115,8 +1125,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
77957 rcu_read_lock();
77958 tcred = __task_cred(task);
77959 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
77960 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
77961 - !capable(CAP_SYS_NICE)) {
77962 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
77963 rcu_read_unlock();
77964 err = -EPERM;
77965 goto out;
77966 diff --git a/mm/mlock.c b/mm/mlock.c
77967 index 2d846cf..98134d2 100644
77968 --- a/mm/mlock.c
77969 +++ b/mm/mlock.c
77970 @@ -13,6 +13,7 @@
77971 #include <linux/pagemap.h>
77972 #include <linux/mempolicy.h>
77973 #include <linux/syscalls.h>
77974 +#include <linux/security.h>
77975 #include <linux/sched.h>
77976 #include <linux/module.h>
77977 #include <linux/rmap.h>
77978 @@ -138,13 +139,6 @@ void munlock_vma_page(struct page *page)
77979 }
77980 }
77981
77982 -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
77983 -{
77984 - return (vma->vm_flags & VM_GROWSDOWN) &&
77985 - (vma->vm_start == addr) &&
77986 - !vma_stack_continue(vma->vm_prev, addr);
77987 -}
77988 -
77989 /**
77990 * __mlock_vma_pages_range() - mlock a range of pages in the vma.
77991 * @vma: target vma
77992 @@ -177,12 +171,6 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
77993 if (vma->vm_flags & VM_WRITE)
77994 gup_flags |= FOLL_WRITE;
77995
77996 - /* We don't try to access the guard page of a stack vma */
77997 - if (stack_guard_page(vma, start)) {
77998 - addr += PAGE_SIZE;
77999 - nr_pages--;
78000 - }
78001 -
78002 while (nr_pages > 0) {
78003 int i;
78004
78005 @@ -440,7 +428,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
78006 {
78007 unsigned long nstart, end, tmp;
78008 struct vm_area_struct * vma, * prev;
78009 - int error;
78010 + int error = -EINVAL;
78011
78012 len = PAGE_ALIGN(len);
78013 end = start + len;
78014 @@ -448,6 +436,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
78015 return -EINVAL;
78016 if (end == start)
78017 return 0;
78018 + if (end > TASK_SIZE)
78019 + return -EINVAL;
78020 +
78021 vma = find_vma_prev(current->mm, start, &prev);
78022 if (!vma || vma->vm_start > start)
78023 return -ENOMEM;
78024 @@ -458,6 +449,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
78025 for (nstart = start ; ; ) {
78026 unsigned int newflags;
78027
78028 +#ifdef CONFIG_PAX_SEGMEXEC
78029 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
78030 + break;
78031 +#endif
78032 +
78033 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
78034
78035 newflags = vma->vm_flags | VM_LOCKED;
78036 @@ -507,6 +503,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
78037 lock_limit >>= PAGE_SHIFT;
78038
78039 /* check against resource limits */
78040 + gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
78041 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
78042 error = do_mlock(start, len, 1);
78043 up_write(&current->mm->mmap_sem);
78044 @@ -528,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
78045 static int do_mlockall(int flags)
78046 {
78047 struct vm_area_struct * vma, * prev = NULL;
78048 - unsigned int def_flags = 0;
78049
78050 if (flags & MCL_FUTURE)
78051 - def_flags = VM_LOCKED;
78052 - current->mm->def_flags = def_flags;
78053 + current->mm->def_flags |= VM_LOCKED;
78054 + else
78055 + current->mm->def_flags &= ~VM_LOCKED;
78056 if (flags == MCL_FUTURE)
78057 goto out;
78058
78059 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
78060 - unsigned int newflags;
78061 + unsigned long newflags;
78062
78063 +#ifdef CONFIG_PAX_SEGMEXEC
78064 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
78065 + break;
78066 +#endif
78067 +
78068 + BUG_ON(vma->vm_end > TASK_SIZE);
78069 newflags = vma->vm_flags | VM_LOCKED;
78070 if (!(flags & MCL_CURRENT))
78071 newflags &= ~VM_LOCKED;
78072 @@ -570,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
78073 lock_limit >>= PAGE_SHIFT;
78074
78075 ret = -ENOMEM;
78076 + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
78077 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
78078 capable(CAP_IPC_LOCK))
78079 ret = do_mlockall(flags);
78080 diff --git a/mm/mmap.c b/mm/mmap.c
78081 index 4b80cbf..cd3731c 100644
78082 --- a/mm/mmap.c
78083 +++ b/mm/mmap.c
78084 @@ -45,6 +45,16 @@
78085 #define arch_rebalance_pgtables(addr, len) (addr)
78086 #endif
78087
78088 +static inline void verify_mm_writelocked(struct mm_struct *mm)
78089 +{
78090 +#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
78091 + if (unlikely(down_read_trylock(&mm->mmap_sem))) {
78092 + up_read(&mm->mmap_sem);
78093 + BUG();
78094 + }
78095 +#endif
78096 +}
78097 +
78098 static void unmap_region(struct mm_struct *mm,
78099 struct vm_area_struct *vma, struct vm_area_struct *prev,
78100 unsigned long start, unsigned long end);
78101 @@ -70,22 +80,32 @@ static void unmap_region(struct mm_struct *mm,
78102 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
78103 *
78104 */
78105 -pgprot_t protection_map[16] = {
78106 +pgprot_t protection_map[16] __read_only = {
78107 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
78108 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
78109 };
78110
78111 pgprot_t vm_get_page_prot(unsigned long vm_flags)
78112 {
78113 - return __pgprot(pgprot_val(protection_map[vm_flags &
78114 + pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
78115 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
78116 pgprot_val(arch_vm_get_page_prot(vm_flags)));
78117 +
78118 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
78119 + if (!nx_enabled &&
78120 + (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
78121 + (vm_flags & (VM_READ | VM_WRITE)))
78122 + prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
78123 +#endif
78124 +
78125 + return prot;
78126 }
78127 EXPORT_SYMBOL(vm_get_page_prot);
78128
78129 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
78130 int sysctl_overcommit_ratio = 50; /* default is 50% */
78131 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
78132 +unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
78133 struct percpu_counter vm_committed_as;
78134
78135 /*
78136 @@ -231,6 +251,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
78137 struct vm_area_struct *next = vma->vm_next;
78138
78139 might_sleep();
78140 + BUG_ON(vma->vm_mirror);
78141 if (vma->vm_ops && vma->vm_ops->close)
78142 vma->vm_ops->close(vma);
78143 if (vma->vm_file) {
78144 @@ -267,6 +288,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
78145 * not page aligned -Ram Gupta
78146 */
78147 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
78148 + gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
78149 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
78150 (mm->end_data - mm->start_data) > rlim)
78151 goto out;
78152 @@ -704,6 +726,12 @@ static int
78153 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
78154 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
78155 {
78156 +
78157 +#ifdef CONFIG_PAX_SEGMEXEC
78158 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
78159 + return 0;
78160 +#endif
78161 +
78162 if (is_mergeable_vma(vma, file, vm_flags) &&
78163 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
78164 if (vma->vm_pgoff == vm_pgoff)
78165 @@ -723,6 +751,12 @@ static int
78166 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
78167 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
78168 {
78169 +
78170 +#ifdef CONFIG_PAX_SEGMEXEC
78171 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
78172 + return 0;
78173 +#endif
78174 +
78175 if (is_mergeable_vma(vma, file, vm_flags) &&
78176 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
78177 pgoff_t vm_pglen;
78178 @@ -765,12 +799,19 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
78179 struct vm_area_struct *vma_merge(struct mm_struct *mm,
78180 struct vm_area_struct *prev, unsigned long addr,
78181 unsigned long end, unsigned long vm_flags,
78182 - struct anon_vma *anon_vma, struct file *file,
78183 + struct anon_vma *anon_vma, struct file *file,
78184 pgoff_t pgoff, struct mempolicy *policy)
78185 {
78186 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
78187 struct vm_area_struct *area, *next;
78188
78189 +#ifdef CONFIG_PAX_SEGMEXEC
78190 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
78191 + struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
78192 +
78193 + BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
78194 +#endif
78195 +
78196 /*
78197 * We later require that vma->vm_flags == vm_flags,
78198 * so this tests vma->vm_flags & VM_SPECIAL, too.
78199 @@ -786,6 +827,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
78200 if (next && next->vm_end == end) /* cases 6, 7, 8 */
78201 next = next->vm_next;
78202
78203 +#ifdef CONFIG_PAX_SEGMEXEC
78204 + if (prev)
78205 + prev_m = pax_find_mirror_vma(prev);
78206 + if (area)
78207 + area_m = pax_find_mirror_vma(area);
78208 + if (next)
78209 + next_m = pax_find_mirror_vma(next);
78210 +#endif
78211 +
78212 /*
78213 * Can it merge with the predecessor?
78214 */
78215 @@ -805,9 +855,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
78216 /* cases 1, 6 */
78217 vma_adjust(prev, prev->vm_start,
78218 next->vm_end, prev->vm_pgoff, NULL);
78219 - } else /* cases 2, 5, 7 */
78220 +
78221 +#ifdef CONFIG_PAX_SEGMEXEC
78222 + if (prev_m)
78223 + vma_adjust(prev_m, prev_m->vm_start,
78224 + next_m->vm_end, prev_m->vm_pgoff, NULL);
78225 +#endif
78226 +
78227 + } else { /* cases 2, 5, 7 */
78228 vma_adjust(prev, prev->vm_start,
78229 end, prev->vm_pgoff, NULL);
78230 +
78231 +#ifdef CONFIG_PAX_SEGMEXEC
78232 + if (prev_m)
78233 + vma_adjust(prev_m, prev_m->vm_start,
78234 + end_m, prev_m->vm_pgoff, NULL);
78235 +#endif
78236 +
78237 + }
78238 return prev;
78239 }
78240
78241 @@ -818,12 +883,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
78242 mpol_equal(policy, vma_policy(next)) &&
78243 can_vma_merge_before(next, vm_flags,
78244 anon_vma, file, pgoff+pglen)) {
78245 - if (prev && addr < prev->vm_end) /* case 4 */
78246 + if (prev && addr < prev->vm_end) { /* case 4 */
78247 vma_adjust(prev, prev->vm_start,
78248 addr, prev->vm_pgoff, NULL);
78249 - else /* cases 3, 8 */
78250 +
78251 +#ifdef CONFIG_PAX_SEGMEXEC
78252 + if (prev_m)
78253 + vma_adjust(prev_m, prev_m->vm_start,
78254 + addr_m, prev_m->vm_pgoff, NULL);
78255 +#endif
78256 +
78257 + } else { /* cases 3, 8 */
78258 vma_adjust(area, addr, next->vm_end,
78259 next->vm_pgoff - pglen, NULL);
78260 +
78261 +#ifdef CONFIG_PAX_SEGMEXEC
78262 + if (area_m)
78263 + vma_adjust(area_m, addr_m, next_m->vm_end,
78264 + next_m->vm_pgoff - pglen, NULL);
78265 +#endif
78266 +
78267 + }
78268 return area;
78269 }
78270
78271 @@ -898,14 +978,11 @@ none:
78272 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
78273 struct file *file, long pages)
78274 {
78275 - const unsigned long stack_flags
78276 - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
78277 -
78278 if (file) {
78279 mm->shared_vm += pages;
78280 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
78281 mm->exec_vm += pages;
78282 - } else if (flags & stack_flags)
78283 + } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
78284 mm->stack_vm += pages;
78285 if (flags & (VM_RESERVED|VM_IO))
78286 mm->reserved_vm += pages;
78287 @@ -932,7 +1009,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
78288 * (the exception is when the underlying filesystem is noexec
78289 * mounted, in which case we dont add PROT_EXEC.)
78290 */
78291 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
78292 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
78293 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
78294 prot |= PROT_EXEC;
78295
78296 @@ -958,7 +1035,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
78297 /* Obtain the address to map to. we verify (or select) it and ensure
78298 * that it represents a valid section of the address space.
78299 */
78300 - addr = get_unmapped_area(file, addr, len, pgoff, flags);
78301 + addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
78302 if (addr & ~PAGE_MASK)
78303 return addr;
78304
78305 @@ -969,6 +1046,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
78306 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
78307 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
78308
78309 +#ifdef CONFIG_PAX_MPROTECT
78310 + if (mm->pax_flags & MF_PAX_MPROTECT) {
78311 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
78312 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
78313 + gr_log_rwxmmap(file);
78314 +
78315 +#ifdef CONFIG_PAX_EMUPLT
78316 + vm_flags &= ~VM_EXEC;
78317 +#else
78318 + return -EPERM;
78319 +#endif
78320 +
78321 + }
78322 +
78323 + if (!(vm_flags & VM_EXEC))
78324 + vm_flags &= ~VM_MAYEXEC;
78325 +#else
78326 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
78327 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
78328 +#endif
78329 + else
78330 + vm_flags &= ~VM_MAYWRITE;
78331 + }
78332 +#endif
78333 +
78334 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
78335 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
78336 + vm_flags &= ~VM_PAGEEXEC;
78337 +#endif
78338 +
78339 if (flags & MAP_LOCKED)
78340 if (!can_do_mlock())
78341 return -EPERM;
78342 @@ -980,6 +1087,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
78343 locked += mm->locked_vm;
78344 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
78345 lock_limit >>= PAGE_SHIFT;
78346 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
78347 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
78348 return -EAGAIN;
78349 }
78350 @@ -1053,6 +1161,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
78351 if (error)
78352 return error;
78353
78354 + if (!gr_acl_handle_mmap(file, prot))
78355 + return -EACCES;
78356 +
78357 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
78358 }
78359 EXPORT_SYMBOL(do_mmap_pgoff);
78360 @@ -1065,10 +1176,10 @@ EXPORT_SYMBOL(do_mmap_pgoff);
78361 */
78362 int vma_wants_writenotify(struct vm_area_struct *vma)
78363 {
78364 - unsigned int vm_flags = vma->vm_flags;
78365 + unsigned long vm_flags = vma->vm_flags;
78366
78367 /* If it was private or non-writable, the write bit is already clear */
78368 - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
78369 + if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
78370 return 0;
78371
78372 /* The backer wishes to know when pages are first written to? */
78373 @@ -1117,14 +1228,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
78374 unsigned long charged = 0;
78375 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
78376
78377 +#ifdef CONFIG_PAX_SEGMEXEC
78378 + struct vm_area_struct *vma_m = NULL;
78379 +#endif
78380 +
78381 + /*
78382 + * mm->mmap_sem is required to protect against another thread
78383 + * changing the mappings in case we sleep.
78384 + */
78385 + verify_mm_writelocked(mm);
78386 +
78387 /* Clear old maps */
78388 error = -ENOMEM;
78389 -munmap_back:
78390 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
78391 if (vma && vma->vm_start < addr + len) {
78392 if (do_munmap(mm, addr, len))
78393 return -ENOMEM;
78394 - goto munmap_back;
78395 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
78396 + BUG_ON(vma && vma->vm_start < addr + len);
78397 }
78398
78399 /* Check against address space limit. */
78400 @@ -1173,6 +1294,16 @@ munmap_back:
78401 goto unacct_error;
78402 }
78403
78404 +#ifdef CONFIG_PAX_SEGMEXEC
78405 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
78406 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
78407 + if (!vma_m) {
78408 + error = -ENOMEM;
78409 + goto free_vma;
78410 + }
78411 + }
78412 +#endif
78413 +
78414 vma->vm_mm = mm;
78415 vma->vm_start = addr;
78416 vma->vm_end = addr + len;
78417 @@ -1195,6 +1326,19 @@ munmap_back:
78418 error = file->f_op->mmap(file, vma);
78419 if (error)
78420 goto unmap_and_free_vma;
78421 +
78422 +#ifdef CONFIG_PAX_SEGMEXEC
78423 + if (vma_m && (vm_flags & VM_EXECUTABLE))
78424 + added_exe_file_vma(mm);
78425 +#endif
78426 +
78427 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
78428 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
78429 + vma->vm_flags |= VM_PAGEEXEC;
78430 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
78431 + }
78432 +#endif
78433 +
78434 if (vm_flags & VM_EXECUTABLE)
78435 added_exe_file_vma(mm);
78436
78437 @@ -1218,6 +1362,11 @@ munmap_back:
78438 vma_link(mm, vma, prev, rb_link, rb_parent);
78439 file = vma->vm_file;
78440
78441 +#ifdef CONFIG_PAX_SEGMEXEC
78442 + if (vma_m)
78443 + pax_mirror_vma(vma_m, vma);
78444 +#endif
78445 +
78446 /* Once vma denies write, undo our temporary denial count */
78447 if (correct_wcount)
78448 atomic_inc(&inode->i_writecount);
78449 @@ -1226,6 +1375,7 @@ out:
78450
78451 mm->total_vm += len >> PAGE_SHIFT;
78452 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
78453 + track_exec_limit(mm, addr, addr + len, vm_flags);
78454 if (vm_flags & VM_LOCKED) {
78455 /*
78456 * makes pages present; downgrades, drops, reacquires mmap_sem
78457 @@ -1248,6 +1398,12 @@ unmap_and_free_vma:
78458 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
78459 charged = 0;
78460 free_vma:
78461 +
78462 +#ifdef CONFIG_PAX_SEGMEXEC
78463 + if (vma_m)
78464 + kmem_cache_free(vm_area_cachep, vma_m);
78465 +#endif
78466 +
78467 kmem_cache_free(vm_area_cachep, vma);
78468 unacct_error:
78469 if (charged)
78470 @@ -1255,6 +1411,44 @@ unacct_error:
78471 return error;
78472 }
78473
78474 +bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
78475 +{
78476 + if (!vma) {
78477 +#ifdef CONFIG_STACK_GROWSUP
78478 + if (addr > sysctl_heap_stack_gap)
78479 + vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
78480 + else
78481 + vma = find_vma(current->mm, 0);
78482 + if (vma && (vma->vm_flags & VM_GROWSUP))
78483 + return false;
78484 +#endif
78485 + return true;
78486 + }
78487 +
78488 + if (addr + len > vma->vm_start)
78489 + return false;
78490 +
78491 + if (vma->vm_flags & VM_GROWSDOWN)
78492 + return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
78493 +#ifdef CONFIG_STACK_GROWSUP
78494 + else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
78495 + return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
78496 +#endif
78497 +
78498 + return true;
78499 +}
78500 +
78501 +unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
78502 +{
78503 + if (vma->vm_start < len)
78504 + return -ENOMEM;
78505 + if (!(vma->vm_flags & VM_GROWSDOWN))
78506 + return vma->vm_start - len;
78507 + if (sysctl_heap_stack_gap <= vma->vm_start - len)
78508 + return vma->vm_start - len - sysctl_heap_stack_gap;
78509 + return -ENOMEM;
78510 +}
78511 +
78512 /* Get an address range which is currently unmapped.
78513 * For shmat() with addr=0.
78514 *
78515 @@ -1281,18 +1475,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
78516 if (flags & MAP_FIXED)
78517 return addr;
78518
78519 +#ifdef CONFIG_PAX_RANDMMAP
78520 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
78521 +#endif
78522 +
78523 if (addr) {
78524 addr = PAGE_ALIGN(addr);
78525 - vma = find_vma(mm, addr);
78526 - if (TASK_SIZE - len >= addr &&
78527 - (!vma || addr + len <= vma->vm_start))
78528 - return addr;
78529 + if (TASK_SIZE - len >= addr) {
78530 + vma = find_vma(mm, addr);
78531 + if (check_heap_stack_gap(vma, addr, len))
78532 + return addr;
78533 + }
78534 }
78535 if (len > mm->cached_hole_size) {
78536 - start_addr = addr = mm->free_area_cache;
78537 + start_addr = addr = mm->free_area_cache;
78538 } else {
78539 - start_addr = addr = TASK_UNMAPPED_BASE;
78540 - mm->cached_hole_size = 0;
78541 + start_addr = addr = mm->mmap_base;
78542 + mm->cached_hole_size = 0;
78543 }
78544
78545 full_search:
78546 @@ -1303,34 +1502,40 @@ full_search:
78547 * Start a new search - just in case we missed
78548 * some holes.
78549 */
78550 - if (start_addr != TASK_UNMAPPED_BASE) {
78551 - addr = TASK_UNMAPPED_BASE;
78552 - start_addr = addr;
78553 + if (start_addr != mm->mmap_base) {
78554 + start_addr = addr = mm->mmap_base;
78555 mm->cached_hole_size = 0;
78556 goto full_search;
78557 }
78558 return -ENOMEM;
78559 }
78560 - if (!vma || addr + len <= vma->vm_start) {
78561 - /*
78562 - * Remember the place where we stopped the search:
78563 - */
78564 - mm->free_area_cache = addr + len;
78565 - return addr;
78566 - }
78567 + if (check_heap_stack_gap(vma, addr, len))
78568 + break;
78569 if (addr + mm->cached_hole_size < vma->vm_start)
78570 mm->cached_hole_size = vma->vm_start - addr;
78571 addr = vma->vm_end;
78572 }
78573 +
78574 + /*
78575 + * Remember the place where we stopped the search:
78576 + */
78577 + mm->free_area_cache = addr + len;
78578 + return addr;
78579 }
78580 #endif
78581
78582 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
78583 {
78584 +
78585 +#ifdef CONFIG_PAX_SEGMEXEC
78586 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
78587 + return;
78588 +#endif
78589 +
78590 /*
78591 * Is this a new hole at the lowest possible address?
78592 */
78593 - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
78594 + if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
78595 mm->free_area_cache = addr;
78596 mm->cached_hole_size = ~0UL;
78597 }
78598 @@ -1348,7 +1553,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
78599 {
78600 struct vm_area_struct *vma;
78601 struct mm_struct *mm = current->mm;
78602 - unsigned long addr = addr0;
78603 + unsigned long base = mm->mmap_base, addr = addr0;
78604
78605 /* requested length too big for entire address space */
78606 if (len > TASK_SIZE)
78607 @@ -1357,13 +1562,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
78608 if (flags & MAP_FIXED)
78609 return addr;
78610
78611 +#ifdef CONFIG_PAX_RANDMMAP
78612 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
78613 +#endif
78614 +
78615 /* requesting a specific address */
78616 if (addr) {
78617 addr = PAGE_ALIGN(addr);
78618 - vma = find_vma(mm, addr);
78619 - if (TASK_SIZE - len >= addr &&
78620 - (!vma || addr + len <= vma->vm_start))
78621 - return addr;
78622 + if (TASK_SIZE - len >= addr) {
78623 + vma = find_vma(mm, addr);
78624 + if (check_heap_stack_gap(vma, addr, len))
78625 + return addr;
78626 + }
78627 }
78628
78629 /* check if free_area_cache is useful for us */
78630 @@ -1378,7 +1588,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
78631 /* make sure it can fit in the remaining address space */
78632 if (addr > len) {
78633 vma = find_vma(mm, addr-len);
78634 - if (!vma || addr <= vma->vm_start)
78635 + if (check_heap_stack_gap(vma, addr - len, len))
78636 /* remember the address as a hint for next time */
78637 return (mm->free_area_cache = addr-len);
78638 }
78639 @@ -1395,7 +1605,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
78640 * return with success:
78641 */
78642 vma = find_vma(mm, addr);
78643 - if (!vma || addr+len <= vma->vm_start)
78644 + if (check_heap_stack_gap(vma, addr, len))
78645 /* remember the address as a hint for next time */
78646 return (mm->free_area_cache = addr);
78647
78648 @@ -1404,8 +1614,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
78649 mm->cached_hole_size = vma->vm_start - addr;
78650
78651 /* try just below the current vma->vm_start */
78652 - addr = vma->vm_start-len;
78653 - } while (len < vma->vm_start);
78654 + addr = skip_heap_stack_gap(vma, len);
78655 + } while (!IS_ERR_VALUE(addr));
78656
78657 bottomup:
78658 /*
78659 @@ -1414,13 +1624,21 @@ bottomup:
78660 * can happen with large stack limits and large mmap()
78661 * allocations.
78662 */
78663 + mm->mmap_base = TASK_UNMAPPED_BASE;
78664 +
78665 +#ifdef CONFIG_PAX_RANDMMAP
78666 + if (mm->pax_flags & MF_PAX_RANDMMAP)
78667 + mm->mmap_base += mm->delta_mmap;
78668 +#endif
78669 +
78670 + mm->free_area_cache = mm->mmap_base;
78671 mm->cached_hole_size = ~0UL;
78672 - mm->free_area_cache = TASK_UNMAPPED_BASE;
78673 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
78674 /*
78675 * Restore the topdown base:
78676 */
78677 - mm->free_area_cache = mm->mmap_base;
78678 + mm->mmap_base = base;
78679 + mm->free_area_cache = base;
78680 mm->cached_hole_size = ~0UL;
78681
78682 return addr;
78683 @@ -1429,6 +1647,12 @@ bottomup:
78684
78685 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
78686 {
78687 +
78688 +#ifdef CONFIG_PAX_SEGMEXEC
78689 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
78690 + return;
78691 +#endif
78692 +
78693 /*
78694 * Is this a new hole at the highest possible address?
78695 */
78696 @@ -1436,8 +1660,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
78697 mm->free_area_cache = addr;
78698
78699 /* dont allow allocations above current base */
78700 - if (mm->free_area_cache > mm->mmap_base)
78701 + if (mm->free_area_cache > mm->mmap_base) {
78702 mm->free_area_cache = mm->mmap_base;
78703 + mm->cached_hole_size = ~0UL;
78704 + }
78705 }
78706
78707 unsigned long
78708 @@ -1510,40 +1736,41 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
78709
78710 EXPORT_SYMBOL(find_vma);
78711
78712 -/* Same as find_vma, but also return a pointer to the previous VMA in *pprev. */
78713 +/*
78714 + * Same as find_vma, but also return a pointer to the previous VMA in *pprev.
78715 + * Note: pprev is set to NULL when return value is NULL.
78716 + */
78717 struct vm_area_struct *
78718 find_vma_prev(struct mm_struct *mm, unsigned long addr,
78719 struct vm_area_struct **pprev)
78720 {
78721 - struct vm_area_struct *vma = NULL, *prev = NULL;
78722 - struct rb_node *rb_node;
78723 - if (!mm)
78724 - goto out;
78725 + struct vm_area_struct *vma;
78726
78727 - /* Guard against addr being lower than the first VMA */
78728 - vma = mm->mmap;
78729 + vma = find_vma(mm, addr);
78730 + *pprev = vma ? vma->vm_prev : NULL;
78731 + return vma;
78732 +}
78733
78734 - /* Go through the RB tree quickly. */
78735 - rb_node = mm->mm_rb.rb_node;
78736 +#ifdef CONFIG_PAX_SEGMEXEC
78737 +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
78738 +{
78739 + struct vm_area_struct *vma_m;
78740
78741 - while (rb_node) {
78742 - struct vm_area_struct *vma_tmp;
78743 - vma_tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
78744 -
78745 - if (addr < vma_tmp->vm_end) {
78746 - rb_node = rb_node->rb_left;
78747 - } else {
78748 - prev = vma_tmp;
78749 - if (!prev->vm_next || (addr < prev->vm_next->vm_end))
78750 - break;
78751 - rb_node = rb_node->rb_right;
78752 - }
78753 + BUG_ON(!vma || vma->vm_start >= vma->vm_end);
78754 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
78755 + BUG_ON(vma->vm_mirror);
78756 + return NULL;
78757 }
78758 -
78759 -out:
78760 - *pprev = prev;
78761 - return prev ? prev->vm_next : vma;
78762 + BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
78763 + vma_m = vma->vm_mirror;
78764 + BUG_ON(!vma_m || vma_m->vm_mirror != vma);
78765 + BUG_ON(vma->vm_file != vma_m->vm_file);
78766 + BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
78767 + BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff || vma->anon_vma != vma_m->anon_vma);
78768 + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
78769 + return vma_m;
78770 }
78771 +#endif
78772
78773 /*
78774 * Verify that the stack growth is acceptable and
78775 @@ -1561,6 +1788,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
78776 return -ENOMEM;
78777
78778 /* Stack limit test */
78779 + gr_learn_resource(current, RLIMIT_STACK, size, 1);
78780 if (size > rlim[RLIMIT_STACK].rlim_cur)
78781 return -ENOMEM;
78782
78783 @@ -1570,6 +1798,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
78784 unsigned long limit;
78785 locked = mm->locked_vm + grow;
78786 limit = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
78787 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
78788 if (locked > limit && !capable(CAP_IPC_LOCK))
78789 return -ENOMEM;
78790 }
78791 @@ -1600,37 +1829,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
78792 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
78793 * vma is the last one with address > vma->vm_end. Have to extend vma.
78794 */
78795 +#ifndef CONFIG_IA64
78796 +static
78797 +#endif
78798 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
78799 {
78800 int error;
78801 + bool locknext;
78802
78803 if (!(vma->vm_flags & VM_GROWSUP))
78804 return -EFAULT;
78805
78806 + /* Also guard against wrapping around to address 0. */
78807 + if (address < PAGE_ALIGN(address+1))
78808 + address = PAGE_ALIGN(address+1);
78809 + else
78810 + return -ENOMEM;
78811 +
78812 /*
78813 * We must make sure the anon_vma is allocated
78814 * so that the anon_vma locking is not a noop.
78815 */
78816 if (unlikely(anon_vma_prepare(vma)))
78817 return -ENOMEM;
78818 + locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
78819 + if (locknext && anon_vma_prepare(vma->vm_next))
78820 + return -ENOMEM;
78821 anon_vma_lock(vma);
78822 + if (locknext)
78823 + anon_vma_lock(vma->vm_next);
78824
78825 /*
78826 * vma->vm_start/vm_end cannot change under us because the caller
78827 * is required to hold the mmap_sem in read mode. We need the
78828 - * anon_vma lock to serialize against concurrent expand_stacks.
78829 - * Also guard against wrapping around to address 0.
78830 + * anon_vma locks to serialize against concurrent expand_stacks
78831 + * and expand_upwards.
78832 */
78833 - if (address < PAGE_ALIGN(address+4))
78834 - address = PAGE_ALIGN(address+4);
78835 - else {
78836 - anon_vma_unlock(vma);
78837 - return -ENOMEM;
78838 - }
78839 error = 0;
78840
78841 /* Somebody else might have raced and expanded it already */
78842 - if (address > vma->vm_end) {
78843 + if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
78844 + error = -ENOMEM;
78845 + else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
78846 unsigned long size, grow;
78847
78848 size = address - vma->vm_start;
78849 @@ -1643,6 +1883,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
78850 vma->vm_end = address;
78851 }
78852 }
78853 + if (locknext)
78854 + anon_vma_unlock(vma->vm_next);
78855 anon_vma_unlock(vma);
78856 return error;
78857 }
78858 @@ -1655,6 +1897,8 @@ static int expand_downwards(struct vm_area_struct *vma,
78859 unsigned long address)
78860 {
78861 int error;
78862 + bool lockprev = false;
78863 + struct vm_area_struct *prev;
78864
78865 /*
78866 * We must make sure the anon_vma is allocated
78867 @@ -1668,6 +1912,15 @@ static int expand_downwards(struct vm_area_struct *vma,
78868 if (error)
78869 return error;
78870
78871 + prev = vma->vm_prev;
78872 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
78873 + lockprev = prev && (prev->vm_flags & VM_GROWSUP);
78874 +#endif
78875 + if (lockprev && anon_vma_prepare(prev))
78876 + return -ENOMEM;
78877 + if (lockprev)
78878 + anon_vma_lock(prev);
78879 +
78880 anon_vma_lock(vma);
78881
78882 /*
78883 @@ -1677,9 +1930,17 @@ static int expand_downwards(struct vm_area_struct *vma,
78884 */
78885
78886 /* Somebody else might have raced and expanded it already */
78887 - if (address < vma->vm_start) {
78888 + if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
78889 + error = -ENOMEM;
78890 + else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
78891 unsigned long size, grow;
78892
78893 +#ifdef CONFIG_PAX_SEGMEXEC
78894 + struct vm_area_struct *vma_m;
78895 +
78896 + vma_m = pax_find_mirror_vma(vma);
78897 +#endif
78898 +
78899 size = vma->vm_end - address;
78900 grow = (vma->vm_start - address) >> PAGE_SHIFT;
78901
78902 @@ -1689,10 +1950,22 @@ static int expand_downwards(struct vm_area_struct *vma,
78903 if (!error) {
78904 vma->vm_start = address;
78905 vma->vm_pgoff -= grow;
78906 + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
78907 +
78908 +#ifdef CONFIG_PAX_SEGMEXEC
78909 + if (vma_m) {
78910 + vma_m->vm_start -= grow << PAGE_SHIFT;
78911 + vma_m->vm_pgoff -= grow;
78912 + }
78913 +#endif
78914 +
78915 +
78916 }
78917 }
78918 }
78919 anon_vma_unlock(vma);
78920 + if (lockprev)
78921 + anon_vma_unlock(prev);
78922 return error;
78923 }
78924
78925 @@ -1768,6 +2041,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
78926 do {
78927 long nrpages = vma_pages(vma);
78928
78929 +#ifdef CONFIG_PAX_SEGMEXEC
78930 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
78931 + vma = remove_vma(vma);
78932 + continue;
78933 + }
78934 +#endif
78935 +
78936 mm->total_vm -= nrpages;
78937 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
78938 vma = remove_vma(vma);
78939 @@ -1813,6 +2093,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
78940 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
78941 vma->vm_prev = NULL;
78942 do {
78943 +
78944 +#ifdef CONFIG_PAX_SEGMEXEC
78945 + if (vma->vm_mirror) {
78946 + BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
78947 + vma->vm_mirror->vm_mirror = NULL;
78948 + vma->vm_mirror->vm_flags &= ~VM_EXEC;
78949 + vma->vm_mirror = NULL;
78950 + }
78951 +#endif
78952 +
78953 rb_erase(&vma->vm_rb, &mm->mm_rb);
78954 mm->map_count--;
78955 tail_vma = vma;
78956 @@ -1840,10 +2130,25 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
78957 struct mempolicy *pol;
78958 struct vm_area_struct *new;
78959
78960 +#ifdef CONFIG_PAX_SEGMEXEC
78961 + struct vm_area_struct *vma_m, *new_m = NULL;
78962 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
78963 +#endif
78964 +
78965 if (is_vm_hugetlb_page(vma) && (addr &
78966 ~(huge_page_mask(hstate_vma(vma)))))
78967 return -EINVAL;
78968
78969 +#ifdef CONFIG_PAX_SEGMEXEC
78970 + vma_m = pax_find_mirror_vma(vma);
78971 +
78972 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
78973 + BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
78974 + if (mm->map_count >= sysctl_max_map_count-1)
78975 + return -ENOMEM;
78976 + } else
78977 +#endif
78978 +
78979 if (mm->map_count >= sysctl_max_map_count)
78980 return -ENOMEM;
78981
78982 @@ -1851,6 +2156,16 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
78983 if (!new)
78984 return -ENOMEM;
78985
78986 +#ifdef CONFIG_PAX_SEGMEXEC
78987 + if (vma_m) {
78988 + new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
78989 + if (!new_m) {
78990 + kmem_cache_free(vm_area_cachep, new);
78991 + return -ENOMEM;
78992 + }
78993 + }
78994 +#endif
78995 +
78996 /* most fields are the same, copy all, and then fixup */
78997 *new = *vma;
78998
78999 @@ -1861,8 +2176,29 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
79000 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
79001 }
79002
79003 +#ifdef CONFIG_PAX_SEGMEXEC
79004 + if (vma_m) {
79005 + *new_m = *vma_m;
79006 + new_m->vm_mirror = new;
79007 + new->vm_mirror = new_m;
79008 +
79009 + if (new_below)
79010 + new_m->vm_end = addr_m;
79011 + else {
79012 + new_m->vm_start = addr_m;
79013 + new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
79014 + }
79015 + }
79016 +#endif
79017 +
79018 pol = mpol_dup(vma_policy(vma));
79019 if (IS_ERR(pol)) {
79020 +
79021 +#ifdef CONFIG_PAX_SEGMEXEC
79022 + if (new_m)
79023 + kmem_cache_free(vm_area_cachep, new_m);
79024 +#endif
79025 +
79026 kmem_cache_free(vm_area_cachep, new);
79027 return PTR_ERR(pol);
79028 }
79029 @@ -1883,6 +2219,28 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
79030 else
79031 vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
79032
79033 +#ifdef CONFIG_PAX_SEGMEXEC
79034 + if (vma_m) {
79035 + mpol_get(pol);
79036 + vma_set_policy(new_m, pol);
79037 +
79038 + if (new_m->vm_file) {
79039 + get_file(new_m->vm_file);
79040 + if (vma_m->vm_flags & VM_EXECUTABLE)
79041 + added_exe_file_vma(mm);
79042 + }
79043 +
79044 + if (new_m->vm_ops && new_m->vm_ops->open)
79045 + new_m->vm_ops->open(new_m);
79046 +
79047 + if (new_below)
79048 + vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
79049 + ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
79050 + else
79051 + vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
79052 + }
79053 +#endif
79054 +
79055 return 0;
79056 }
79057
79058 @@ -1891,11 +2249,30 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
79059 * work. This now handles partial unmappings.
79060 * Jeremy Fitzhardinge <jeremy@goop.org>
79061 */
79062 +#ifdef CONFIG_PAX_SEGMEXEC
79063 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
79064 {
79065 + int ret = __do_munmap(mm, start, len);
79066 + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
79067 + return ret;
79068 +
79069 + return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
79070 +}
79071 +
79072 +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
79073 +#else
79074 +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
79075 +#endif
79076 +{
79077 unsigned long end;
79078 struct vm_area_struct *vma, *prev, *last;
79079
79080 + /*
79081 + * mm->mmap_sem is required to protect against another thread
79082 + * changing the mappings in case we sleep.
79083 + */
79084 + verify_mm_writelocked(mm);
79085 +
79086 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
79087 return -EINVAL;
79088
79089 @@ -1959,6 +2336,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
79090 /* Fix up all other VM information */
79091 remove_vma_list(mm, vma);
79092
79093 + track_exec_limit(mm, start, end, 0UL);
79094 +
79095 return 0;
79096 }
79097
79098 @@ -1971,22 +2350,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
79099
79100 profile_munmap(addr);
79101
79102 +#ifdef CONFIG_PAX_SEGMEXEC
79103 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
79104 + (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
79105 + return -EINVAL;
79106 +#endif
79107 +
79108 down_write(&mm->mmap_sem);
79109 ret = do_munmap(mm, addr, len);
79110 up_write(&mm->mmap_sem);
79111 return ret;
79112 }
79113
79114 -static inline void verify_mm_writelocked(struct mm_struct *mm)
79115 -{
79116 -#ifdef CONFIG_DEBUG_VM
79117 - if (unlikely(down_read_trylock(&mm->mmap_sem))) {
79118 - WARN_ON(1);
79119 - up_read(&mm->mmap_sem);
79120 - }
79121 -#endif
79122 -}
79123 -
79124 /*
79125 * this is really a simplified "do_mmap". it only handles
79126 * anonymous maps. eventually we may be able to do some
79127 @@ -2000,6 +2375,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
79128 struct rb_node ** rb_link, * rb_parent;
79129 pgoff_t pgoff = addr >> PAGE_SHIFT;
79130 int error;
79131 + unsigned long charged;
79132
79133 len = PAGE_ALIGN(len);
79134 if (!len)
79135 @@ -2011,16 +2387,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
79136
79137 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
79138
79139 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
79140 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
79141 + flags &= ~VM_EXEC;
79142 +
79143 +#ifdef CONFIG_PAX_MPROTECT
79144 + if (mm->pax_flags & MF_PAX_MPROTECT)
79145 + flags &= ~VM_MAYEXEC;
79146 +#endif
79147 +
79148 + }
79149 +#endif
79150 +
79151 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
79152 if (error & ~PAGE_MASK)
79153 return error;
79154
79155 + charged = len >> PAGE_SHIFT;
79156 +
79157 /*
79158 * mlock MCL_FUTURE?
79159 */
79160 if (mm->def_flags & VM_LOCKED) {
79161 unsigned long locked, lock_limit;
79162 - locked = len >> PAGE_SHIFT;
79163 + locked = charged;
79164 locked += mm->locked_vm;
79165 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
79166 lock_limit >>= PAGE_SHIFT;
79167 @@ -2037,22 +2427,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
79168 /*
79169 * Clear old maps. this also does some error checking for us
79170 */
79171 - munmap_back:
79172 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
79173 if (vma && vma->vm_start < addr + len) {
79174 if (do_munmap(mm, addr, len))
79175 return -ENOMEM;
79176 - goto munmap_back;
79177 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
79178 + BUG_ON(vma && vma->vm_start < addr + len);
79179 }
79180
79181 /* Check against address space limits *after* clearing old maps... */
79182 - if (!may_expand_vm(mm, len >> PAGE_SHIFT))
79183 + if (!may_expand_vm(mm, charged))
79184 return -ENOMEM;
79185
79186 if (mm->map_count > sysctl_max_map_count)
79187 return -ENOMEM;
79188
79189 - if (security_vm_enough_memory(len >> PAGE_SHIFT))
79190 + if (security_vm_enough_memory(charged))
79191 return -ENOMEM;
79192
79193 /* Can we just expand an old private anonymous mapping? */
79194 @@ -2066,7 +2456,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
79195 */
79196 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
79197 if (!vma) {
79198 - vm_unacct_memory(len >> PAGE_SHIFT);
79199 + vm_unacct_memory(charged);
79200 return -ENOMEM;
79201 }
79202
79203 @@ -2078,11 +2468,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
79204 vma->vm_page_prot = vm_get_page_prot(flags);
79205 vma_link(mm, vma, prev, rb_link, rb_parent);
79206 out:
79207 - mm->total_vm += len >> PAGE_SHIFT;
79208 + mm->total_vm += charged;
79209 if (flags & VM_LOCKED) {
79210 if (!mlock_vma_pages_range(vma, addr, addr + len))
79211 - mm->locked_vm += (len >> PAGE_SHIFT);
79212 + mm->locked_vm += charged;
79213 }
79214 + track_exec_limit(mm, addr, addr + len, flags);
79215 return addr;
79216 }
79217
79218 @@ -2129,8 +2520,10 @@ void exit_mmap(struct mm_struct *mm)
79219 * Walk the list again, actually closing and freeing it,
79220 * with preemption enabled, without holding any MM locks.
79221 */
79222 - while (vma)
79223 + while (vma) {
79224 + vma->vm_mirror = NULL;
79225 vma = remove_vma(vma);
79226 + }
79227
79228 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
79229 }
79230 @@ -2144,6 +2537,10 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
79231 struct vm_area_struct * __vma, * prev;
79232 struct rb_node ** rb_link, * rb_parent;
79233
79234 +#ifdef CONFIG_PAX_SEGMEXEC
79235 + struct vm_area_struct *vma_m = NULL;
79236 +#endif
79237 +
79238 /*
79239 * The vm_pgoff of a purely anonymous vma should be irrelevant
79240 * until its first write fault, when page's anon_vma and index
79241 @@ -2166,7 +2563,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
79242 if ((vma->vm_flags & VM_ACCOUNT) &&
79243 security_vm_enough_memory_mm(mm, vma_pages(vma)))
79244 return -ENOMEM;
79245 +
79246 +#ifdef CONFIG_PAX_SEGMEXEC
79247 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
79248 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
79249 + if (!vma_m)
79250 + return -ENOMEM;
79251 + }
79252 +#endif
79253 +
79254 vma_link(mm, vma, prev, rb_link, rb_parent);
79255 +
79256 +#ifdef CONFIG_PAX_SEGMEXEC
79257 + if (vma_m)
79258 + pax_mirror_vma(vma_m, vma);
79259 +#endif
79260 +
79261 return 0;
79262 }
79263
79264 @@ -2184,6 +2596,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
79265 struct rb_node **rb_link, *rb_parent;
79266 struct mempolicy *pol;
79267
79268 + BUG_ON(vma->vm_mirror);
79269 +
79270 /*
79271 * If anonymous vma has not yet been faulted, update new pgoff
79272 * to match new location, to increase its chance of merging.
79273 @@ -2227,6 +2641,35 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
79274 return new_vma;
79275 }
79276
79277 +#ifdef CONFIG_PAX_SEGMEXEC
79278 +void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
79279 +{
79280 + struct vm_area_struct *prev_m;
79281 + struct rb_node **rb_link_m, *rb_parent_m;
79282 + struct mempolicy *pol_m;
79283 +
79284 + BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
79285 + BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
79286 + BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
79287 + *vma_m = *vma;
79288 + pol_m = vma_policy(vma_m);
79289 + mpol_get(pol_m);
79290 + vma_set_policy(vma_m, pol_m);
79291 + vma_m->vm_start += SEGMEXEC_TASK_SIZE;
79292 + vma_m->vm_end += SEGMEXEC_TASK_SIZE;
79293 + vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
79294 + vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
79295 + if (vma_m->vm_file)
79296 + get_file(vma_m->vm_file);
79297 + if (vma_m->vm_ops && vma_m->vm_ops->open)
79298 + vma_m->vm_ops->open(vma_m);
79299 + find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
79300 + vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
79301 + vma_m->vm_mirror = vma;
79302 + vma->vm_mirror = vma_m;
79303 +}
79304 +#endif
79305 +
79306 /*
79307 * Return true if the calling process may expand its vm space by the passed
79308 * number of pages
79309 @@ -2237,7 +2680,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
79310 unsigned long lim;
79311
79312 lim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
79313 -
79314 + gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
79315 if (cur + npages > lim)
79316 return 0;
79317 return 1;
79318 @@ -2307,6 +2750,22 @@ int install_special_mapping(struct mm_struct *mm,
79319 vma->vm_start = addr;
79320 vma->vm_end = addr + len;
79321
79322 +#ifdef CONFIG_PAX_MPROTECT
79323 + if (mm->pax_flags & MF_PAX_MPROTECT) {
79324 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
79325 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
79326 + return -EPERM;
79327 + if (!(vm_flags & VM_EXEC))
79328 + vm_flags &= ~VM_MAYEXEC;
79329 +#else
79330 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
79331 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
79332 +#endif
79333 + else
79334 + vm_flags &= ~VM_MAYWRITE;
79335 + }
79336 +#endif
79337 +
79338 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
79339 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
79340
79341 diff --git a/mm/mprotect.c b/mm/mprotect.c
79342 index 1737c7e..c7faeb4 100644
79343 --- a/mm/mprotect.c
79344 +++ b/mm/mprotect.c
79345 @@ -24,10 +24,16 @@
79346 #include <linux/mmu_notifier.h>
79347 #include <linux/migrate.h>
79348 #include <linux/perf_event.h>
79349 +
79350 +#ifdef CONFIG_PAX_MPROTECT
79351 +#include <linux/elf.h>
79352 +#endif
79353 +
79354 #include <asm/uaccess.h>
79355 #include <asm/pgtable.h>
79356 #include <asm/cacheflush.h>
79357 #include <asm/tlbflush.h>
79358 +#include <asm/mmu_context.h>
79359
79360 #ifndef pgprot_modify
79361 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
79362 @@ -132,6 +138,48 @@ static void change_protection(struct vm_area_struct *vma,
79363 flush_tlb_range(vma, start, end);
79364 }
79365
79366 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
79367 +/* called while holding the mmap semaphor for writing except stack expansion */
79368 +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
79369 +{
79370 + unsigned long oldlimit, newlimit = 0UL;
79371 +
79372 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || nx_enabled)
79373 + return;
79374 +
79375 + spin_lock(&mm->page_table_lock);
79376 + oldlimit = mm->context.user_cs_limit;
79377 + if ((prot & VM_EXEC) && oldlimit < end)
79378 + /* USER_CS limit moved up */
79379 + newlimit = end;
79380 + else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
79381 + /* USER_CS limit moved down */
79382 + newlimit = start;
79383 +
79384 + if (newlimit) {
79385 + mm->context.user_cs_limit = newlimit;
79386 +
79387 +#ifdef CONFIG_SMP
79388 + wmb();
79389 + cpus_clear(mm->context.cpu_user_cs_mask);
79390 + cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
79391 +#endif
79392 +
79393 + set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
79394 + }
79395 + spin_unlock(&mm->page_table_lock);
79396 + if (newlimit == end) {
79397 + struct vm_area_struct *vma = find_vma(mm, oldlimit);
79398 +
79399 + for (; vma && vma->vm_start < end; vma = vma->vm_next)
79400 + if (is_vm_hugetlb_page(vma))
79401 + hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
79402 + else
79403 + change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
79404 + }
79405 +}
79406 +#endif
79407 +
79408 int
79409 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
79410 unsigned long start, unsigned long end, unsigned long newflags)
79411 @@ -144,11 +192,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
79412 int error;
79413 int dirty_accountable = 0;
79414
79415 +#ifdef CONFIG_PAX_SEGMEXEC
79416 + struct vm_area_struct *vma_m = NULL;
79417 + unsigned long start_m, end_m;
79418 +
79419 + start_m = start + SEGMEXEC_TASK_SIZE;
79420 + end_m = end + SEGMEXEC_TASK_SIZE;
79421 +#endif
79422 +
79423 if (newflags == oldflags) {
79424 *pprev = vma;
79425 return 0;
79426 }
79427
79428 + if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
79429 + struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
79430 +
79431 + if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
79432 + return -ENOMEM;
79433 +
79434 + if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
79435 + return -ENOMEM;
79436 + }
79437 +
79438 /*
79439 * If we make a private mapping writable we increase our commit;
79440 * but (without finer accounting) cannot reduce our commit if we
79441 @@ -165,6 +231,38 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
79442 }
79443 }
79444
79445 +#ifdef CONFIG_PAX_SEGMEXEC
79446 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
79447 + if (start != vma->vm_start) {
79448 + error = split_vma(mm, vma, start, 1);
79449 + if (error)
79450 + goto fail;
79451 + BUG_ON(!*pprev || (*pprev)->vm_next == vma);
79452 + *pprev = (*pprev)->vm_next;
79453 + }
79454 +
79455 + if (end != vma->vm_end) {
79456 + error = split_vma(mm, vma, end, 0);
79457 + if (error)
79458 + goto fail;
79459 + }
79460 +
79461 + if (pax_find_mirror_vma(vma)) {
79462 + error = __do_munmap(mm, start_m, end_m - start_m);
79463 + if (error)
79464 + goto fail;
79465 + } else {
79466 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
79467 + if (!vma_m) {
79468 + error = -ENOMEM;
79469 + goto fail;
79470 + }
79471 + vma->vm_flags = newflags;
79472 + pax_mirror_vma(vma_m, vma);
79473 + }
79474 + }
79475 +#endif
79476 +
79477 /*
79478 * First try to merge with previous and/or next vma.
79479 */
79480 @@ -195,9 +293,21 @@ success:
79481 * vm_flags and vm_page_prot are protected by the mmap_sem
79482 * held in write mode.
79483 */
79484 +
79485 +#ifdef CONFIG_PAX_SEGMEXEC
79486 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
79487 + pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
79488 +#endif
79489 +
79490 vma->vm_flags = newflags;
79491 +
79492 +#ifdef CONFIG_PAX_MPROTECT
79493 + if (mm->binfmt && mm->binfmt->handle_mprotect)
79494 + mm->binfmt->handle_mprotect(vma, newflags);
79495 +#endif
79496 +
79497 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
79498 - vm_get_page_prot(newflags));
79499 + vm_get_page_prot(vma->vm_flags));
79500
79501 if (vma_wants_writenotify(vma)) {
79502 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
79503 @@ -239,6 +349,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
79504 end = start + len;
79505 if (end <= start)
79506 return -ENOMEM;
79507 +
79508 +#ifdef CONFIG_PAX_SEGMEXEC
79509 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
79510 + if (end > SEGMEXEC_TASK_SIZE)
79511 + return -EINVAL;
79512 + } else
79513 +#endif
79514 +
79515 + if (end > TASK_SIZE)
79516 + return -EINVAL;
79517 +
79518 if (!arch_validate_prot(prot))
79519 return -EINVAL;
79520
79521 @@ -246,7 +367,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
79522 /*
79523 * Does the application expect PROT_READ to imply PROT_EXEC:
79524 */
79525 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
79526 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
79527 prot |= PROT_EXEC;
79528
79529 vm_flags = calc_vm_prot_bits(prot);
79530 @@ -278,6 +399,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
79531 if (start > vma->vm_start)
79532 prev = vma;
79533
79534 +#ifdef CONFIG_PAX_MPROTECT
79535 + if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
79536 + current->mm->binfmt->handle_mprotect(vma, vm_flags);
79537 +#endif
79538 +
79539 for (nstart = start ; ; ) {
79540 unsigned long newflags;
79541
79542 @@ -287,6 +413,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
79543
79544 /* newflags >> 4 shift VM_MAY% in place of VM_% */
79545 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
79546 + if (prot & (PROT_WRITE | PROT_EXEC))
79547 + gr_log_rwxmprotect(vma->vm_file);
79548 +
79549 + error = -EACCES;
79550 + goto out;
79551 + }
79552 +
79553 + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
79554 error = -EACCES;
79555 goto out;
79556 }
79557 @@ -301,6 +435,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
79558 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
79559 if (error)
79560 goto out;
79561 +
79562 + track_exec_limit(current->mm, nstart, tmp, vm_flags);
79563 +
79564 nstart = tmp;
79565
79566 if (nstart < prev->vm_end)
79567 diff --git a/mm/mremap.c b/mm/mremap.c
79568 index 3e98d79..1706cec 100644
79569 --- a/mm/mremap.c
79570 +++ b/mm/mremap.c
79571 @@ -112,6 +112,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
79572 continue;
79573 pte = ptep_clear_flush(vma, old_addr, old_pte);
79574 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
79575 +
79576 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
79577 + if (!nx_enabled && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
79578 + pte = pte_exprotect(pte);
79579 +#endif
79580 +
79581 set_pte_at(mm, new_addr, new_pte, pte);
79582 }
79583
79584 @@ -271,6 +277,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
79585 if (is_vm_hugetlb_page(vma))
79586 goto Einval;
79587
79588 +#ifdef CONFIG_PAX_SEGMEXEC
79589 + if (pax_find_mirror_vma(vma))
79590 + goto Einval;
79591 +#endif
79592 +
79593 /* We can't remap across vm area boundaries */
79594 if (old_len > vma->vm_end - addr)
79595 goto Efault;
79596 @@ -327,20 +338,25 @@ static unsigned long mremap_to(unsigned long addr,
79597 unsigned long ret = -EINVAL;
79598 unsigned long charged = 0;
79599 unsigned long map_flags;
79600 + unsigned long pax_task_size = TASK_SIZE;
79601
79602 if (new_addr & ~PAGE_MASK)
79603 goto out;
79604
79605 - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
79606 +#ifdef CONFIG_PAX_SEGMEXEC
79607 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
79608 + pax_task_size = SEGMEXEC_TASK_SIZE;
79609 +#endif
79610 +
79611 + pax_task_size -= PAGE_SIZE;
79612 +
79613 + if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
79614 goto out;
79615
79616 /* Check if the location we're moving into overlaps the
79617 * old location at all, and fail if it does.
79618 */
79619 - if ((new_addr <= addr) && (new_addr+new_len) > addr)
79620 - goto out;
79621 -
79622 - if ((addr <= new_addr) && (addr+old_len) > new_addr)
79623 + if (addr + old_len > new_addr && new_addr + new_len > addr)
79624 goto out;
79625
79626 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
79627 @@ -412,6 +428,7 @@ unsigned long do_mremap(unsigned long addr,
79628 struct vm_area_struct *vma;
79629 unsigned long ret = -EINVAL;
79630 unsigned long charged = 0;
79631 + unsigned long pax_task_size = TASK_SIZE;
79632
79633 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
79634 goto out;
79635 @@ -430,6 +447,17 @@ unsigned long do_mremap(unsigned long addr,
79636 if (!new_len)
79637 goto out;
79638
79639 +#ifdef CONFIG_PAX_SEGMEXEC
79640 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
79641 + pax_task_size = SEGMEXEC_TASK_SIZE;
79642 +#endif
79643 +
79644 + pax_task_size -= PAGE_SIZE;
79645 +
79646 + if (new_len > pax_task_size || addr > pax_task_size-new_len ||
79647 + old_len > pax_task_size || addr > pax_task_size-old_len)
79648 + goto out;
79649 +
79650 if (flags & MREMAP_FIXED) {
79651 if (flags & MREMAP_MAYMOVE)
79652 ret = mremap_to(addr, old_len, new_addr, new_len);
79653 @@ -476,6 +504,7 @@ unsigned long do_mremap(unsigned long addr,
79654 addr + new_len);
79655 }
79656 ret = addr;
79657 + track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
79658 goto out;
79659 }
79660 }
79661 @@ -502,7 +531,13 @@ unsigned long do_mremap(unsigned long addr,
79662 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
79663 if (ret)
79664 goto out;
79665 +
79666 + map_flags = vma->vm_flags;
79667 ret = move_vma(vma, addr, old_len, new_len, new_addr);
79668 + if (!(ret & ~PAGE_MASK)) {
79669 + track_exec_limit(current->mm, addr, addr + old_len, 0UL);
79670 + track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
79671 + }
79672 }
79673 out:
79674 if (ret & ~PAGE_MASK)
79675 diff --git a/mm/nommu.c b/mm/nommu.c
79676 index 406e8d4..53970d3 100644
79677 --- a/mm/nommu.c
79678 +++ b/mm/nommu.c
79679 @@ -67,7 +67,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
79680 int sysctl_overcommit_ratio = 50; /* default is 50% */
79681 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
79682 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
79683 -int heap_stack_gap = 0;
79684
79685 atomic_long_t mmap_pages_allocated;
79686
79687 @@ -761,15 +760,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
79688 EXPORT_SYMBOL(find_vma);
79689
79690 /*
79691 - * find a VMA
79692 - * - we don't extend stack VMAs under NOMMU conditions
79693 - */
79694 -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
79695 -{
79696 - return find_vma(mm, addr);
79697 -}
79698 -
79699 -/*
79700 * expand a stack to a given address
79701 * - not supported under NOMMU conditions
79702 */
79703 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
79704 index 3ecab7e..594a471 100644
79705 --- a/mm/page_alloc.c
79706 +++ b/mm/page_alloc.c
79707 @@ -289,7 +289,7 @@ out:
79708 * This usage means that zero-order pages may not be compound.
79709 */
79710
79711 -static void free_compound_page(struct page *page)
79712 +void free_compound_page(struct page *page)
79713 {
79714 __free_pages_ok(page, compound_order(page));
79715 }
79716 @@ -587,6 +587,10 @@ static void __free_pages_ok(struct page *page, unsigned int order)
79717 int bad = 0;
79718 int wasMlocked = __TestClearPageMlocked(page);
79719
79720 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
79721 + unsigned long index = 1UL << order;
79722 +#endif
79723 +
79724 kmemcheck_free_shadow(page, order);
79725
79726 for (i = 0 ; i < (1 << order) ; ++i)
79727 @@ -599,6 +603,12 @@ static void __free_pages_ok(struct page *page, unsigned int order)
79728 debug_check_no_obj_freed(page_address(page),
79729 PAGE_SIZE << order);
79730 }
79731 +
79732 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
79733 + for (; index; --index)
79734 + sanitize_highpage(page + index - 1);
79735 +#endif
79736 +
79737 arch_free_page(page, order);
79738 kernel_map_pages(page, 1 << order, 0);
79739
79740 @@ -702,8 +712,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
79741 arch_alloc_page(page, order);
79742 kernel_map_pages(page, 1 << order, 1);
79743
79744 +#ifndef CONFIG_PAX_MEMORY_SANITIZE
79745 if (gfp_flags & __GFP_ZERO)
79746 prep_zero_page(page, order, gfp_flags);
79747 +#endif
79748
79749 if (order && (gfp_flags & __GFP_COMP))
79750 prep_compound_page(page, order);
79751 @@ -1097,6 +1109,11 @@ static void free_hot_cold_page(struct page *page, int cold)
79752 debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
79753 debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
79754 }
79755 +
79756 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
79757 + sanitize_highpage(page);
79758 +#endif
79759 +
79760 arch_free_page(page, 0);
79761 kernel_map_pages(page, 1, 0);
79762
79763 @@ -2179,6 +2196,8 @@ void show_free_areas(void)
79764 int cpu;
79765 struct zone *zone;
79766
79767 + pax_track_stack();
79768 +
79769 for_each_populated_zone(zone) {
79770 show_node(zone);
79771 printk("%s per-cpu:\n", zone->name);
79772 @@ -3736,7 +3755,7 @@ static void __init setup_usemap(struct pglist_data *pgdat,
79773 zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
79774 }
79775 #else
79776 -static void inline setup_usemap(struct pglist_data *pgdat,
79777 +static inline void setup_usemap(struct pglist_data *pgdat,
79778 struct zone *zone, unsigned long zonesize) {}
79779 #endif /* CONFIG_SPARSEMEM */
79780
79781 diff --git a/mm/percpu.c b/mm/percpu.c
79782 index c90614a..5f7b7b8 100644
79783 --- a/mm/percpu.c
79784 +++ b/mm/percpu.c
79785 @@ -115,7 +115,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
79786 static unsigned int pcpu_high_unit_cpu __read_mostly;
79787
79788 /* the address of the first chunk which starts with the kernel static area */
79789 -void *pcpu_base_addr __read_mostly;
79790 +void *pcpu_base_addr __read_only;
79791 EXPORT_SYMBOL_GPL(pcpu_base_addr);
79792
79793 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
79794 diff --git a/mm/rmap.c b/mm/rmap.c
79795 index dd43373..d848cd7 100644
79796 --- a/mm/rmap.c
79797 +++ b/mm/rmap.c
79798 @@ -121,6 +121,17 @@ int anon_vma_prepare(struct vm_area_struct *vma)
79799 /* page_table_lock to protect against threads */
79800 spin_lock(&mm->page_table_lock);
79801 if (likely(!vma->anon_vma)) {
79802 +
79803 +#ifdef CONFIG_PAX_SEGMEXEC
79804 + struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
79805 +
79806 + if (vma_m) {
79807 + BUG_ON(vma_m->anon_vma);
79808 + vma_m->anon_vma = anon_vma;
79809 + list_add_tail(&vma_m->anon_vma_node, &anon_vma->head);
79810 + }
79811 +#endif
79812 +
79813 vma->anon_vma = anon_vma;
79814 list_add_tail(&vma->anon_vma_node, &anon_vma->head);
79815 allocated = NULL;
79816 diff --git a/mm/shmem.c b/mm/shmem.c
79817 index 3e0005b..1d659a8 100644
79818 --- a/mm/shmem.c
79819 +++ b/mm/shmem.c
79820 @@ -31,7 +31,7 @@
79821 #include <linux/swap.h>
79822 #include <linux/ima.h>
79823
79824 -static struct vfsmount *shm_mnt;
79825 +struct vfsmount *shm_mnt;
79826
79827 #ifdef CONFIG_SHMEM
79828 /*
79829 @@ -1061,6 +1061,8 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
79830 goto unlock;
79831 }
79832 entry = shmem_swp_entry(info, index, NULL);
79833 + if (!entry)
79834 + goto unlock;
79835 if (entry->val) {
79836 /*
79837 * The more uptodate page coming down from a stacked
79838 @@ -1144,6 +1146,8 @@ static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
79839 struct vm_area_struct pvma;
79840 struct page *page;
79841
79842 + pax_track_stack();
79843 +
79844 spol = mpol_cond_copy(&mpol,
79845 mpol_shared_policy_lookup(&info->policy, idx));
79846
79847 @@ -1962,7 +1966,7 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
79848
79849 info = SHMEM_I(inode);
79850 inode->i_size = len-1;
79851 - if (len <= (char *)inode - (char *)info) {
79852 + if (len <= (char *)inode - (char *)info && len <= 64) {
79853 /* do it inline */
79854 memcpy(info, symname, len);
79855 inode->i_op = &shmem_symlink_inline_operations;
79856 @@ -2310,8 +2314,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
79857 int err = -ENOMEM;
79858
79859 /* Round up to L1_CACHE_BYTES to resist false sharing */
79860 - sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
79861 - L1_CACHE_BYTES), GFP_KERNEL);
79862 + sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
79863 if (!sbinfo)
79864 return -ENOMEM;
79865
79866 diff --git a/mm/slab.c b/mm/slab.c
79867 index c8d466a..909e01e 100644
79868 --- a/mm/slab.c
79869 +++ b/mm/slab.c
79870 @@ -174,7 +174,7 @@
79871
79872 /* Legal flag mask for kmem_cache_create(). */
79873 #if DEBUG
79874 -# define CREATE_MASK (SLAB_RED_ZONE | \
79875 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
79876 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
79877 SLAB_CACHE_DMA | \
79878 SLAB_STORE_USER | \
79879 @@ -182,7 +182,7 @@
79880 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
79881 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
79882 #else
79883 -# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
79884 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
79885 SLAB_CACHE_DMA | \
79886 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
79887 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
79888 @@ -308,7 +308,7 @@ struct kmem_list3 {
79889 * Need this for bootstrapping a per node allocator.
79890 */
79891 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
79892 -struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
79893 +struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
79894 #define CACHE_CACHE 0
79895 #define SIZE_AC MAX_NUMNODES
79896 #define SIZE_L3 (2 * MAX_NUMNODES)
79897 @@ -409,10 +409,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
79898 if ((x)->max_freeable < i) \
79899 (x)->max_freeable = i; \
79900 } while (0)
79901 -#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
79902 -#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
79903 -#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
79904 -#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
79905 +#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
79906 +#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
79907 +#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
79908 +#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
79909 #else
79910 #define STATS_INC_ACTIVE(x) do { } while (0)
79911 #define STATS_DEC_ACTIVE(x) do { } while (0)
79912 @@ -558,7 +558,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
79913 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
79914 */
79915 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
79916 - const struct slab *slab, void *obj)
79917 + const struct slab *slab, const void *obj)
79918 {
79919 u32 offset = (obj - slab->s_mem);
79920 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
79921 @@ -1453,7 +1453,7 @@ void __init kmem_cache_init(void)
79922 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
79923 sizes[INDEX_AC].cs_size,
79924 ARCH_KMALLOC_MINALIGN,
79925 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
79926 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
79927 NULL);
79928
79929 if (INDEX_AC != INDEX_L3) {
79930 @@ -1461,7 +1461,7 @@ void __init kmem_cache_init(void)
79931 kmem_cache_create(names[INDEX_L3].name,
79932 sizes[INDEX_L3].cs_size,
79933 ARCH_KMALLOC_MINALIGN,
79934 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
79935 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
79936 NULL);
79937 }
79938
79939 @@ -1479,7 +1479,7 @@ void __init kmem_cache_init(void)
79940 sizes->cs_cachep = kmem_cache_create(names->name,
79941 sizes->cs_size,
79942 ARCH_KMALLOC_MINALIGN,
79943 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
79944 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
79945 NULL);
79946 }
79947 #ifdef CONFIG_ZONE_DMA
79948 @@ -4211,10 +4211,10 @@ static int s_show(struct seq_file *m, void *p)
79949 }
79950 /* cpu stats */
79951 {
79952 - unsigned long allochit = atomic_read(&cachep->allochit);
79953 - unsigned long allocmiss = atomic_read(&cachep->allocmiss);
79954 - unsigned long freehit = atomic_read(&cachep->freehit);
79955 - unsigned long freemiss = atomic_read(&cachep->freemiss);
79956 + unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
79957 + unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
79958 + unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
79959 + unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
79960
79961 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
79962 allochit, allocmiss, freehit, freemiss);
79963 @@ -4471,15 +4471,70 @@ static const struct file_operations proc_slabstats_operations = {
79964
79965 static int __init slab_proc_init(void)
79966 {
79967 - proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
79968 + mode_t gr_mode = S_IRUGO;
79969 +
79970 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
79971 + gr_mode = S_IRUSR;
79972 +#endif
79973 +
79974 + proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
79975 #ifdef CONFIG_DEBUG_SLAB_LEAK
79976 - proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
79977 + proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
79978 #endif
79979 return 0;
79980 }
79981 module_init(slab_proc_init);
79982 #endif
79983
79984 +void check_object_size(const void *ptr, unsigned long n, bool to)
79985 +{
79986 +
79987 +#ifdef CONFIG_PAX_USERCOPY
79988 + struct page *page;
79989 + struct kmem_cache *cachep = NULL;
79990 + struct slab *slabp;
79991 + unsigned int objnr;
79992 + unsigned long offset;
79993 + const char *type;
79994 +
79995 + if (!n)
79996 + return;
79997 +
79998 + type = "<null>";
79999 + if (ZERO_OR_NULL_PTR(ptr))
80000 + goto report;
80001 +
80002 + if (!virt_addr_valid(ptr))
80003 + return;
80004 +
80005 + page = virt_to_head_page(ptr);
80006 +
80007 + type = "<process stack>";
80008 + if (!PageSlab(page)) {
80009 + if (object_is_on_stack(ptr, n) == -1)
80010 + goto report;
80011 + return;
80012 + }
80013 +
80014 + cachep = page_get_cache(page);
80015 + type = cachep->name;
80016 + if (!(cachep->flags & SLAB_USERCOPY))
80017 + goto report;
80018 +
80019 + slabp = page_get_slab(page);
80020 + objnr = obj_to_index(cachep, slabp, ptr);
80021 + BUG_ON(objnr >= cachep->num);
80022 + offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
80023 + if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
80024 + return;
80025 +
80026 +report:
80027 + pax_report_usercopy(ptr, n, to, type);
80028 +#endif
80029 +
80030 +}
80031 +EXPORT_SYMBOL(check_object_size);
80032 +
80033 /**
80034 * ksize - get the actual amount of memory allocated for a given object
80035 * @objp: Pointer to the object
80036 diff --git a/mm/slob.c b/mm/slob.c
80037 index 837ebd6..0bd23bc 100644
80038 --- a/mm/slob.c
80039 +++ b/mm/slob.c
80040 @@ -29,7 +29,7 @@
80041 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
80042 * alloc_pages() directly, allocating compound pages so the page order
80043 * does not have to be separately tracked, and also stores the exact
80044 - * allocation size in page->private so that it can be used to accurately
80045 + * allocation size in slob_page->size so that it can be used to accurately
80046 * provide ksize(). These objects are detected in kfree() because slob_page()
80047 * is false for them.
80048 *
80049 @@ -58,6 +58,7 @@
80050 */
80051
80052 #include <linux/kernel.h>
80053 +#include <linux/sched.h>
80054 #include <linux/slab.h>
80055 #include <linux/mm.h>
80056 #include <linux/swap.h> /* struct reclaim_state */
80057 @@ -100,7 +101,8 @@ struct slob_page {
80058 unsigned long flags; /* mandatory */
80059 atomic_t _count; /* mandatory */
80060 slobidx_t units; /* free units left in page */
80061 - unsigned long pad[2];
80062 + unsigned long pad[1];
80063 + unsigned long size; /* size when >=PAGE_SIZE */
80064 slob_t *free; /* first free slob_t in page */
80065 struct list_head list; /* linked list of free pages */
80066 };
80067 @@ -133,7 +135,7 @@ static LIST_HEAD(free_slob_large);
80068 */
80069 static inline int is_slob_page(struct slob_page *sp)
80070 {
80071 - return PageSlab((struct page *)sp);
80072 + return PageSlab((struct page *)sp) && !sp->size;
80073 }
80074
80075 static inline void set_slob_page(struct slob_page *sp)
80076 @@ -148,7 +150,7 @@ static inline void clear_slob_page(struct slob_page *sp)
80077
80078 static inline struct slob_page *slob_page(const void *addr)
80079 {
80080 - return (struct slob_page *)virt_to_page(addr);
80081 + return (struct slob_page *)virt_to_head_page(addr);
80082 }
80083
80084 /*
80085 @@ -208,7 +210,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
80086 /*
80087 * Return the size of a slob block.
80088 */
80089 -static slobidx_t slob_units(slob_t *s)
80090 +static slobidx_t slob_units(const slob_t *s)
80091 {
80092 if (s->units > 0)
80093 return s->units;
80094 @@ -218,7 +220,7 @@ static slobidx_t slob_units(slob_t *s)
80095 /*
80096 * Return the next free slob block pointer after this one.
80097 */
80098 -static slob_t *slob_next(slob_t *s)
80099 +static slob_t *slob_next(const slob_t *s)
80100 {
80101 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
80102 slobidx_t next;
80103 @@ -233,7 +235,7 @@ static slob_t *slob_next(slob_t *s)
80104 /*
80105 * Returns true if s is the last free block in its page.
80106 */
80107 -static int slob_last(slob_t *s)
80108 +static int slob_last(const slob_t *s)
80109 {
80110 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
80111 }
80112 @@ -252,6 +254,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
80113 if (!page)
80114 return NULL;
80115
80116 + set_slob_page(page);
80117 return page_address(page);
80118 }
80119
80120 @@ -368,11 +371,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
80121 if (!b)
80122 return NULL;
80123 sp = slob_page(b);
80124 - set_slob_page(sp);
80125
80126 spin_lock_irqsave(&slob_lock, flags);
80127 sp->units = SLOB_UNITS(PAGE_SIZE);
80128 sp->free = b;
80129 + sp->size = 0;
80130 INIT_LIST_HEAD(&sp->list);
80131 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
80132 set_slob_page_free(sp, slob_list);
80133 @@ -475,10 +478,9 @@ out:
80134 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
80135 #endif
80136
80137 -void *__kmalloc_node(size_t size, gfp_t gfp, int node)
80138 +static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
80139 {
80140 - unsigned int *m;
80141 - int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
80142 + slob_t *m;
80143 void *ret;
80144
80145 lockdep_trace_alloc(gfp);
80146 @@ -491,7 +493,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
80147
80148 if (!m)
80149 return NULL;
80150 - *m = size;
80151 + BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
80152 + BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
80153 + m[0].units = size;
80154 + m[1].units = align;
80155 ret = (void *)m + align;
80156
80157 trace_kmalloc_node(_RET_IP_, ret,
80158 @@ -501,16 +506,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
80159
80160 ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
80161 if (ret) {
80162 - struct page *page;
80163 - page = virt_to_page(ret);
80164 - page->private = size;
80165 + struct slob_page *sp;
80166 + sp = slob_page(ret);
80167 + sp->size = size;
80168 }
80169
80170 trace_kmalloc_node(_RET_IP_, ret,
80171 size, PAGE_SIZE << order, gfp, node);
80172 }
80173
80174 - kmemleak_alloc(ret, size, 1, gfp);
80175 + return ret;
80176 +}
80177 +
80178 +void *__kmalloc_node(size_t size, gfp_t gfp, int node)
80179 +{
80180 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
80181 + void *ret = __kmalloc_node_align(size, gfp, node, align);
80182 +
80183 + if (!ZERO_OR_NULL_PTR(ret))
80184 + kmemleak_alloc(ret, size, 1, gfp);
80185 return ret;
80186 }
80187 EXPORT_SYMBOL(__kmalloc_node);
80188 @@ -528,13 +542,92 @@ void kfree(const void *block)
80189 sp = slob_page(block);
80190 if (is_slob_page(sp)) {
80191 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
80192 - unsigned int *m = (unsigned int *)(block - align);
80193 - slob_free(m, *m + align);
80194 - } else
80195 + slob_t *m = (slob_t *)(block - align);
80196 + slob_free(m, m[0].units + align);
80197 + } else {
80198 + clear_slob_page(sp);
80199 + free_slob_page(sp);
80200 + sp->size = 0;
80201 put_page(&sp->page);
80202 + }
80203 }
80204 EXPORT_SYMBOL(kfree);
80205
80206 +void check_object_size(const void *ptr, unsigned long n, bool to)
80207 +{
80208 +
80209 +#ifdef CONFIG_PAX_USERCOPY
80210 + struct slob_page *sp;
80211 + const slob_t *free;
80212 + const void *base;
80213 + unsigned long flags;
80214 + const char *type;
80215 +
80216 + if (!n)
80217 + return;
80218 +
80219 + type = "<null>";
80220 + if (ZERO_OR_NULL_PTR(ptr))
80221 + goto report;
80222 +
80223 + if (!virt_addr_valid(ptr))
80224 + return;
80225 +
80226 + type = "<process stack>";
80227 + sp = slob_page(ptr);
80228 + if (!PageSlab((struct page *)sp)) {
80229 + if (object_is_on_stack(ptr, n) == -1)
80230 + goto report;
80231 + return;
80232 + }
80233 +
80234 + type = "<slob>";
80235 + if (sp->size) {
80236 + base = page_address(&sp->page);
80237 + if (base <= ptr && n <= sp->size - (ptr - base))
80238 + return;
80239 + goto report;
80240 + }
80241 +
80242 + /* some tricky double walking to find the chunk */
80243 + spin_lock_irqsave(&slob_lock, flags);
80244 + base = (void *)((unsigned long)ptr & PAGE_MASK);
80245 + free = sp->free;
80246 +
80247 + while (!slob_last(free) && (void *)free <= ptr) {
80248 + base = free + slob_units(free);
80249 + free = slob_next(free);
80250 + }
80251 +
80252 + while (base < (void *)free) {
80253 + slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
80254 + int size = SLOB_UNIT * SLOB_UNITS(m + align);
80255 + int offset;
80256 +
80257 + if (ptr < base + align)
80258 + break;
80259 +
80260 + offset = ptr - base - align;
80261 + if (offset >= m) {
80262 + base += size;
80263 + continue;
80264 + }
80265 +
80266 + if (n > m - offset)
80267 + break;
80268 +
80269 + spin_unlock_irqrestore(&slob_lock, flags);
80270 + return;
80271 + }
80272 +
80273 + spin_unlock_irqrestore(&slob_lock, flags);
80274 +report:
80275 + pax_report_usercopy(ptr, n, to, type);
80276 +#endif
80277 +
80278 +}
80279 +EXPORT_SYMBOL(check_object_size);
80280 +
80281 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
80282 size_t ksize(const void *block)
80283 {
80284 @@ -547,10 +640,10 @@ size_t ksize(const void *block)
80285 sp = slob_page(block);
80286 if (is_slob_page(sp)) {
80287 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
80288 - unsigned int *m = (unsigned int *)(block - align);
80289 - return SLOB_UNITS(*m) * SLOB_UNIT;
80290 + slob_t *m = (slob_t *)(block - align);
80291 + return SLOB_UNITS(m[0].units) * SLOB_UNIT;
80292 } else
80293 - return sp->page.private;
80294 + return sp->size;
80295 }
80296 EXPORT_SYMBOL(ksize);
80297
80298 @@ -566,8 +659,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
80299 {
80300 struct kmem_cache *c;
80301
80302 +#ifdef CONFIG_PAX_USERCOPY
80303 + c = __kmalloc_node_align(sizeof(struct kmem_cache),
80304 + GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
80305 +#else
80306 c = slob_alloc(sizeof(struct kmem_cache),
80307 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
80308 +#endif
80309
80310 if (c) {
80311 c->name = name;
80312 @@ -605,17 +703,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
80313 {
80314 void *b;
80315
80316 +#ifdef CONFIG_PAX_USERCOPY
80317 + b = __kmalloc_node_align(c->size, flags, node, c->align);
80318 +#else
80319 if (c->size < PAGE_SIZE) {
80320 b = slob_alloc(c->size, flags, c->align, node);
80321 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
80322 SLOB_UNITS(c->size) * SLOB_UNIT,
80323 flags, node);
80324 } else {
80325 + struct slob_page *sp;
80326 +
80327 b = slob_new_pages(flags, get_order(c->size), node);
80328 + sp = slob_page(b);
80329 + sp->size = c->size;
80330 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
80331 PAGE_SIZE << get_order(c->size),
80332 flags, node);
80333 }
80334 +#endif
80335
80336 if (c->ctor)
80337 c->ctor(b);
80338 @@ -627,10 +733,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
80339
80340 static void __kmem_cache_free(void *b, int size)
80341 {
80342 - if (size < PAGE_SIZE)
80343 + struct slob_page *sp = slob_page(b);
80344 +
80345 + if (is_slob_page(sp))
80346 slob_free(b, size);
80347 - else
80348 + else {
80349 + clear_slob_page(sp);
80350 + free_slob_page(sp);
80351 + sp->size = 0;
80352 slob_free_pages(b, get_order(size));
80353 + }
80354 }
80355
80356 static void kmem_rcu_free(struct rcu_head *head)
80357 @@ -643,18 +755,32 @@ static void kmem_rcu_free(struct rcu_head *head)
80358
80359 void kmem_cache_free(struct kmem_cache *c, void *b)
80360 {
80361 + int size = c->size;
80362 +
80363 +#ifdef CONFIG_PAX_USERCOPY
80364 + if (size + c->align < PAGE_SIZE) {
80365 + size += c->align;
80366 + b -= c->align;
80367 + }
80368 +#endif
80369 +
80370 kmemleak_free_recursive(b, c->flags);
80371 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
80372 struct slob_rcu *slob_rcu;
80373 - slob_rcu = b + (c->size - sizeof(struct slob_rcu));
80374 + slob_rcu = b + (size - sizeof(struct slob_rcu));
80375 INIT_RCU_HEAD(&slob_rcu->head);
80376 - slob_rcu->size = c->size;
80377 + slob_rcu->size = size;
80378 call_rcu(&slob_rcu->head, kmem_rcu_free);
80379 } else {
80380 - __kmem_cache_free(b, c->size);
80381 + __kmem_cache_free(b, size);
80382 }
80383
80384 +#ifdef CONFIG_PAX_USERCOPY
80385 + trace_kfree(_RET_IP_, b);
80386 +#else
80387 trace_kmem_cache_free(_RET_IP_, b);
80388 +#endif
80389 +
80390 }
80391 EXPORT_SYMBOL(kmem_cache_free);
80392
80393 diff --git a/mm/slub.c b/mm/slub.c
80394 index 4996fc7..87e01d0 100644
80395 --- a/mm/slub.c
80396 +++ b/mm/slub.c
80397 @@ -201,7 +201,7 @@ struct track {
80398
80399 enum track_item { TRACK_ALLOC, TRACK_FREE };
80400
80401 -#ifdef CONFIG_SLUB_DEBUG
80402 +#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
80403 static int sysfs_slab_add(struct kmem_cache *);
80404 static int sysfs_slab_alias(struct kmem_cache *, const char *);
80405 static void sysfs_slab_remove(struct kmem_cache *);
80406 @@ -410,7 +410,7 @@ static void print_track(const char *s, struct track *t)
80407 if (!t->addr)
80408 return;
80409
80410 - printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
80411 + printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
80412 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
80413 }
80414
80415 @@ -1893,6 +1893,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
80416
80417 page = virt_to_head_page(x);
80418
80419 + BUG_ON(!PageSlab(page));
80420 +
80421 slab_free(s, page, x, _RET_IP_);
80422
80423 trace_kmem_cache_free(_RET_IP_, x);
80424 @@ -1937,7 +1939,7 @@ static int slub_min_objects;
80425 * Merge control. If this is set then no merging of slab caches will occur.
80426 * (Could be removed. This was introduced to pacify the merge skeptics.)
80427 */
80428 -static int slub_nomerge;
80429 +static int slub_nomerge = 1;
80430
80431 /*
80432 * Calculate the order of allocation given an slab object size.
80433 @@ -2493,7 +2495,7 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
80434 * list to avoid pounding the page allocator excessively.
80435 */
80436 set_min_partial(s, ilog2(s->size));
80437 - s->refcount = 1;
80438 + atomic_set(&s->refcount, 1);
80439 #ifdef CONFIG_NUMA
80440 s->remote_node_defrag_ratio = 1000;
80441 #endif
80442 @@ -2630,8 +2632,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
80443 void kmem_cache_destroy(struct kmem_cache *s)
80444 {
80445 down_write(&slub_lock);
80446 - s->refcount--;
80447 - if (!s->refcount) {
80448 + if (atomic_dec_and_test(&s->refcount)) {
80449 list_del(&s->list);
80450 up_write(&slub_lock);
80451 if (kmem_cache_close(s)) {
80452 @@ -2691,12 +2692,10 @@ static int __init setup_slub_nomerge(char *str)
80453 __setup("slub_nomerge", setup_slub_nomerge);
80454
80455 static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
80456 - const char *name, int size, gfp_t gfp_flags)
80457 + const char *name, int size, gfp_t gfp_flags, unsigned int flags)
80458 {
80459 - unsigned int flags = 0;
80460 -
80461 if (gfp_flags & SLUB_DMA)
80462 - flags = SLAB_CACHE_DMA;
80463 + flags |= SLAB_CACHE_DMA;
80464
80465 /*
80466 * This function is called with IRQs disabled during early-boot on
80467 @@ -2915,6 +2914,50 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
80468 EXPORT_SYMBOL(__kmalloc_node);
80469 #endif
80470
80471 +void check_object_size(const void *ptr, unsigned long n, bool to)
80472 +{
80473 +
80474 +#ifdef CONFIG_PAX_USERCOPY
80475 + struct page *page;
80476 + struct kmem_cache *s = NULL;
80477 + unsigned long offset;
80478 + const char *type;
80479 +
80480 + if (!n)
80481 + return;
80482 +
80483 + type = "<null>";
80484 + if (ZERO_OR_NULL_PTR(ptr))
80485 + goto report;
80486 +
80487 + if (!virt_addr_valid(ptr))
80488 + return;
80489 +
80490 + page = get_object_page(ptr);
80491 +
80492 + type = "<process stack>";
80493 + if (!page) {
80494 + if (object_is_on_stack(ptr, n) == -1)
80495 + goto report;
80496 + return;
80497 + }
80498 +
80499 + s = page->slab;
80500 + type = s->name;
80501 + if (!(s->flags & SLAB_USERCOPY))
80502 + goto report;
80503 +
80504 + offset = (ptr - page_address(page)) % s->size;
80505 + if (offset <= s->objsize && n <= s->objsize - offset)
80506 + return;
80507 +
80508 +report:
80509 + pax_report_usercopy(ptr, n, to, type);
80510 +#endif
80511 +
80512 +}
80513 +EXPORT_SYMBOL(check_object_size);
80514 +
80515 size_t ksize(const void *object)
80516 {
80517 struct page *page;
80518 @@ -3185,8 +3228,8 @@ void __init kmem_cache_init(void)
80519 * kmem_cache_open for slab_state == DOWN.
80520 */
80521 create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
80522 - sizeof(struct kmem_cache_node), GFP_NOWAIT);
80523 - kmalloc_caches[0].refcount = -1;
80524 + sizeof(struct kmem_cache_node), GFP_NOWAIT, 0);
80525 + atomic_set(&kmalloc_caches[0].refcount, -1);
80526 caches++;
80527
80528 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
80529 @@ -3198,18 +3241,18 @@ void __init kmem_cache_init(void)
80530 /* Caches that are not of the two-to-the-power-of size */
80531 if (KMALLOC_MIN_SIZE <= 32) {
80532 create_kmalloc_cache(&kmalloc_caches[1],
80533 - "kmalloc-96", 96, GFP_NOWAIT);
80534 + "kmalloc-96", 96, GFP_NOWAIT, SLAB_USERCOPY);
80535 caches++;
80536 }
80537 if (KMALLOC_MIN_SIZE <= 64) {
80538 create_kmalloc_cache(&kmalloc_caches[2],
80539 - "kmalloc-192", 192, GFP_NOWAIT);
80540 + "kmalloc-192", 192, GFP_NOWAIT, SLAB_USERCOPY);
80541 caches++;
80542 }
80543
80544 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
80545 create_kmalloc_cache(&kmalloc_caches[i],
80546 - "kmalloc", 1 << i, GFP_NOWAIT);
80547 + "kmalloc", 1 << i, GFP_NOWAIT, SLAB_USERCOPY);
80548 caches++;
80549 }
80550
80551 @@ -3293,7 +3336,7 @@ static int slab_unmergeable(struct kmem_cache *s)
80552 /*
80553 * We may have set a slab to be unmergeable during bootstrap.
80554 */
80555 - if (s->refcount < 0)
80556 + if (atomic_read(&s->refcount) < 0)
80557 return 1;
80558
80559 return 0;
80560 @@ -3353,7 +3396,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
80561 if (s) {
80562 int cpu;
80563
80564 - s->refcount++;
80565 + atomic_inc(&s->refcount);
80566 /*
80567 * Adjust the object sizes so that we clear
80568 * the complete object on kzalloc.
80569 @@ -3372,7 +3415,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
80570
80571 if (sysfs_slab_alias(s, name)) {
80572 down_write(&slub_lock);
80573 - s->refcount--;
80574 + atomic_dec(&s->refcount);
80575 up_write(&slub_lock);
80576 goto err;
80577 }
80578 @@ -4101,7 +4144,7 @@ SLAB_ATTR_RO(ctor);
80579
80580 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
80581 {
80582 - return sprintf(buf, "%d\n", s->refcount - 1);
80583 + return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
80584 }
80585 SLAB_ATTR_RO(aliases);
80586
80587 @@ -4503,7 +4546,7 @@ static void kmem_cache_release(struct kobject *kobj)
80588 kfree(s);
80589 }
80590
80591 -static struct sysfs_ops slab_sysfs_ops = {
80592 +static const struct sysfs_ops slab_sysfs_ops = {
80593 .show = slab_attr_show,
80594 .store = slab_attr_store,
80595 };
80596 @@ -4522,7 +4565,7 @@ static int uevent_filter(struct kset *kset, struct kobject *kobj)
80597 return 0;
80598 }
80599
80600 -static struct kset_uevent_ops slab_uevent_ops = {
80601 +static const struct kset_uevent_ops slab_uevent_ops = {
80602 .filter = uevent_filter,
80603 };
80604
80605 @@ -4564,6 +4607,7 @@ static char *create_unique_id(struct kmem_cache *s)
80606 return name;
80607 }
80608
80609 +#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
80610 static int sysfs_slab_add(struct kmem_cache *s)
80611 {
80612 int err;
80613 @@ -4619,6 +4663,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
80614 kobject_del(&s->kobj);
80615 kobject_put(&s->kobj);
80616 }
80617 +#endif
80618
80619 /*
80620 * Need to buffer aliases during bootup until sysfs becomes
80621 @@ -4632,6 +4677,7 @@ struct saved_alias {
80622
80623 static struct saved_alias *alias_list;
80624
80625 +#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
80626 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
80627 {
80628 struct saved_alias *al;
80629 @@ -4654,6 +4700,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
80630 alias_list = al;
80631 return 0;
80632 }
80633 +#endif
80634
80635 static int __init slab_sysfs_init(void)
80636 {
80637 @@ -4785,7 +4832,13 @@ static const struct file_operations proc_slabinfo_operations = {
80638
80639 static int __init slab_proc_init(void)
80640 {
80641 - proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
80642 + mode_t gr_mode = S_IRUGO;
80643 +
80644 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
80645 + gr_mode = S_IRUSR;
80646 +#endif
80647 +
80648 + proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
80649 return 0;
80650 }
80651 module_init(slab_proc_init);
80652 diff --git a/mm/swap.c b/mm/swap.c
80653 index 308e57d..5de19c0 100644
80654 --- a/mm/swap.c
80655 +++ b/mm/swap.c
80656 @@ -30,6 +30,7 @@
80657 #include <linux/notifier.h>
80658 #include <linux/backing-dev.h>
80659 #include <linux/memcontrol.h>
80660 +#include <linux/hugetlb.h>
80661
80662 #include "internal.h"
80663
80664 @@ -65,6 +66,8 @@ static void put_compound_page(struct page *page)
80665 compound_page_dtor *dtor;
80666
80667 dtor = get_compound_page_dtor(page);
80668 + if (!PageHuge(page))
80669 + BUG_ON(dtor != free_compound_page);
80670 (*dtor)(page);
80671 }
80672 }
80673 diff --git a/mm/util.c b/mm/util.c
80674 index e48b493..24a601d 100644
80675 --- a/mm/util.c
80676 +++ b/mm/util.c
80677 @@ -228,6 +228,12 @@ EXPORT_SYMBOL(strndup_user);
80678 void arch_pick_mmap_layout(struct mm_struct *mm)
80679 {
80680 mm->mmap_base = TASK_UNMAPPED_BASE;
80681 +
80682 +#ifdef CONFIG_PAX_RANDMMAP
80683 + if (mm->pax_flags & MF_PAX_RANDMMAP)
80684 + mm->mmap_base += mm->delta_mmap;
80685 +#endif
80686 +
80687 mm->get_unmapped_area = arch_get_unmapped_area;
80688 mm->unmap_area = arch_unmap_area;
80689 }
80690 diff --git a/mm/vmalloc.c b/mm/vmalloc.c
80691 index f34ffd0..e60c44f 100644
80692 --- a/mm/vmalloc.c
80693 +++ b/mm/vmalloc.c
80694 @@ -40,8 +40,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
80695
80696 pte = pte_offset_kernel(pmd, addr);
80697 do {
80698 - pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
80699 - WARN_ON(!pte_none(ptent) && !pte_present(ptent));
80700 +
80701 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
80702 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
80703 + BUG_ON(!pte_exec(*pte));
80704 + set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
80705 + continue;
80706 + }
80707 +#endif
80708 +
80709 + {
80710 + pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
80711 + WARN_ON(!pte_none(ptent) && !pte_present(ptent));
80712 + }
80713 } while (pte++, addr += PAGE_SIZE, addr != end);
80714 }
80715
80716 @@ -92,6 +103,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
80717 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
80718 {
80719 pte_t *pte;
80720 + int ret = -ENOMEM;
80721
80722 /*
80723 * nr is a running index into the array which helps higher level
80724 @@ -101,17 +113,32 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
80725 pte = pte_alloc_kernel(pmd, addr);
80726 if (!pte)
80727 return -ENOMEM;
80728 +
80729 + pax_open_kernel();
80730 do {
80731 struct page *page = pages[*nr];
80732
80733 - if (WARN_ON(!pte_none(*pte)))
80734 - return -EBUSY;
80735 - if (WARN_ON(!page))
80736 - return -ENOMEM;
80737 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
80738 + if (!(pgprot_val(prot) & _PAGE_NX))
80739 + BUG_ON(!pte_exec(*pte) || pte_pfn(*pte) != __pa(addr) >> PAGE_SHIFT);
80740 + else
80741 +#endif
80742 +
80743 + if (WARN_ON(!pte_none(*pte))) {
80744 + ret = -EBUSY;
80745 + goto out;
80746 + }
80747 + if (WARN_ON(!page)) {
80748 + ret = -ENOMEM;
80749 + goto out;
80750 + }
80751 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
80752 (*nr)++;
80753 } while (pte++, addr += PAGE_SIZE, addr != end);
80754 - return 0;
80755 + ret = 0;
80756 +out:
80757 + pax_close_kernel();
80758 + return ret;
80759 }
80760
80761 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
80762 @@ -192,11 +219,20 @@ int is_vmalloc_or_module_addr(const void *x)
80763 * and fall back on vmalloc() if that fails. Others
80764 * just put it in the vmalloc space.
80765 */
80766 -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
80767 +#ifdef CONFIG_MODULES
80768 +#ifdef MODULES_VADDR
80769 unsigned long addr = (unsigned long)x;
80770 if (addr >= MODULES_VADDR && addr < MODULES_END)
80771 return 1;
80772 #endif
80773 +
80774 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
80775 + if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
80776 + return 1;
80777 +#endif
80778 +
80779 +#endif
80780 +
80781 return is_vmalloc_addr(x);
80782 }
80783
80784 @@ -217,8 +253,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
80785
80786 if (!pgd_none(*pgd)) {
80787 pud_t *pud = pud_offset(pgd, addr);
80788 +#ifdef CONFIG_X86
80789 + if (!pud_large(*pud))
80790 +#endif
80791 if (!pud_none(*pud)) {
80792 pmd_t *pmd = pmd_offset(pud, addr);
80793 +#ifdef CONFIG_X86
80794 + if (!pmd_large(*pmd))
80795 +#endif
80796 if (!pmd_none(*pmd)) {
80797 pte_t *ptep, pte;
80798
80799 @@ -292,13 +334,13 @@ static void __insert_vmap_area(struct vmap_area *va)
80800 struct rb_node *tmp;
80801
80802 while (*p) {
80803 - struct vmap_area *tmp;
80804 + struct vmap_area *varea;
80805
80806 parent = *p;
80807 - tmp = rb_entry(parent, struct vmap_area, rb_node);
80808 - if (va->va_start < tmp->va_end)
80809 + varea = rb_entry(parent, struct vmap_area, rb_node);
80810 + if (va->va_start < varea->va_end)
80811 p = &(*p)->rb_left;
80812 - else if (va->va_end > tmp->va_start)
80813 + else if (va->va_end > varea->va_start)
80814 p = &(*p)->rb_right;
80815 else
80816 BUG();
80817 @@ -1245,6 +1287,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
80818 struct vm_struct *area;
80819
80820 BUG_ON(in_interrupt());
80821 +
80822 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
80823 + if (flags & VM_KERNEXEC) {
80824 + if (start != VMALLOC_START || end != VMALLOC_END)
80825 + return NULL;
80826 + start = (unsigned long)MODULES_EXEC_VADDR;
80827 + end = (unsigned long)MODULES_EXEC_END;
80828 + }
80829 +#endif
80830 +
80831 if (flags & VM_IOREMAP) {
80832 int bit = fls(size);
80833
80834 @@ -1484,6 +1536,11 @@ void *vmap(struct page **pages, unsigned int count,
80835 if (count > totalram_pages)
80836 return NULL;
80837
80838 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
80839 + if (!(pgprot_val(prot) & _PAGE_NX))
80840 + flags |= VM_KERNEXEC;
80841 +#endif
80842 +
80843 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
80844 __builtin_return_address(0));
80845 if (!area)
80846 @@ -1594,6 +1651,14 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
80847 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
80848 return NULL;
80849
80850 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
80851 + if (!(pgprot_val(prot) & _PAGE_NX))
80852 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
80853 + VMALLOC_START, VMALLOC_END, node,
80854 + gfp_mask, caller);
80855 + else
80856 +#endif
80857 +
80858 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
80859 VMALLOC_START, VMALLOC_END, node,
80860 gfp_mask, caller);
80861 @@ -1619,6 +1684,7 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
80862 return addr;
80863 }
80864
80865 +#undef __vmalloc
80866 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
80867 {
80868 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
80869 @@ -1635,6 +1701,7 @@ EXPORT_SYMBOL(__vmalloc);
80870 * For tight control over page level allocator and protection flags
80871 * use __vmalloc() instead.
80872 */
80873 +#undef vmalloc
80874 void *vmalloc(unsigned long size)
80875 {
80876 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
80877 @@ -1649,6 +1716,7 @@ EXPORT_SYMBOL(vmalloc);
80878 * The resulting memory area is zeroed so it can be mapped to userspace
80879 * without leaking data.
80880 */
80881 +#undef vmalloc_user
80882 void *vmalloc_user(unsigned long size)
80883 {
80884 struct vm_struct *area;
80885 @@ -1676,6 +1744,7 @@ EXPORT_SYMBOL(vmalloc_user);
80886 * For tight control over page level allocator and protection flags
80887 * use __vmalloc() instead.
80888 */
80889 +#undef vmalloc_node
80890 void *vmalloc_node(unsigned long size, int node)
80891 {
80892 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
80893 @@ -1698,10 +1767,10 @@ EXPORT_SYMBOL(vmalloc_node);
80894 * For tight control over page level allocator and protection flags
80895 * use __vmalloc() instead.
80896 */
80897 -
80898 +#undef vmalloc_exec
80899 void *vmalloc_exec(unsigned long size)
80900 {
80901 - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
80902 + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
80903 -1, __builtin_return_address(0));
80904 }
80905
80906 @@ -1720,6 +1789,7 @@ void *vmalloc_exec(unsigned long size)
80907 * Allocate enough 32bit PA addressable pages to cover @size from the
80908 * page level allocator and map them into contiguous kernel virtual space.
80909 */
80910 +#undef vmalloc_32
80911 void *vmalloc_32(unsigned long size)
80912 {
80913 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
80914 @@ -1734,6 +1804,7 @@ EXPORT_SYMBOL(vmalloc_32);
80915 * The resulting memory area is 32bit addressable and zeroed so it can be
80916 * mapped to userspace without leaking data.
80917 */
80918 +#undef vmalloc_32_user
80919 void *vmalloc_32_user(unsigned long size)
80920 {
80921 struct vm_struct *area;
80922 @@ -1998,6 +2069,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
80923 unsigned long uaddr = vma->vm_start;
80924 unsigned long usize = vma->vm_end - vma->vm_start;
80925
80926 + BUG_ON(vma->vm_mirror);
80927 +
80928 if ((PAGE_SIZE-1) & (unsigned long)addr)
80929 return -EINVAL;
80930
80931 diff --git a/mm/vmstat.c b/mm/vmstat.c
80932 index 42d76c6..5643dc4 100644
80933 --- a/mm/vmstat.c
80934 +++ b/mm/vmstat.c
80935 @@ -74,7 +74,7 @@ void vm_events_fold_cpu(int cpu)
80936 *
80937 * vm_stat contains the global counters
80938 */
80939 -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
80940 +atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
80941 EXPORT_SYMBOL(vm_stat);
80942
80943 #ifdef CONFIG_SMP
80944 @@ -324,7 +324,7 @@ void refresh_cpu_vm_stats(int cpu)
80945 v = p->vm_stat_diff[i];
80946 p->vm_stat_diff[i] = 0;
80947 local_irq_restore(flags);
80948 - atomic_long_add(v, &zone->vm_stat[i]);
80949 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
80950 global_diff[i] += v;
80951 #ifdef CONFIG_NUMA
80952 /* 3 seconds idle till flush */
80953 @@ -362,7 +362,7 @@ void refresh_cpu_vm_stats(int cpu)
80954
80955 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
80956 if (global_diff[i])
80957 - atomic_long_add(global_diff[i], &vm_stat[i]);
80958 + atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
80959 }
80960
80961 #endif
80962 @@ -953,10 +953,20 @@ static int __init setup_vmstat(void)
80963 start_cpu_timer(cpu);
80964 #endif
80965 #ifdef CONFIG_PROC_FS
80966 - proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
80967 - proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
80968 - proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
80969 - proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
80970 + {
80971 + mode_t gr_mode = S_IRUGO;
80972 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
80973 + gr_mode = S_IRUSR;
80974 +#endif
80975 + proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
80976 + proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
80977 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
80978 + proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
80979 +#else
80980 + proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
80981 +#endif
80982 + proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
80983 + }
80984 #endif
80985 return 0;
80986 }
80987 diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
80988 index a29c5ab..6143f20 100644
80989 --- a/net/8021q/vlan.c
80990 +++ b/net/8021q/vlan.c
80991 @@ -622,8 +622,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
80992 err = -EPERM;
80993 if (!capable(CAP_NET_ADMIN))
80994 break;
80995 - if ((args.u.name_type >= 0) &&
80996 - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
80997 + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
80998 struct vlan_net *vn;
80999
81000 vn = net_generic(net, vlan_net_id);
81001 diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
81002 index a2d2984..f9eb711 100644
81003 --- a/net/9p/trans_fd.c
81004 +++ b/net/9p/trans_fd.c
81005 @@ -419,7 +419,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
81006 oldfs = get_fs();
81007 set_fs(get_ds());
81008 /* The cast to a user pointer is valid due to the set_fs() */
81009 - ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
81010 + ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
81011 set_fs(oldfs);
81012
81013 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
81014 diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
81015 index 02cc7e7..4514f1b 100644
81016 --- a/net/atm/atm_misc.c
81017 +++ b/net/atm/atm_misc.c
81018 @@ -19,7 +19,7 @@ int atm_charge(struct atm_vcc *vcc,int truesize)
81019 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
81020 return 1;
81021 atm_return(vcc,truesize);
81022 - atomic_inc(&vcc->stats->rx_drop);
81023 + atomic_inc_unchecked(&vcc->stats->rx_drop);
81024 return 0;
81025 }
81026
81027 @@ -41,7 +41,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size,
81028 }
81029 }
81030 atm_return(vcc,guess);
81031 - atomic_inc(&vcc->stats->rx_drop);
81032 + atomic_inc_unchecked(&vcc->stats->rx_drop);
81033 return NULL;
81034 }
81035
81036 @@ -88,7 +88,7 @@ int atm_pcr_goal(const struct atm_trafprm *tp)
81037
81038 void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
81039 {
81040 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
81041 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
81042 __SONET_ITEMS
81043 #undef __HANDLE_ITEM
81044 }
81045 @@ -96,7 +96,7 @@ void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
81046
81047 void sonet_subtract_stats(struct k_sonet_stats *from,struct sonet_stats *to)
81048 {
81049 -#define __HANDLE_ITEM(i) atomic_sub(to->i,&from->i)
81050 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
81051 __SONET_ITEMS
81052 #undef __HANDLE_ITEM
81053 }
81054 diff --git a/net/atm/lec.h b/net/atm/lec.h
81055 index 9d14d19..5c145f3 100644
81056 --- a/net/atm/lec.h
81057 +++ b/net/atm/lec.h
81058 @@ -48,7 +48,7 @@ struct lane2_ops {
81059 const u8 *tlvs, u32 sizeoftlvs);
81060 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
81061 const u8 *tlvs, u32 sizeoftlvs);
81062 -};
81063 +} __no_const;
81064
81065 /*
81066 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
81067 diff --git a/net/atm/mpc.h b/net/atm/mpc.h
81068 index 0919a88..a23d54e 100644
81069 --- a/net/atm/mpc.h
81070 +++ b/net/atm/mpc.h
81071 @@ -33,7 +33,7 @@ struct mpoa_client {
81072 struct mpc_parameters parameters; /* parameters for this client */
81073
81074 const struct net_device_ops *old_ops;
81075 - struct net_device_ops new_ops;
81076 + net_device_ops_no_const new_ops;
81077 };
81078
81079
81080 diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
81081 index 4504a4b..1733f1e 100644
81082 --- a/net/atm/mpoa_caches.c
81083 +++ b/net/atm/mpoa_caches.c
81084 @@ -498,6 +498,8 @@ static void clear_expired(struct mpoa_client *client)
81085 struct timeval now;
81086 struct k_message msg;
81087
81088 + pax_track_stack();
81089 +
81090 do_gettimeofday(&now);
81091
81092 write_lock_irq(&client->egress_lock);
81093 diff --git a/net/atm/proc.c b/net/atm/proc.c
81094 index ab8419a..aa91497 100644
81095 --- a/net/atm/proc.c
81096 +++ b/net/atm/proc.c
81097 @@ -43,9 +43,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
81098 const struct k_atm_aal_stats *stats)
81099 {
81100 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
81101 - atomic_read(&stats->tx),atomic_read(&stats->tx_err),
81102 - atomic_read(&stats->rx),atomic_read(&stats->rx_err),
81103 - atomic_read(&stats->rx_drop));
81104 + atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
81105 + atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
81106 + atomic_read_unchecked(&stats->rx_drop));
81107 }
81108
81109 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
81110 @@ -188,7 +188,12 @@ static void vcc_info(struct seq_file *seq, struct atm_vcc *vcc)
81111 {
81112 struct sock *sk = sk_atm(vcc);
81113
81114 +#ifdef CONFIG_GRKERNSEC_HIDESYM
81115 + seq_printf(seq, "%p ", NULL);
81116 +#else
81117 seq_printf(seq, "%p ", vcc);
81118 +#endif
81119 +
81120 if (!vcc->dev)
81121 seq_printf(seq, "Unassigned ");
81122 else
81123 @@ -214,7 +219,11 @@ static void svc_info(struct seq_file *seq, struct atm_vcc *vcc)
81124 {
81125 if (!vcc->dev)
81126 seq_printf(seq, sizeof(void *) == 4 ?
81127 +#ifdef CONFIG_GRKERNSEC_HIDESYM
81128 + "N/A@%p%10s" : "N/A@%p%2s", NULL, "");
81129 +#else
81130 "N/A@%p%10s" : "N/A@%p%2s", vcc, "");
81131 +#endif
81132 else
81133 seq_printf(seq, "%3d %3d %5d ",
81134 vcc->dev->number, vcc->vpi, vcc->vci);
81135 diff --git a/net/atm/resources.c b/net/atm/resources.c
81136 index 56b7322..c48b84e 100644
81137 --- a/net/atm/resources.c
81138 +++ b/net/atm/resources.c
81139 @@ -161,7 +161,7 @@ void atm_dev_deregister(struct atm_dev *dev)
81140 static void copy_aal_stats(struct k_atm_aal_stats *from,
81141 struct atm_aal_stats *to)
81142 {
81143 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
81144 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
81145 __AAL_STAT_ITEMS
81146 #undef __HANDLE_ITEM
81147 }
81148 @@ -170,7 +170,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
81149 static void subtract_aal_stats(struct k_atm_aal_stats *from,
81150 struct atm_aal_stats *to)
81151 {
81152 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
81153 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
81154 __AAL_STAT_ITEMS
81155 #undef __HANDLE_ITEM
81156 }
81157 diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
81158 index 8567d47..bba2292 100644
81159 --- a/net/bridge/br_private.h
81160 +++ b/net/bridge/br_private.h
81161 @@ -255,7 +255,7 @@ extern void br_ifinfo_notify(int event, struct net_bridge_port *port);
81162
81163 #ifdef CONFIG_SYSFS
81164 /* br_sysfs_if.c */
81165 -extern struct sysfs_ops brport_sysfs_ops;
81166 +extern const struct sysfs_ops brport_sysfs_ops;
81167 extern int br_sysfs_addif(struct net_bridge_port *p);
81168
81169 /* br_sysfs_br.c */
81170 diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
81171 index 9a52ac5..c97538e 100644
81172 --- a/net/bridge/br_stp_if.c
81173 +++ b/net/bridge/br_stp_if.c
81174 @@ -146,7 +146,7 @@ static void br_stp_stop(struct net_bridge *br)
81175 char *envp[] = { NULL };
81176
81177 if (br->stp_enabled == BR_USER_STP) {
81178 - r = call_usermodehelper(BR_STP_PROG, argv, envp, 1);
81179 + r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
81180 printk(KERN_INFO "%s: userspace STP stopped, return code %d\n",
81181 br->dev->name, r);
81182
81183 diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
81184 index 820643a..ce77fb3 100644
81185 --- a/net/bridge/br_sysfs_if.c
81186 +++ b/net/bridge/br_sysfs_if.c
81187 @@ -220,7 +220,7 @@ static ssize_t brport_store(struct kobject * kobj,
81188 return ret;
81189 }
81190
81191 -struct sysfs_ops brport_sysfs_ops = {
81192 +const struct sysfs_ops brport_sysfs_ops = {
81193 .show = brport_show,
81194 .store = brport_store,
81195 };
81196 diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
81197 index d73d47f..72df42a 100644
81198 --- a/net/bridge/netfilter/ebtables.c
81199 +++ b/net/bridge/netfilter/ebtables.c
81200 @@ -1337,6 +1337,8 @@ static int copy_everything_to_user(struct ebt_table *t, void __user *user,
81201 unsigned int entries_size, nentries;
81202 char *entries;
81203
81204 + pax_track_stack();
81205 +
81206 if (cmd == EBT_SO_GET_ENTRIES) {
81207 entries_size = t->private->entries_size;
81208 nentries = t->private->nentries;
81209 diff --git a/net/can/bcm.c b/net/can/bcm.c
81210 index 2ffd2e0..72a7486 100644
81211 --- a/net/can/bcm.c
81212 +++ b/net/can/bcm.c
81213 @@ -164,9 +164,15 @@ static int bcm_proc_show(struct seq_file *m, void *v)
81214 struct bcm_sock *bo = bcm_sk(sk);
81215 struct bcm_op *op;
81216
81217 +#ifdef CONFIG_GRKERNSEC_HIDESYM
81218 + seq_printf(m, ">>> socket %p", NULL);
81219 + seq_printf(m, " / sk %p", NULL);
81220 + seq_printf(m, " / bo %p", NULL);
81221 +#else
81222 seq_printf(m, ">>> socket %p", sk->sk_socket);
81223 seq_printf(m, " / sk %p", sk);
81224 seq_printf(m, " / bo %p", bo);
81225 +#endif
81226 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
81227 seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
81228 seq_printf(m, " <<<\n");
81229 diff --git a/net/compat.c b/net/compat.c
81230 index 9559afc..ccd74e1 100644
81231 --- a/net/compat.c
81232 +++ b/net/compat.c
81233 @@ -69,9 +69,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
81234 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
81235 __get_user(kmsg->msg_flags, &umsg->msg_flags))
81236 return -EFAULT;
81237 - kmsg->msg_name = compat_ptr(tmp1);
81238 - kmsg->msg_iov = compat_ptr(tmp2);
81239 - kmsg->msg_control = compat_ptr(tmp3);
81240 + kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
81241 + kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
81242 + kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
81243 return 0;
81244 }
81245
81246 @@ -94,7 +94,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
81247 kern_msg->msg_name = NULL;
81248
81249 tot_len = iov_from_user_compat_to_kern(kern_iov,
81250 - (struct compat_iovec __user *)kern_msg->msg_iov,
81251 + (struct compat_iovec __force_user *)kern_msg->msg_iov,
81252 kern_msg->msg_iovlen);
81253 if (tot_len >= 0)
81254 kern_msg->msg_iov = kern_iov;
81255 @@ -114,20 +114,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
81256
81257 #define CMSG_COMPAT_FIRSTHDR(msg) \
81258 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
81259 - (struct compat_cmsghdr __user *)((msg)->msg_control) : \
81260 + (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
81261 (struct compat_cmsghdr __user *)NULL)
81262
81263 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
81264 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
81265 (ucmlen) <= (unsigned long) \
81266 ((mhdr)->msg_controllen - \
81267 - ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
81268 + ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
81269
81270 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
81271 struct compat_cmsghdr __user *cmsg, int cmsg_len)
81272 {
81273 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
81274 - if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
81275 + if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
81276 msg->msg_controllen)
81277 return NULL;
81278 return (struct compat_cmsghdr __user *)ptr;
81279 @@ -219,7 +219,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
81280 {
81281 struct compat_timeval ctv;
81282 struct compat_timespec cts[3];
81283 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
81284 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
81285 struct compat_cmsghdr cmhdr;
81286 int cmlen;
81287
81288 @@ -271,7 +271,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
81289
81290 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
81291 {
81292 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
81293 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
81294 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
81295 int fdnum = scm->fp->count;
81296 struct file **fp = scm->fp->fp;
81297 @@ -433,7 +433,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
81298 len = sizeof(ktime);
81299 old_fs = get_fs();
81300 set_fs(KERNEL_DS);
81301 - err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
81302 + err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
81303 set_fs(old_fs);
81304
81305 if (!err) {
81306 @@ -570,7 +570,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
81307 case MCAST_JOIN_GROUP:
81308 case MCAST_LEAVE_GROUP:
81309 {
81310 - struct compat_group_req __user *gr32 = (void *)optval;
81311 + struct compat_group_req __user *gr32 = (void __user *)optval;
81312 struct group_req __user *kgr =
81313 compat_alloc_user_space(sizeof(struct group_req));
81314 u32 interface;
81315 @@ -591,7 +591,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
81316 case MCAST_BLOCK_SOURCE:
81317 case MCAST_UNBLOCK_SOURCE:
81318 {
81319 - struct compat_group_source_req __user *gsr32 = (void *)optval;
81320 + struct compat_group_source_req __user *gsr32 = (void __user *)optval;
81321 struct group_source_req __user *kgsr = compat_alloc_user_space(
81322 sizeof(struct group_source_req));
81323 u32 interface;
81324 @@ -612,7 +612,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
81325 }
81326 case MCAST_MSFILTER:
81327 {
81328 - struct compat_group_filter __user *gf32 = (void *)optval;
81329 + struct compat_group_filter __user *gf32 = (void __user *)optval;
81330 struct group_filter __user *kgf;
81331 u32 interface, fmode, numsrc;
81332
81333 diff --git a/net/core/dev.c b/net/core/dev.c
81334 index 84a0705..575db4c 100644
81335 --- a/net/core/dev.c
81336 +++ b/net/core/dev.c
81337 @@ -1047,10 +1047,14 @@ void dev_load(struct net *net, const char *name)
81338 if (no_module && capable(CAP_NET_ADMIN))
81339 no_module = request_module("netdev-%s", name);
81340 if (no_module && capable(CAP_SYS_MODULE)) {
81341 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
81342 + ___request_module(true, "grsec_modharden_netdev", "%s", name);
81343 +#else
81344 if (!request_module("%s", name))
81345 pr_err("Loading kernel module for a network device "
81346 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
81347 "instead\n", name);
81348 +#endif
81349 }
81350 }
81351 EXPORT_SYMBOL(dev_load);
81352 @@ -1654,7 +1658,7 @@ static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
81353
81354 struct dev_gso_cb {
81355 void (*destructor)(struct sk_buff *skb);
81356 -};
81357 +} __no_const;
81358
81359 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
81360
81361 @@ -2063,7 +2067,7 @@ int netif_rx_ni(struct sk_buff *skb)
81362 }
81363 EXPORT_SYMBOL(netif_rx_ni);
81364
81365 -static void net_tx_action(struct softirq_action *h)
81366 +static void net_tx_action(void)
81367 {
81368 struct softnet_data *sd = &__get_cpu_var(softnet_data);
81369
81370 @@ -2827,7 +2831,7 @@ void netif_napi_del(struct napi_struct *napi)
81371 EXPORT_SYMBOL(netif_napi_del);
81372
81373
81374 -static void net_rx_action(struct softirq_action *h)
81375 +static void net_rx_action(void)
81376 {
81377 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
81378 unsigned long time_limit = jiffies + 2;
81379 diff --git a/net/core/flow.c b/net/core/flow.c
81380 index 9601587..8c4824e 100644
81381 --- a/net/core/flow.c
81382 +++ b/net/core/flow.c
81383 @@ -35,11 +35,11 @@ struct flow_cache_entry {
81384 atomic_t *object_ref;
81385 };
81386
81387 -atomic_t flow_cache_genid = ATOMIC_INIT(0);
81388 +atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
81389
81390 static u32 flow_hash_shift;
81391 #define flow_hash_size (1 << flow_hash_shift)
81392 -static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };
81393 +static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables);
81394
81395 #define flow_table(cpu) (per_cpu(flow_tables, cpu))
81396
81397 @@ -52,7 +52,7 @@ struct flow_percpu_info {
81398 u32 hash_rnd;
81399 int count;
81400 };
81401 -static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 };
81402 +static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info);
81403
81404 #define flow_hash_rnd_recalc(cpu) \
81405 (per_cpu(flow_hash_info, cpu).hash_rnd_recalc)
81406 @@ -69,7 +69,7 @@ struct flow_flush_info {
81407 atomic_t cpuleft;
81408 struct completion completion;
81409 };
81410 -static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL };
81411 +static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets);
81412
81413 #define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))
81414
81415 @@ -190,7 +190,7 @@ void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir,
81416 if (fle->family == family &&
81417 fle->dir == dir &&
81418 flow_key_compare(key, &fle->key) == 0) {
81419 - if (fle->genid == atomic_read(&flow_cache_genid)) {
81420 + if (fle->genid == atomic_read_unchecked(&flow_cache_genid)) {
81421 void *ret = fle->object;
81422
81423 if (ret)
81424 @@ -228,7 +228,7 @@ nocache:
81425 err = resolver(net, key, family, dir, &obj, &obj_ref);
81426
81427 if (fle && !err) {
81428 - fle->genid = atomic_read(&flow_cache_genid);
81429 + fle->genid = atomic_read_unchecked(&flow_cache_genid);
81430
81431 if (fle->object)
81432 atomic_dec(fle->object_ref);
81433 @@ -258,7 +258,7 @@ static void flow_cache_flush_tasklet(unsigned long data)
81434
81435 fle = flow_table(cpu)[i];
81436 for (; fle; fle = fle->next) {
81437 - unsigned genid = atomic_read(&flow_cache_genid);
81438 + unsigned genid = atomic_read_unchecked(&flow_cache_genid);
81439
81440 if (!fle->object || fle->genid == genid)
81441 continue;
81442 diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
81443 index d4fd895..ac9b1e6 100644
81444 --- a/net/core/rtnetlink.c
81445 +++ b/net/core/rtnetlink.c
81446 @@ -57,7 +57,7 @@ struct rtnl_link
81447 {
81448 rtnl_doit_func doit;
81449 rtnl_dumpit_func dumpit;
81450 -};
81451 +} __no_const;
81452
81453 static DEFINE_MUTEX(rtnl_mutex);
81454
81455 diff --git a/net/core/scm.c b/net/core/scm.c
81456 index d98eafc..1a190a9 100644
81457 --- a/net/core/scm.c
81458 +++ b/net/core/scm.c
81459 @@ -191,7 +191,7 @@ error:
81460 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
81461 {
81462 struct cmsghdr __user *cm
81463 - = (__force struct cmsghdr __user *)msg->msg_control;
81464 + = (struct cmsghdr __force_user *)msg->msg_control;
81465 struct cmsghdr cmhdr;
81466 int cmlen = CMSG_LEN(len);
81467 int err;
81468 @@ -214,7 +214,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
81469 err = -EFAULT;
81470 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
81471 goto out;
81472 - if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
81473 + if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
81474 goto out;
81475 cmlen = CMSG_SPACE(len);
81476 if (msg->msg_controllen < cmlen)
81477 @@ -229,7 +229,7 @@ out:
81478 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
81479 {
81480 struct cmsghdr __user *cm
81481 - = (__force struct cmsghdr __user*)msg->msg_control;
81482 + = (struct cmsghdr __force_user *)msg->msg_control;
81483
81484 int fdmax = 0;
81485 int fdnum = scm->fp->count;
81486 @@ -249,7 +249,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
81487 if (fdnum < fdmax)
81488 fdmax = fdnum;
81489
81490 - for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
81491 + for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
81492 i++, cmfptr++)
81493 {
81494 int new_fd;
81495 diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
81496 index 45329d7..626aaa6 100644
81497 --- a/net/core/secure_seq.c
81498 +++ b/net/core/secure_seq.c
81499 @@ -57,7 +57,7 @@ __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
81500 EXPORT_SYMBOL(secure_tcpv6_sequence_number);
81501
81502 u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
81503 - __be16 dport)
81504 + __be16 dport)
81505 {
81506 u32 secret[MD5_MESSAGE_BYTES / 4];
81507 u32 hash[MD5_DIGEST_WORDS];
81508 @@ -71,7 +71,6 @@ u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
81509 secret[i] = net_secret[i];
81510
81511 md5_transform(hash, secret);
81512 -
81513 return hash[0];
81514 }
81515 #endif
81516 diff --git a/net/core/skbuff.c b/net/core/skbuff.c
81517 index 025f924..70a71c4 100644
81518 --- a/net/core/skbuff.c
81519 +++ b/net/core/skbuff.c
81520 @@ -1544,6 +1544,8 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
81521 struct sk_buff *frag_iter;
81522 struct sock *sk = skb->sk;
81523
81524 + pax_track_stack();
81525 +
81526 /*
81527 * __skb_splice_bits() only fails if the output has no room left,
81528 * so no point in going over the frag_list for the error case.
81529 diff --git a/net/core/sock.c b/net/core/sock.c
81530 index 6605e75..3acebda 100644
81531 --- a/net/core/sock.c
81532 +++ b/net/core/sock.c
81533 @@ -864,11 +864,15 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
81534 break;
81535
81536 case SO_PEERCRED:
81537 + {
81538 + struct ucred peercred;
81539 if (len > sizeof(sk->sk_peercred))
81540 len = sizeof(sk->sk_peercred);
81541 - if (copy_to_user(optval, &sk->sk_peercred, len))
81542 + peercred = sk->sk_peercred;
81543 + if (copy_to_user(optval, &peercred, len))
81544 return -EFAULT;
81545 goto lenout;
81546 + }
81547
81548 case SO_PEERNAME:
81549 {
81550 @@ -1892,7 +1896,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
81551 */
81552 smp_wmb();
81553 atomic_set(&sk->sk_refcnt, 1);
81554 - atomic_set(&sk->sk_drops, 0);
81555 + atomic_set_unchecked(&sk->sk_drops, 0);
81556 }
81557 EXPORT_SYMBOL(sock_init_data);
81558
81559 diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
81560 index 2036568..c55883d 100644
81561 --- a/net/decnet/sysctl_net_decnet.c
81562 +++ b/net/decnet/sysctl_net_decnet.c
81563 @@ -206,7 +206,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
81564
81565 if (len > *lenp) len = *lenp;
81566
81567 - if (copy_to_user(buffer, addr, len))
81568 + if (len > sizeof addr || copy_to_user(buffer, addr, len))
81569 return -EFAULT;
81570
81571 *lenp = len;
81572 @@ -327,7 +327,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
81573
81574 if (len > *lenp) len = *lenp;
81575
81576 - if (copy_to_user(buffer, devname, len))
81577 + if (len > sizeof devname || copy_to_user(buffer, devname, len))
81578 return -EFAULT;
81579
81580 *lenp = len;
81581 diff --git a/net/econet/Kconfig b/net/econet/Kconfig
81582 index 39a2d29..f39c0fe 100644
81583 --- a/net/econet/Kconfig
81584 +++ b/net/econet/Kconfig
81585 @@ -4,7 +4,7 @@
81586
81587 config ECONET
81588 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
81589 - depends on EXPERIMENTAL && INET
81590 + depends on EXPERIMENTAL && INET && BROKEN
81591 ---help---
81592 Econet is a fairly old and slow networking protocol mainly used by
81593 Acorn computers to access file and print servers. It uses native
81594 diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c
81595 index a413b1b..380849c 100644
81596 --- a/net/ieee802154/dgram.c
81597 +++ b/net/ieee802154/dgram.c
81598 @@ -318,7 +318,7 @@ out:
81599 static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb)
81600 {
81601 if (sock_queue_rcv_skb(sk, skb) < 0) {
81602 - atomic_inc(&sk->sk_drops);
81603 + atomic_inc_unchecked(&sk->sk_drops);
81604 kfree_skb(skb);
81605 return NET_RX_DROP;
81606 }
81607 diff --git a/net/ieee802154/raw.c b/net/ieee802154/raw.c
81608 index 30e74ee..bfc6ee0 100644
81609 --- a/net/ieee802154/raw.c
81610 +++ b/net/ieee802154/raw.c
81611 @@ -206,7 +206,7 @@ out:
81612 static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
81613 {
81614 if (sock_queue_rcv_skb(sk, skb) < 0) {
81615 - atomic_inc(&sk->sk_drops);
81616 + atomic_inc_unchecked(&sk->sk_drops);
81617 kfree_skb(skb);
81618 return NET_RX_DROP;
81619 }
81620 diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
81621 index dba56d2..acee5d6 100644
81622 --- a/net/ipv4/inet_diag.c
81623 +++ b/net/ipv4/inet_diag.c
81624 @@ -113,8 +113,13 @@ static int inet_csk_diag_fill(struct sock *sk,
81625 r->idiag_retrans = 0;
81626
81627 r->id.idiag_if = sk->sk_bound_dev_if;
81628 +#ifdef CONFIG_GRKERNSEC_HIDESYM
81629 + r->id.idiag_cookie[0] = 0;
81630 + r->id.idiag_cookie[1] = 0;
81631 +#else
81632 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
81633 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
81634 +#endif
81635
81636 r->id.idiag_sport = inet->sport;
81637 r->id.idiag_dport = inet->dport;
81638 @@ -200,8 +205,15 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
81639 r->idiag_family = tw->tw_family;
81640 r->idiag_retrans = 0;
81641 r->id.idiag_if = tw->tw_bound_dev_if;
81642 +
81643 +#ifdef CONFIG_GRKERNSEC_HIDESYM
81644 + r->id.idiag_cookie[0] = 0;
81645 + r->id.idiag_cookie[1] = 0;
81646 +#else
81647 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
81648 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
81649 +#endif
81650 +
81651 r->id.idiag_sport = tw->tw_sport;
81652 r->id.idiag_dport = tw->tw_dport;
81653 r->id.idiag_src[0] = tw->tw_rcv_saddr;
81654 @@ -284,12 +296,14 @@ static int inet_diag_get_exact(struct sk_buff *in_skb,
81655 if (sk == NULL)
81656 goto unlock;
81657
81658 +#ifndef CONFIG_GRKERNSEC_HIDESYM
81659 err = -ESTALE;
81660 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
81661 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
81662 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
81663 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
81664 goto out;
81665 +#endif
81666
81667 err = -ENOMEM;
81668 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
81669 @@ -579,8 +593,14 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
81670 r->idiag_retrans = req->retrans;
81671
81672 r->id.idiag_if = sk->sk_bound_dev_if;
81673 +
81674 +#ifdef CONFIG_GRKERNSEC_HIDESYM
81675 + r->id.idiag_cookie[0] = 0;
81676 + r->id.idiag_cookie[1] = 0;
81677 +#else
81678 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
81679 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
81680 +#endif
81681
81682 tmo = req->expires - jiffies;
81683 if (tmo < 0)
81684 diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
81685 index d717267..56de7e7 100644
81686 --- a/net/ipv4/inet_hashtables.c
81687 +++ b/net/ipv4/inet_hashtables.c
81688 @@ -18,12 +18,15 @@
81689 #include <linux/sched.h>
81690 #include <linux/slab.h>
81691 #include <linux/wait.h>
81692 +#include <linux/security.h>
81693
81694 #include <net/inet_connection_sock.h>
81695 #include <net/inet_hashtables.h>
81696 #include <net/secure_seq.h>
81697 #include <net/ip.h>
81698
81699 +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
81700 +
81701 /*
81702 * Allocate and initialize a new local port bind bucket.
81703 * The bindhash mutex for snum's hash chain must be held here.
81704 @@ -491,6 +494,8 @@ ok:
81705 }
81706 spin_unlock(&head->lock);
81707
81708 + gr_update_task_in_ip_table(current, inet_sk(sk));
81709 +
81710 if (tw) {
81711 inet_twsk_deschedule(tw, death_row);
81712 inet_twsk_put(tw);
81713 diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
81714 index 13b229f..6956484 100644
81715 --- a/net/ipv4/inetpeer.c
81716 +++ b/net/ipv4/inetpeer.c
81717 @@ -367,6 +367,8 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
81718 struct inet_peer *p, *n;
81719 struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;
81720
81721 + pax_track_stack();
81722 +
81723 /* Look up for the address quickly. */
81724 read_lock_bh(&peer_pool_lock);
81725 p = lookup(daddr, NULL);
81726 @@ -390,7 +392,7 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
81727 return NULL;
81728 n->v4daddr = daddr;
81729 atomic_set(&n->refcnt, 1);
81730 - atomic_set(&n->rid, 0);
81731 + atomic_set_unchecked(&n->rid, 0);
81732 n->ip_id_count = secure_ip_id(daddr);
81733 n->tcp_ts_stamp = 0;
81734
81735 diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
81736 index d3fe10b..feeafc9 100644
81737 --- a/net/ipv4/ip_fragment.c
81738 +++ b/net/ipv4/ip_fragment.c
81739 @@ -255,7 +255,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
81740 return 0;
81741
81742 start = qp->rid;
81743 - end = atomic_inc_return(&peer->rid);
81744 + end = atomic_inc_return_unchecked(&peer->rid);
81745 qp->rid = end;
81746
81747 rc = qp->q.fragments && (end - start) > max;
81748 diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
81749 index e982b5c..f079d75 100644
81750 --- a/net/ipv4/ip_sockglue.c
81751 +++ b/net/ipv4/ip_sockglue.c
81752 @@ -1015,6 +1015,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
81753 int val;
81754 int len;
81755
81756 + pax_track_stack();
81757 +
81758 if (level != SOL_IP)
81759 return -EOPNOTSUPP;
81760
81761 @@ -1173,7 +1175,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
81762 if (sk->sk_type != SOCK_STREAM)
81763 return -ENOPROTOOPT;
81764
81765 - msg.msg_control = optval;
81766 + msg.msg_control = (void __force_kernel *)optval;
81767 msg.msg_controllen = len;
81768 msg.msg_flags = 0;
81769
81770 diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
81771 index f8d04c2..c1188f2 100644
81772 --- a/net/ipv4/ipconfig.c
81773 +++ b/net/ipv4/ipconfig.c
81774 @@ -295,7 +295,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
81775
81776 mm_segment_t oldfs = get_fs();
81777 set_fs(get_ds());
81778 - res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
81779 + res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
81780 set_fs(oldfs);
81781 return res;
81782 }
81783 @@ -306,7 +306,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
81784
81785 mm_segment_t oldfs = get_fs();
81786 set_fs(get_ds());
81787 - res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
81788 + res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
81789 set_fs(oldfs);
81790 return res;
81791 }
81792 @@ -317,7 +317,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
81793
81794 mm_segment_t oldfs = get_fs();
81795 set_fs(get_ds());
81796 - res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
81797 + res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
81798 set_fs(oldfs);
81799 return res;
81800 }
81801 diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
81802 index c8b0cc3..4da5ae2 100644
81803 --- a/net/ipv4/netfilter/arp_tables.c
81804 +++ b/net/ipv4/netfilter/arp_tables.c
81805 @@ -934,6 +934,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
81806 private = &tmp;
81807 }
81808 #endif
81809 + memset(&info, 0, sizeof(info));
81810 info.valid_hooks = t->valid_hooks;
81811 memcpy(info.hook_entry, private->hook_entry,
81812 sizeof(info.hook_entry));
81813 diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
81814 index c156db2..e772975 100644
81815 --- a/net/ipv4/netfilter/ip_queue.c
81816 +++ b/net/ipv4/netfilter/ip_queue.c
81817 @@ -286,6 +286,9 @@ ipq_mangle_ipv4(ipq_verdict_msg_t *v, struct nf_queue_entry *e)
81818
81819 if (v->data_len < sizeof(*user_iph))
81820 return 0;
81821 + if (v->data_len > 65535)
81822 + return -EMSGSIZE;
81823 +
81824 diff = v->data_len - e->skb->len;
81825 if (diff < 0) {
81826 if (pskb_trim(e->skb, v->data_len))
81827 @@ -409,7 +412,8 @@ ipq_dev_drop(int ifindex)
81828 static inline void
81829 __ipq_rcv_skb(struct sk_buff *skb)
81830 {
81831 - int status, type, pid, flags, nlmsglen, skblen;
81832 + int status, type, pid, flags;
81833 + unsigned int nlmsglen, skblen;
81834 struct nlmsghdr *nlh;
81835
81836 skblen = skb->len;
81837 diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
81838 index 0606db1..02e7e4c 100644
81839 --- a/net/ipv4/netfilter/ip_tables.c
81840 +++ b/net/ipv4/netfilter/ip_tables.c
81841 @@ -1141,6 +1141,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
81842 private = &tmp;
81843 }
81844 #endif
81845 + memset(&info, 0, sizeof(info));
81846 info.valid_hooks = t->valid_hooks;
81847 memcpy(info.hook_entry, private->hook_entry,
81848 sizeof(info.hook_entry));
81849 diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
81850 index d9521f6..3c3eb25 100644
81851 --- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
81852 +++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
81853 @@ -397,7 +397,7 @@ static unsigned char asn1_octets_decode(struct asn1_ctx *ctx,
81854
81855 *len = 0;
81856
81857 - *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
81858 + *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
81859 if (*octets == NULL) {
81860 if (net_ratelimit())
81861 printk("OOM in bsalg (%d)\n", __LINE__);
81862 diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
81863 index ab996f9..3da5f96 100644
81864 --- a/net/ipv4/raw.c
81865 +++ b/net/ipv4/raw.c
81866 @@ -292,7 +292,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
81867 /* Charge it to the socket. */
81868
81869 if (sock_queue_rcv_skb(sk, skb) < 0) {
81870 - atomic_inc(&sk->sk_drops);
81871 + atomic_inc_unchecked(&sk->sk_drops);
81872 kfree_skb(skb);
81873 return NET_RX_DROP;
81874 }
81875 @@ -303,7 +303,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
81876 int raw_rcv(struct sock *sk, struct sk_buff *skb)
81877 {
81878 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
81879 - atomic_inc(&sk->sk_drops);
81880 + atomic_inc_unchecked(&sk->sk_drops);
81881 kfree_skb(skb);
81882 return NET_RX_DROP;
81883 }
81884 @@ -724,16 +724,23 @@ static int raw_init(struct sock *sk)
81885
81886 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
81887 {
81888 + struct icmp_filter filter;
81889 +
81890 + if (optlen < 0)
81891 + return -EINVAL;
81892 if (optlen > sizeof(struct icmp_filter))
81893 optlen = sizeof(struct icmp_filter);
81894 - if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
81895 + if (copy_from_user(&filter, optval, optlen))
81896 return -EFAULT;
81897 + raw_sk(sk)->filter = filter;
81898 +
81899 return 0;
81900 }
81901
81902 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
81903 {
81904 int len, ret = -EFAULT;
81905 + struct icmp_filter filter;
81906
81907 if (get_user(len, optlen))
81908 goto out;
81909 @@ -743,8 +750,9 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
81910 if (len > sizeof(struct icmp_filter))
81911 len = sizeof(struct icmp_filter);
81912 ret = -EFAULT;
81913 - if (put_user(len, optlen) ||
81914 - copy_to_user(optval, &raw_sk(sk)->filter, len))
81915 + filter = raw_sk(sk)->filter;
81916 + if (put_user(len, optlen) || len > sizeof filter ||
81917 + copy_to_user(optval, &filter, len))
81918 goto out;
81919 ret = 0;
81920 out: return ret;
81921 @@ -954,7 +962,13 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
81922 sk_wmem_alloc_get(sp),
81923 sk_rmem_alloc_get(sp),
81924 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
81925 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
81926 + atomic_read(&sp->sk_refcnt),
81927 +#ifdef CONFIG_GRKERNSEC_HIDESYM
81928 + NULL,
81929 +#else
81930 + sp,
81931 +#endif
81932 + atomic_read_unchecked(&sp->sk_drops));
81933 }
81934
81935 static int raw_seq_show(struct seq_file *seq, void *v)
81936 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
81937 index 58f141b..b759702 100644
81938 --- a/net/ipv4/route.c
81939 +++ b/net/ipv4/route.c
81940 @@ -269,7 +269,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
81941
81942 static inline int rt_genid(struct net *net)
81943 {
81944 - return atomic_read(&net->ipv4.rt_genid);
81945 + return atomic_read_unchecked(&net->ipv4.rt_genid);
81946 }
81947
81948 #ifdef CONFIG_PROC_FS
81949 @@ -889,7 +889,7 @@ static void rt_cache_invalidate(struct net *net)
81950 unsigned char shuffle;
81951
81952 get_random_bytes(&shuffle, sizeof(shuffle));
81953 - atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
81954 + atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
81955 }
81956
81957 /*
81958 @@ -3357,7 +3357,7 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
81959
81960 static __net_init int rt_secret_timer_init(struct net *net)
81961 {
81962 - atomic_set(&net->ipv4.rt_genid,
81963 + atomic_set_unchecked(&net->ipv4.rt_genid,
81964 (int) ((num_physpages ^ (num_physpages>>8)) ^
81965 (jiffies ^ (jiffies >> 7))));
81966
81967 diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
81968 index f095659..adc892a 100644
81969 --- a/net/ipv4/tcp.c
81970 +++ b/net/ipv4/tcp.c
81971 @@ -2085,6 +2085,8 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
81972 int val;
81973 int err = 0;
81974
81975 + pax_track_stack();
81976 +
81977 /* This is a string value all the others are int's */
81978 if (optname == TCP_CONGESTION) {
81979 char name[TCP_CA_NAME_MAX];
81980 @@ -2355,6 +2357,8 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
81981 struct tcp_sock *tp = tcp_sk(sk);
81982 int val, len;
81983
81984 + pax_track_stack();
81985 +
81986 if (get_user(len, optlen))
81987 return -EFAULT;
81988
81989 diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
81990 index 6fc7961..33bad4a 100644
81991 --- a/net/ipv4/tcp_ipv4.c
81992 +++ b/net/ipv4/tcp_ipv4.c
81993 @@ -85,6 +85,9 @@
81994 int sysctl_tcp_tw_reuse __read_mostly;
81995 int sysctl_tcp_low_latency __read_mostly;
81996
81997 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81998 +extern int grsec_enable_blackhole;
81999 +#endif
82000
82001 #ifdef CONFIG_TCP_MD5SIG
82002 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
82003 @@ -1543,6 +1546,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
82004 return 0;
82005
82006 reset:
82007 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82008 + if (!grsec_enable_blackhole)
82009 +#endif
82010 tcp_v4_send_reset(rsk, skb);
82011 discard:
82012 kfree_skb(skb);
82013 @@ -1604,12 +1610,20 @@ int tcp_v4_rcv(struct sk_buff *skb)
82014 TCP_SKB_CB(skb)->sacked = 0;
82015
82016 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
82017 - if (!sk)
82018 + if (!sk) {
82019 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82020 + ret = 1;
82021 +#endif
82022 goto no_tcp_socket;
82023 + }
82024
82025 process:
82026 - if (sk->sk_state == TCP_TIME_WAIT)
82027 + if (sk->sk_state == TCP_TIME_WAIT) {
82028 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82029 + ret = 2;
82030 +#endif
82031 goto do_time_wait;
82032 + }
82033
82034 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
82035 goto discard_and_relse;
82036 @@ -1651,6 +1665,10 @@ no_tcp_socket:
82037 bad_packet:
82038 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
82039 } else {
82040 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82041 + if (!grsec_enable_blackhole || (ret == 1 &&
82042 + (skb->dev->flags & IFF_LOOPBACK)))
82043 +#endif
82044 tcp_v4_send_reset(NULL, skb);
82045 }
82046
82047 @@ -2238,7 +2256,11 @@ static void get_openreq4(struct sock *sk, struct request_sock *req,
82048 0, /* non standard timer */
82049 0, /* open_requests have no inode */
82050 atomic_read(&sk->sk_refcnt),
82051 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82052 + NULL,
82053 +#else
82054 req,
82055 +#endif
82056 len);
82057 }
82058
82059 @@ -2280,7 +2302,12 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
82060 sock_i_uid(sk),
82061 icsk->icsk_probes_out,
82062 sock_i_ino(sk),
82063 - atomic_read(&sk->sk_refcnt), sk,
82064 + atomic_read(&sk->sk_refcnt),
82065 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82066 + NULL,
82067 +#else
82068 + sk,
82069 +#endif
82070 jiffies_to_clock_t(icsk->icsk_rto),
82071 jiffies_to_clock_t(icsk->icsk_ack.ato),
82072 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
82073 @@ -2308,7 +2335,13 @@ static void get_timewait4_sock(struct inet_timewait_sock *tw,
82074 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
82075 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
82076 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
82077 - atomic_read(&tw->tw_refcnt), tw, len);
82078 + atomic_read(&tw->tw_refcnt),
82079 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82080 + NULL,
82081 +#else
82082 + tw,
82083 +#endif
82084 + len);
82085 }
82086
82087 #define TMPSZ 150
82088 diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
82089 index 4c03598..e09a8e8 100644
82090 --- a/net/ipv4/tcp_minisocks.c
82091 +++ b/net/ipv4/tcp_minisocks.c
82092 @@ -26,6 +26,10 @@
82093 #include <net/inet_common.h>
82094 #include <net/xfrm.h>
82095
82096 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82097 +extern int grsec_enable_blackhole;
82098 +#endif
82099 +
82100 #ifdef CONFIG_SYSCTL
82101 #define SYNC_INIT 0 /* let the user enable it */
82102 #else
82103 @@ -672,6 +676,10 @@ listen_overflow:
82104
82105 embryonic_reset:
82106 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
82107 +
82108 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82109 + if (!grsec_enable_blackhole)
82110 +#endif
82111 if (!(flg & TCP_FLAG_RST))
82112 req->rsk_ops->send_reset(sk, skb);
82113
82114 diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
82115 index af83bdf..ec91cb2 100644
82116 --- a/net/ipv4/tcp_output.c
82117 +++ b/net/ipv4/tcp_output.c
82118 @@ -2234,6 +2234,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
82119 __u8 *md5_hash_location;
82120 int mss;
82121
82122 + pax_track_stack();
82123 +
82124 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
82125 if (skb == NULL)
82126 return NULL;
82127 diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
82128 index 59f5b5e..193860f 100644
82129 --- a/net/ipv4/tcp_probe.c
82130 +++ b/net/ipv4/tcp_probe.c
82131 @@ -200,7 +200,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
82132 if (cnt + width >= len)
82133 break;
82134
82135 - if (copy_to_user(buf + cnt, tbuf, width))
82136 + if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
82137 return -EFAULT;
82138 cnt += width;
82139 }
82140 diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
82141 index 57d5501..a9ed13a 100644
82142 --- a/net/ipv4/tcp_timer.c
82143 +++ b/net/ipv4/tcp_timer.c
82144 @@ -21,6 +21,10 @@
82145 #include <linux/module.h>
82146 #include <net/tcp.h>
82147
82148 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82149 +extern int grsec_lastack_retries;
82150 +#endif
82151 +
82152 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
82153 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
82154 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
82155 @@ -164,6 +168,13 @@ static int tcp_write_timeout(struct sock *sk)
82156 }
82157 }
82158
82159 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82160 + if ((sk->sk_state == TCP_LAST_ACK) &&
82161 + (grsec_lastack_retries > 0) &&
82162 + (grsec_lastack_retries < retry_until))
82163 + retry_until = grsec_lastack_retries;
82164 +#endif
82165 +
82166 if (retransmits_timed_out(sk, retry_until)) {
82167 /* Has it gone just too far? */
82168 tcp_write_err(sk);
82169 diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
82170 index 8e28770..72105c8 100644
82171 --- a/net/ipv4/udp.c
82172 +++ b/net/ipv4/udp.c
82173 @@ -86,6 +86,7 @@
82174 #include <linux/types.h>
82175 #include <linux/fcntl.h>
82176 #include <linux/module.h>
82177 +#include <linux/security.h>
82178 #include <linux/socket.h>
82179 #include <linux/sockios.h>
82180 #include <linux/igmp.h>
82181 @@ -106,6 +107,10 @@
82182 #include <net/xfrm.h>
82183 #include "udp_impl.h"
82184
82185 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82186 +extern int grsec_enable_blackhole;
82187 +#endif
82188 +
82189 struct udp_table udp_table;
82190 EXPORT_SYMBOL(udp_table);
82191
82192 @@ -371,6 +376,9 @@ found:
82193 return s;
82194 }
82195
82196 +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
82197 +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
82198 +
82199 /*
82200 * This routine is called by the ICMP module when it gets some
82201 * sort of error condition. If err < 0 then the socket should
82202 @@ -639,9 +647,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
82203 dport = usin->sin_port;
82204 if (dport == 0)
82205 return -EINVAL;
82206 +
82207 + err = gr_search_udp_sendmsg(sk, usin);
82208 + if (err)
82209 + return err;
82210 } else {
82211 if (sk->sk_state != TCP_ESTABLISHED)
82212 return -EDESTADDRREQ;
82213 +
82214 + err = gr_search_udp_sendmsg(sk, NULL);
82215 + if (err)
82216 + return err;
82217 +
82218 daddr = inet->daddr;
82219 dport = inet->dport;
82220 /* Open fast path for connected socket.
82221 @@ -945,6 +962,10 @@ try_again:
82222 if (!skb)
82223 goto out;
82224
82225 + err = gr_search_udp_recvmsg(sk, skb);
82226 + if (err)
82227 + goto out_free;
82228 +
82229 ulen = skb->len - sizeof(struct udphdr);
82230 copied = len;
82231 if (copied > ulen)
82232 @@ -1068,7 +1089,7 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
82233 if (rc == -ENOMEM) {
82234 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
82235 is_udplite);
82236 - atomic_inc(&sk->sk_drops);
82237 + atomic_inc_unchecked(&sk->sk_drops);
82238 }
82239 goto drop;
82240 }
82241 @@ -1338,6 +1359,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
82242 goto csum_error;
82243
82244 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
82245 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82246 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
82247 +#endif
82248 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
82249
82250 /*
82251 @@ -1758,8 +1782,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
82252 sk_wmem_alloc_get(sp),
82253 sk_rmem_alloc_get(sp),
82254 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
82255 - atomic_read(&sp->sk_refcnt), sp,
82256 - atomic_read(&sp->sk_drops), len);
82257 + atomic_read(&sp->sk_refcnt),
82258 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82259 + NULL,
82260 +#else
82261 + sp,
82262 +#endif
82263 + atomic_read_unchecked(&sp->sk_drops), len);
82264 }
82265
82266 int udp4_seq_show(struct seq_file *seq, void *v)
82267 diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
82268 index 8ac3d09..fc58c5f 100644
82269 --- a/net/ipv6/addrconf.c
82270 +++ b/net/ipv6/addrconf.c
82271 @@ -2053,7 +2053,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
82272 p.iph.ihl = 5;
82273 p.iph.protocol = IPPROTO_IPV6;
82274 p.iph.ttl = 64;
82275 - ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
82276 + ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
82277
82278 if (ops->ndo_do_ioctl) {
82279 mm_segment_t oldfs = get_fs();
82280 diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
82281 index cc4797d..7cfdfcc 100644
82282 --- a/net/ipv6/inet6_connection_sock.c
82283 +++ b/net/ipv6/inet6_connection_sock.c
82284 @@ -152,7 +152,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
82285 #ifdef CONFIG_XFRM
82286 {
82287 struct rt6_info *rt = (struct rt6_info *)dst;
82288 - rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
82289 + rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
82290 }
82291 #endif
82292 }
82293 @@ -167,7 +167,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
82294 #ifdef CONFIG_XFRM
82295 if (dst) {
82296 struct rt6_info *rt = (struct rt6_info *)dst;
82297 - if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
82298 + if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
82299 sk->sk_dst_cache = NULL;
82300 dst_release(dst);
82301 dst = NULL;
82302 diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
82303 index 093e9b2..f72cddb 100644
82304 --- a/net/ipv6/inet6_hashtables.c
82305 +++ b/net/ipv6/inet6_hashtables.c
82306 @@ -119,7 +119,7 @@ out:
82307 }
82308 EXPORT_SYMBOL(__inet6_lookup_established);
82309
82310 -static int inline compute_score(struct sock *sk, struct net *net,
82311 +static inline int compute_score(struct sock *sk, struct net *net,
82312 const unsigned short hnum,
82313 const struct in6_addr *daddr,
82314 const int dif)
82315 diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
82316 index 4f7aaf6..f7acf45 100644
82317 --- a/net/ipv6/ipv6_sockglue.c
82318 +++ b/net/ipv6/ipv6_sockglue.c
82319 @@ -130,6 +130,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
82320 int val, valbool;
82321 int retv = -ENOPROTOOPT;
82322
82323 + pax_track_stack();
82324 +
82325 if (optval == NULL)
82326 val=0;
82327 else {
82328 @@ -881,6 +883,8 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
82329 int len;
82330 int val;
82331
82332 + pax_track_stack();
82333 +
82334 if (ip6_mroute_opt(optname))
82335 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
82336
82337 @@ -922,7 +926,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
82338 if (sk->sk_type != SOCK_STREAM)
82339 return -ENOPROTOOPT;
82340
82341 - msg.msg_control = optval;
82342 + msg.msg_control = (void __force_kernel *)optval;
82343 msg.msg_controllen = len;
82344 msg.msg_flags = 0;
82345
82346 diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
82347 index 1cf3f0c..1d4376f 100644
82348 --- a/net/ipv6/netfilter/ip6_queue.c
82349 +++ b/net/ipv6/netfilter/ip6_queue.c
82350 @@ -287,6 +287,9 @@ ipq_mangle_ipv6(ipq_verdict_msg_t *v, struct nf_queue_entry *e)
82351
82352 if (v->data_len < sizeof(*user_iph))
82353 return 0;
82354 + if (v->data_len > 65535)
82355 + return -EMSGSIZE;
82356 +
82357 diff = v->data_len - e->skb->len;
82358 if (diff < 0) {
82359 if (pskb_trim(e->skb, v->data_len))
82360 @@ -411,7 +414,8 @@ ipq_dev_drop(int ifindex)
82361 static inline void
82362 __ipq_rcv_skb(struct sk_buff *skb)
82363 {
82364 - int status, type, pid, flags, nlmsglen, skblen;
82365 + int status, type, pid, flags;
82366 + unsigned int nlmsglen, skblen;
82367 struct nlmsghdr *nlh;
82368
82369 skblen = skb->len;
82370 diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
82371 index 78b5a36..7f37433 100644
82372 --- a/net/ipv6/netfilter/ip6_tables.c
82373 +++ b/net/ipv6/netfilter/ip6_tables.c
82374 @@ -1173,6 +1173,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
82375 private = &tmp;
82376 }
82377 #endif
82378 + memset(&info, 0, sizeof(info));
82379 info.valid_hooks = t->valid_hooks;
82380 memcpy(info.hook_entry, private->hook_entry,
82381 sizeof(info.hook_entry));
82382 diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
82383 index 4f24570..b813b34 100644
82384 --- a/net/ipv6/raw.c
82385 +++ b/net/ipv6/raw.c
82386 @@ -375,14 +375,14 @@ static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb)
82387 {
82388 if ((raw6_sk(sk)->checksum || sk->sk_filter) &&
82389 skb_checksum_complete(skb)) {
82390 - atomic_inc(&sk->sk_drops);
82391 + atomic_inc_unchecked(&sk->sk_drops);
82392 kfree_skb(skb);
82393 return NET_RX_DROP;
82394 }
82395
82396 /* Charge it to the socket. */
82397 if (sock_queue_rcv_skb(sk,skb)<0) {
82398 - atomic_inc(&sk->sk_drops);
82399 + atomic_inc_unchecked(&sk->sk_drops);
82400 kfree_skb(skb);
82401 return NET_RX_DROP;
82402 }
82403 @@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
82404 struct raw6_sock *rp = raw6_sk(sk);
82405
82406 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
82407 - atomic_inc(&sk->sk_drops);
82408 + atomic_inc_unchecked(&sk->sk_drops);
82409 kfree_skb(skb);
82410 return NET_RX_DROP;
82411 }
82412 @@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
82413
82414 if (inet->hdrincl) {
82415 if (skb_checksum_complete(skb)) {
82416 - atomic_inc(&sk->sk_drops);
82417 + atomic_inc_unchecked(&sk->sk_drops);
82418 kfree_skb(skb);
82419 return NET_RX_DROP;
82420 }
82421 @@ -518,7 +518,7 @@ csum_copy_err:
82422 as some normal condition.
82423 */
82424 err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
82425 - atomic_inc(&sk->sk_drops);
82426 + atomic_inc_unchecked(&sk->sk_drops);
82427 goto out;
82428 }
82429
82430 @@ -600,7 +600,7 @@ out:
82431 return err;
82432 }
82433
82434 -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
82435 +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
82436 struct flowi *fl, struct rt6_info *rt,
82437 unsigned int flags)
82438 {
82439 @@ -738,6 +738,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
82440 u16 proto;
82441 int err;
82442
82443 + pax_track_stack();
82444 +
82445 /* Rough check on arithmetic overflow,
82446 better check is made in ip6_append_data().
82447 */
82448 @@ -916,12 +918,17 @@ do_confirm:
82449 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
82450 char __user *optval, int optlen)
82451 {
82452 + struct icmp6_filter filter;
82453 +
82454 switch (optname) {
82455 case ICMPV6_FILTER:
82456 + if (optlen < 0)
82457 + return -EINVAL;
82458 if (optlen > sizeof(struct icmp6_filter))
82459 optlen = sizeof(struct icmp6_filter);
82460 - if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
82461 + if (copy_from_user(&filter, optval, optlen))
82462 return -EFAULT;
82463 + raw6_sk(sk)->filter = filter;
82464 return 0;
82465 default:
82466 return -ENOPROTOOPT;
82467 @@ -934,6 +941,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
82468 char __user *optval, int __user *optlen)
82469 {
82470 int len;
82471 + struct icmp6_filter filter;
82472
82473 switch (optname) {
82474 case ICMPV6_FILTER:
82475 @@ -945,7 +953,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
82476 len = sizeof(struct icmp6_filter);
82477 if (put_user(len, optlen))
82478 return -EFAULT;
82479 - if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
82480 + filter = raw6_sk(sk)->filter;
82481 + if (len > sizeof filter || copy_to_user(optval, &filter, len))
82482 return -EFAULT;
82483 return 0;
82484 default:
82485 @@ -1241,7 +1250,13 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
82486 0, 0L, 0,
82487 sock_i_uid(sp), 0,
82488 sock_i_ino(sp),
82489 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
82490 + atomic_read(&sp->sk_refcnt),
82491 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82492 + NULL,
82493 +#else
82494 + sp,
82495 +#endif
82496 + atomic_read_unchecked(&sp->sk_drops));
82497 }
82498
82499 static int raw6_seq_show(struct seq_file *seq, void *v)
82500 diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
82501 index faae6df..d4430c1 100644
82502 --- a/net/ipv6/tcp_ipv6.c
82503 +++ b/net/ipv6/tcp_ipv6.c
82504 @@ -89,6 +89,10 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
82505 }
82506 #endif
82507
82508 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82509 +extern int grsec_enable_blackhole;
82510 +#endif
82511 +
82512 static void tcp_v6_hash(struct sock *sk)
82513 {
82514 if (sk->sk_state != TCP_CLOSE) {
82515 @@ -1579,6 +1583,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
82516 return 0;
82517
82518 reset:
82519 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82520 + if (!grsec_enable_blackhole)
82521 +#endif
82522 tcp_v6_send_reset(sk, skb);
82523 discard:
82524 if (opt_skb)
82525 @@ -1656,12 +1663,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
82526 TCP_SKB_CB(skb)->sacked = 0;
82527
82528 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
82529 - if (!sk)
82530 + if (!sk) {
82531 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82532 + ret = 1;
82533 +#endif
82534 goto no_tcp_socket;
82535 + }
82536
82537 process:
82538 - if (sk->sk_state == TCP_TIME_WAIT)
82539 + if (sk->sk_state == TCP_TIME_WAIT) {
82540 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82541 + ret = 2;
82542 +#endif
82543 goto do_time_wait;
82544 + }
82545
82546 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
82547 goto discard_and_relse;
82548 @@ -1701,6 +1716,10 @@ no_tcp_socket:
82549 bad_packet:
82550 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
82551 } else {
82552 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82553 + if (!grsec_enable_blackhole || (ret == 1 &&
82554 + (skb->dev->flags & IFF_LOOPBACK)))
82555 +#endif
82556 tcp_v6_send_reset(NULL, skb);
82557 }
82558
82559 @@ -1916,7 +1935,13 @@ static void get_openreq6(struct seq_file *seq,
82560 uid,
82561 0, /* non standard timer */
82562 0, /* open_requests have no inode */
82563 - 0, req);
82564 + 0,
82565 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82566 + NULL
82567 +#else
82568 + req
82569 +#endif
82570 + );
82571 }
82572
82573 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
82574 @@ -1966,7 +1991,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
82575 sock_i_uid(sp),
82576 icsk->icsk_probes_out,
82577 sock_i_ino(sp),
82578 - atomic_read(&sp->sk_refcnt), sp,
82579 + atomic_read(&sp->sk_refcnt),
82580 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82581 + NULL,
82582 +#else
82583 + sp,
82584 +#endif
82585 jiffies_to_clock_t(icsk->icsk_rto),
82586 jiffies_to_clock_t(icsk->icsk_ack.ato),
82587 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
82588 @@ -2001,7 +2031,13 @@ static void get_timewait6_sock(struct seq_file *seq,
82589 dest->s6_addr32[2], dest->s6_addr32[3], destp,
82590 tw->tw_substate, 0, 0,
82591 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
82592 - atomic_read(&tw->tw_refcnt), tw);
82593 + atomic_read(&tw->tw_refcnt),
82594 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82595 + NULL
82596 +#else
82597 + tw
82598 +#endif
82599 + );
82600 }
82601
82602 static int tcp6_seq_show(struct seq_file *seq, void *v)
82603 diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
82604 index 9cc6289..052c521 100644
82605 --- a/net/ipv6/udp.c
82606 +++ b/net/ipv6/udp.c
82607 @@ -49,6 +49,10 @@
82608 #include <linux/seq_file.h>
82609 #include "udp_impl.h"
82610
82611 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82612 +extern int grsec_enable_blackhole;
82613 +#endif
82614 +
82615 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
82616 {
82617 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
82618 @@ -391,7 +395,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
82619 if (rc == -ENOMEM) {
82620 UDP6_INC_STATS_BH(sock_net(sk),
82621 UDP_MIB_RCVBUFERRORS, is_udplite);
82622 - atomic_inc(&sk->sk_drops);
82623 + atomic_inc_unchecked(&sk->sk_drops);
82624 }
82625 goto drop;
82626 }
82627 @@ -590,6 +594,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
82628 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
82629 proto == IPPROTO_UDPLITE);
82630
82631 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82632 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
82633 +#endif
82634 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev);
82635
82636 kfree_skb(skb);
82637 @@ -1209,8 +1216,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
82638 0, 0L, 0,
82639 sock_i_uid(sp), 0,
82640 sock_i_ino(sp),
82641 - atomic_read(&sp->sk_refcnt), sp,
82642 - atomic_read(&sp->sk_drops));
82643 + atomic_read(&sp->sk_refcnt),
82644 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82645 + NULL,
82646 +#else
82647 + sp,
82648 +#endif
82649 + atomic_read_unchecked(&sp->sk_drops));
82650 }
82651
82652 int udp6_seq_show(struct seq_file *seq, void *v)
82653 diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
82654 index 811984d..11f59b7 100644
82655 --- a/net/irda/ircomm/ircomm_tty.c
82656 +++ b/net/irda/ircomm/ircomm_tty.c
82657 @@ -280,16 +280,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
82658 add_wait_queue(&self->open_wait, &wait);
82659
82660 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
82661 - __FILE__,__LINE__, tty->driver->name, self->open_count );
82662 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
82663
82664 /* As far as I can see, we protect open_count - Jean II */
82665 spin_lock_irqsave(&self->spinlock, flags);
82666 if (!tty_hung_up_p(filp)) {
82667 extra_count = 1;
82668 - self->open_count--;
82669 + local_dec(&self->open_count);
82670 }
82671 spin_unlock_irqrestore(&self->spinlock, flags);
82672 - self->blocked_open++;
82673 + local_inc(&self->blocked_open);
82674
82675 while (1) {
82676 if (tty->termios->c_cflag & CBAUD) {
82677 @@ -329,7 +329,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
82678 }
82679
82680 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
82681 - __FILE__,__LINE__, tty->driver->name, self->open_count );
82682 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
82683
82684 schedule();
82685 }
82686 @@ -340,13 +340,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
82687 if (extra_count) {
82688 /* ++ is not atomic, so this should be protected - Jean II */
82689 spin_lock_irqsave(&self->spinlock, flags);
82690 - self->open_count++;
82691 + local_inc(&self->open_count);
82692 spin_unlock_irqrestore(&self->spinlock, flags);
82693 }
82694 - self->blocked_open--;
82695 + local_dec(&self->blocked_open);
82696
82697 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
82698 - __FILE__,__LINE__, tty->driver->name, self->open_count);
82699 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
82700
82701 if (!retval)
82702 self->flags |= ASYNC_NORMAL_ACTIVE;
82703 @@ -415,14 +415,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
82704 }
82705 /* ++ is not atomic, so this should be protected - Jean II */
82706 spin_lock_irqsave(&self->spinlock, flags);
82707 - self->open_count++;
82708 + local_inc(&self->open_count);
82709
82710 tty->driver_data = self;
82711 self->tty = tty;
82712 spin_unlock_irqrestore(&self->spinlock, flags);
82713
82714 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
82715 - self->line, self->open_count);
82716 + self->line, local_read(&self->open_count));
82717
82718 /* Not really used by us, but lets do it anyway */
82719 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
82720 @@ -511,7 +511,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
82721 return;
82722 }
82723
82724 - if ((tty->count == 1) && (self->open_count != 1)) {
82725 + if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
82726 /*
82727 * Uh, oh. tty->count is 1, which means that the tty
82728 * structure will be freed. state->count should always
82729 @@ -521,16 +521,16 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
82730 */
82731 IRDA_DEBUG(0, "%s(), bad serial port count; "
82732 "tty->count is 1, state->count is %d\n", __func__ ,
82733 - self->open_count);
82734 - self->open_count = 1;
82735 + local_read(&self->open_count));
82736 + local_set(&self->open_count, 1);
82737 }
82738
82739 - if (--self->open_count < 0) {
82740 + if (local_dec_return(&self->open_count) < 0) {
82741 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
82742 - __func__, self->line, self->open_count);
82743 - self->open_count = 0;
82744 + __func__, self->line, local_read(&self->open_count));
82745 + local_set(&self->open_count, 0);
82746 }
82747 - if (self->open_count) {
82748 + if (local_read(&self->open_count)) {
82749 spin_unlock_irqrestore(&self->spinlock, flags);
82750
82751 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
82752 @@ -562,7 +562,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
82753 tty->closing = 0;
82754 self->tty = NULL;
82755
82756 - if (self->blocked_open) {
82757 + if (local_read(&self->blocked_open)) {
82758 if (self->close_delay)
82759 schedule_timeout_interruptible(self->close_delay);
82760 wake_up_interruptible(&self->open_wait);
82761 @@ -1017,7 +1017,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
82762 spin_lock_irqsave(&self->spinlock, flags);
82763 self->flags &= ~ASYNC_NORMAL_ACTIVE;
82764 self->tty = NULL;
82765 - self->open_count = 0;
82766 + local_set(&self->open_count, 0);
82767 spin_unlock_irqrestore(&self->spinlock, flags);
82768
82769 wake_up_interruptible(&self->open_wait);
82770 @@ -1369,7 +1369,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
82771 seq_putc(m, '\n');
82772
82773 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
82774 - seq_printf(m, "Open count: %d\n", self->open_count);
82775 + seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
82776 seq_printf(m, "Max data size: %d\n", self->max_data_size);
82777 seq_printf(m, "Max header size: %d\n", self->max_header_size);
82778
82779 diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
82780 index bada1b9..f325943 100644
82781 --- a/net/iucv/af_iucv.c
82782 +++ b/net/iucv/af_iucv.c
82783 @@ -651,10 +651,10 @@ static int iucv_sock_autobind(struct sock *sk)
82784
82785 write_lock_bh(&iucv_sk_list.lock);
82786
82787 - sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
82788 + sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
82789 while (__iucv_get_sock_by_name(name)) {
82790 sprintf(name, "%08x",
82791 - atomic_inc_return(&iucv_sk_list.autobind_name));
82792 + atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
82793 }
82794
82795 write_unlock_bh(&iucv_sk_list.lock);
82796 diff --git a/net/key/af_key.c b/net/key/af_key.c
82797 index 4e98193..439b449 100644
82798 --- a/net/key/af_key.c
82799 +++ b/net/key/af_key.c
82800 @@ -2489,6 +2489,8 @@ static int pfkey_migrate(struct sock *sk, struct sk_buff *skb,
82801 struct xfrm_migrate m[XFRM_MAX_DEPTH];
82802 struct xfrm_kmaddress k;
82803
82804 + pax_track_stack();
82805 +
82806 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
82807 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
82808 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
82809 @@ -3660,7 +3662,11 @@ static int pfkey_seq_show(struct seq_file *f, void *v)
82810 seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n");
82811 else
82812 seq_printf(f ,"%p %-6d %-6u %-6u %-6u %-6lu\n",
82813 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82814 + NULL,
82815 +#else
82816 s,
82817 +#endif
82818 atomic_read(&s->sk_refcnt),
82819 sk_rmem_alloc_get(s),
82820 sk_wmem_alloc_get(s),
82821 diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c
82822 index bda96d1..c038b72 100644
82823 --- a/net/lapb/lapb_iface.c
82824 +++ b/net/lapb/lapb_iface.c
82825 @@ -157,7 +157,7 @@ int lapb_register(struct net_device *dev, struct lapb_register_struct *callbacks
82826 goto out;
82827
82828 lapb->dev = dev;
82829 - lapb->callbacks = *callbacks;
82830 + lapb->callbacks = callbacks;
82831
82832 __lapb_insert_cb(lapb);
82833
82834 @@ -379,32 +379,32 @@ int lapb_data_received(struct net_device *dev, struct sk_buff *skb)
82835
82836 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
82837 {
82838 - if (lapb->callbacks.connect_confirmation)
82839 - lapb->callbacks.connect_confirmation(lapb->dev, reason);
82840 + if (lapb->callbacks->connect_confirmation)
82841 + lapb->callbacks->connect_confirmation(lapb->dev, reason);
82842 }
82843
82844 void lapb_connect_indication(struct lapb_cb *lapb, int reason)
82845 {
82846 - if (lapb->callbacks.connect_indication)
82847 - lapb->callbacks.connect_indication(lapb->dev, reason);
82848 + if (lapb->callbacks->connect_indication)
82849 + lapb->callbacks->connect_indication(lapb->dev, reason);
82850 }
82851
82852 void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
82853 {
82854 - if (lapb->callbacks.disconnect_confirmation)
82855 - lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
82856 + if (lapb->callbacks->disconnect_confirmation)
82857 + lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
82858 }
82859
82860 void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
82861 {
82862 - if (lapb->callbacks.disconnect_indication)
82863 - lapb->callbacks.disconnect_indication(lapb->dev, reason);
82864 + if (lapb->callbacks->disconnect_indication)
82865 + lapb->callbacks->disconnect_indication(lapb->dev, reason);
82866 }
82867
82868 int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
82869 {
82870 - if (lapb->callbacks.data_indication)
82871 - return lapb->callbacks.data_indication(lapb->dev, skb);
82872 + if (lapb->callbacks->data_indication)
82873 + return lapb->callbacks->data_indication(lapb->dev, skb);
82874
82875 kfree_skb(skb);
82876 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
82877 @@ -414,8 +414,8 @@ int lapb_data_transmit(struct lapb_cb *lapb, struct sk_buff *skb)
82878 {
82879 int used = 0;
82880
82881 - if (lapb->callbacks.data_transmit) {
82882 - lapb->callbacks.data_transmit(lapb->dev, skb);
82883 + if (lapb->callbacks->data_transmit) {
82884 + lapb->callbacks->data_transmit(lapb->dev, skb);
82885 used = 1;
82886 }
82887
82888 diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
82889 index fe2d3f8..e57f683 100644
82890 --- a/net/mac80211/cfg.c
82891 +++ b/net/mac80211/cfg.c
82892 @@ -1369,7 +1369,7 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
82893 return err;
82894 }
82895
82896 -struct cfg80211_ops mac80211_config_ops = {
82897 +const struct cfg80211_ops mac80211_config_ops = {
82898 .add_virtual_intf = ieee80211_add_iface,
82899 .del_virtual_intf = ieee80211_del_iface,
82900 .change_virtual_intf = ieee80211_change_iface,
82901 diff --git a/net/mac80211/cfg.h b/net/mac80211/cfg.h
82902 index 7d7879f..2d51f62 100644
82903 --- a/net/mac80211/cfg.h
82904 +++ b/net/mac80211/cfg.h
82905 @@ -4,6 +4,6 @@
82906 #ifndef __CFG_H
82907 #define __CFG_H
82908
82909 -extern struct cfg80211_ops mac80211_config_ops;
82910 +extern const struct cfg80211_ops mac80211_config_ops;
82911
82912 #endif /* __CFG_H */
82913 diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
82914 index 99c7525..9cb4937 100644
82915 --- a/net/mac80211/debugfs_key.c
82916 +++ b/net/mac80211/debugfs_key.c
82917 @@ -211,9 +211,13 @@ static ssize_t key_key_read(struct file *file, char __user *userbuf,
82918 size_t count, loff_t *ppos)
82919 {
82920 struct ieee80211_key *key = file->private_data;
82921 - int i, res, bufsize = 2 * key->conf.keylen + 2;
82922 + int i, bufsize = 2 * key->conf.keylen + 2;
82923 char *buf = kmalloc(bufsize, GFP_KERNEL);
82924 char *p = buf;
82925 + ssize_t res;
82926 +
82927 + if (buf == NULL)
82928 + return -ENOMEM;
82929
82930 for (i = 0; i < key->conf.keylen; i++)
82931 p += scnprintf(p, bufsize + buf - p, "%02x", key->conf.key[i]);
82932 diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
82933 index 33a2e89..08650c8 100644
82934 --- a/net/mac80211/debugfs_sta.c
82935 +++ b/net/mac80211/debugfs_sta.c
82936 @@ -124,6 +124,8 @@ static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
82937 int i;
82938 struct sta_info *sta = file->private_data;
82939
82940 + pax_track_stack();
82941 +
82942 spin_lock_bh(&sta->lock);
82943 p += scnprintf(p, sizeof(buf)+buf-p, "next dialog_token is %#02x\n",
82944 sta->ampdu_mlme.dialog_token_allocator + 1);
82945 diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
82946 index ca62bfe..6657a03 100644
82947 --- a/net/mac80211/ieee80211_i.h
82948 +++ b/net/mac80211/ieee80211_i.h
82949 @@ -25,6 +25,7 @@
82950 #include <linux/etherdevice.h>
82951 #include <net/cfg80211.h>
82952 #include <net/mac80211.h>
82953 +#include <asm/local.h>
82954 #include "key.h"
82955 #include "sta_info.h"
82956
82957 @@ -635,7 +636,7 @@ struct ieee80211_local {
82958 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
82959 spinlock_t queue_stop_reason_lock;
82960
82961 - int open_count;
82962 + local_t open_count;
82963 int monitors, cooked_mntrs;
82964 /* number of interfaces with corresponding FIF_ flags */
82965 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll;
82966 diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
82967 index 079c500..eb3c6d4 100644
82968 --- a/net/mac80211/iface.c
82969 +++ b/net/mac80211/iface.c
82970 @@ -166,7 +166,7 @@ static int ieee80211_open(struct net_device *dev)
82971 break;
82972 }
82973
82974 - if (local->open_count == 0) {
82975 + if (local_read(&local->open_count) == 0) {
82976 res = drv_start(local);
82977 if (res)
82978 goto err_del_bss;
82979 @@ -196,7 +196,7 @@ static int ieee80211_open(struct net_device *dev)
82980 * Validate the MAC address for this device.
82981 */
82982 if (!is_valid_ether_addr(dev->dev_addr)) {
82983 - if (!local->open_count)
82984 + if (!local_read(&local->open_count))
82985 drv_stop(local);
82986 return -EADDRNOTAVAIL;
82987 }
82988 @@ -292,7 +292,7 @@ static int ieee80211_open(struct net_device *dev)
82989
82990 hw_reconf_flags |= __ieee80211_recalc_idle(local);
82991
82992 - local->open_count++;
82993 + local_inc(&local->open_count);
82994 if (hw_reconf_flags) {
82995 ieee80211_hw_config(local, hw_reconf_flags);
82996 /*
82997 @@ -320,7 +320,7 @@ static int ieee80211_open(struct net_device *dev)
82998 err_del_interface:
82999 drv_remove_interface(local, &conf);
83000 err_stop:
83001 - if (!local->open_count)
83002 + if (!local_read(&local->open_count))
83003 drv_stop(local);
83004 err_del_bss:
83005 sdata->bss = NULL;
83006 @@ -420,7 +420,7 @@ static int ieee80211_stop(struct net_device *dev)
83007 WARN_ON(!list_empty(&sdata->u.ap.vlans));
83008 }
83009
83010 - local->open_count--;
83011 + local_dec(&local->open_count);
83012
83013 switch (sdata->vif.type) {
83014 case NL80211_IFTYPE_AP_VLAN:
83015 @@ -526,7 +526,7 @@ static int ieee80211_stop(struct net_device *dev)
83016
83017 ieee80211_recalc_ps(local, -1);
83018
83019 - if (local->open_count == 0) {
83020 + if (local_read(&local->open_count) == 0) {
83021 ieee80211_clear_tx_pending(local);
83022 ieee80211_stop_device(local);
83023
83024 diff --git a/net/mac80211/main.c b/net/mac80211/main.c
83025 index 2dfe176..74e4388 100644
83026 --- a/net/mac80211/main.c
83027 +++ b/net/mac80211/main.c
83028 @@ -145,7 +145,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
83029 local->hw.conf.power_level = power;
83030 }
83031
83032 - if (changed && local->open_count) {
83033 + if (changed && local_read(&local->open_count)) {
83034 ret = drv_config(local, changed);
83035 /*
83036 * Goal:
83037 diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
83038 index e67eea7..fcc227e 100644
83039 --- a/net/mac80211/mlme.c
83040 +++ b/net/mac80211/mlme.c
83041 @@ -1438,6 +1438,8 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
83042 bool have_higher_than_11mbit = false, newsta = false;
83043 u16 ap_ht_cap_flags;
83044
83045 + pax_track_stack();
83046 +
83047 /*
83048 * AssocResp and ReassocResp have identical structure, so process both
83049 * of them in this function.
83050 diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
83051 index e535f1c..4d733d1 100644
83052 --- a/net/mac80211/pm.c
83053 +++ b/net/mac80211/pm.c
83054 @@ -107,7 +107,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
83055 }
83056
83057 /* stop hardware - this must stop RX */
83058 - if (local->open_count)
83059 + if (local_read(&local->open_count))
83060 ieee80211_stop_device(local);
83061
83062 local->suspended = true;
83063 diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
83064 index b33efc4..0a2efb6 100644
83065 --- a/net/mac80211/rate.c
83066 +++ b/net/mac80211/rate.c
83067 @@ -287,7 +287,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
83068 struct rate_control_ref *ref, *old;
83069
83070 ASSERT_RTNL();
83071 - if (local->open_count)
83072 + if (local_read(&local->open_count))
83073 return -EBUSY;
83074
83075 ref = rate_control_alloc(name, local);
83076 diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
83077 index b1d7904..57e4da7 100644
83078 --- a/net/mac80211/tx.c
83079 +++ b/net/mac80211/tx.c
83080 @@ -173,7 +173,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
83081 return cpu_to_le16(dur);
83082 }
83083
83084 -static int inline is_ieee80211_device(struct ieee80211_local *local,
83085 +static inline int is_ieee80211_device(struct ieee80211_local *local,
83086 struct net_device *dev)
83087 {
83088 return local == wdev_priv(dev->ieee80211_ptr);
83089 diff --git a/net/mac80211/util.c b/net/mac80211/util.c
83090 index 31b1085..48fb26d 100644
83091 --- a/net/mac80211/util.c
83092 +++ b/net/mac80211/util.c
83093 @@ -1042,7 +1042,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
83094 local->resuming = true;
83095
83096 /* restart hardware */
83097 - if (local->open_count) {
83098 + if (local_read(&local->open_count)) {
83099 /*
83100 * Upon resume hardware can sometimes be goofy due to
83101 * various platform / driver / bus issues, so restarting
83102 diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
83103 index 634d14a..b35a608 100644
83104 --- a/net/netfilter/Kconfig
83105 +++ b/net/netfilter/Kconfig
83106 @@ -635,6 +635,16 @@ config NETFILTER_XT_MATCH_ESP
83107
83108 To compile it as a module, choose M here. If unsure, say N.
83109
83110 +config NETFILTER_XT_MATCH_GRADM
83111 + tristate '"gradm" match support'
83112 + depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
83113 + depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
83114 + ---help---
83115 + The gradm match allows to match on grsecurity RBAC being enabled.
83116 + It is useful when iptables rules are applied early on bootup to
83117 + prevent connections to the machine (except from a trusted host)
83118 + while the RBAC system is disabled.
83119 +
83120 config NETFILTER_XT_MATCH_HASHLIMIT
83121 tristate '"hashlimit" match support'
83122 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
83123 diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
83124 index 49f62ee..a17b2c6 100644
83125 --- a/net/netfilter/Makefile
83126 +++ b/net/netfilter/Makefile
83127 @@ -68,6 +68,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRACK) += xt_conntrack.o
83128 obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
83129 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
83130 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
83131 +obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
83132 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
83133 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
83134 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
83135 diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c
83136 index 3c7e427..724043c 100644
83137 --- a/net/netfilter/ipvs/ip_vs_app.c
83138 +++ b/net/netfilter/ipvs/ip_vs_app.c
83139 @@ -564,7 +564,7 @@ static const struct file_operations ip_vs_app_fops = {
83140 .open = ip_vs_app_open,
83141 .read = seq_read,
83142 .llseek = seq_lseek,
83143 - .release = seq_release,
83144 + .release = seq_release_net,
83145 };
83146 #endif
83147
83148 diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
83149 index 95682e5..457dbac 100644
83150 --- a/net/netfilter/ipvs/ip_vs_conn.c
83151 +++ b/net/netfilter/ipvs/ip_vs_conn.c
83152 @@ -453,10 +453,10 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
83153 /* if the connection is not template and is created
83154 * by sync, preserve the activity flag.
83155 */
83156 - cp->flags |= atomic_read(&dest->conn_flags) &
83157 + cp->flags |= atomic_read_unchecked(&dest->conn_flags) &
83158 (~IP_VS_CONN_F_INACTIVE);
83159 else
83160 - cp->flags |= atomic_read(&dest->conn_flags);
83161 + cp->flags |= atomic_read_unchecked(&dest->conn_flags);
83162 cp->dest = dest;
83163
83164 IP_VS_DBG_BUF(7, "Bind-dest %s c:%s:%d v:%s:%d "
83165 @@ -723,7 +723,7 @@ ip_vs_conn_new(int af, int proto, const union nf_inet_addr *caddr, __be16 cport,
83166 atomic_set(&cp->refcnt, 1);
83167
83168 atomic_set(&cp->n_control, 0);
83169 - atomic_set(&cp->in_pkts, 0);
83170 + atomic_set_unchecked(&cp->in_pkts, 0);
83171
83172 atomic_inc(&ip_vs_conn_count);
83173 if (flags & IP_VS_CONN_F_NO_CPORT)
83174 @@ -871,7 +871,7 @@ static const struct file_operations ip_vs_conn_fops = {
83175 .open = ip_vs_conn_open,
83176 .read = seq_read,
83177 .llseek = seq_lseek,
83178 - .release = seq_release,
83179 + .release = seq_release_net,
83180 };
83181
83182 static const char *ip_vs_origin_name(unsigned flags)
83183 @@ -934,7 +934,7 @@ static const struct file_operations ip_vs_conn_sync_fops = {
83184 .open = ip_vs_conn_sync_open,
83185 .read = seq_read,
83186 .llseek = seq_lseek,
83187 - .release = seq_release,
83188 + .release = seq_release_net,
83189 };
83190
83191 #endif
83192 @@ -961,7 +961,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
83193
83194 /* Don't drop the entry if its number of incoming packets is not
83195 located in [0, 8] */
83196 - i = atomic_read(&cp->in_pkts);
83197 + i = atomic_read_unchecked(&cp->in_pkts);
83198 if (i > 8 || i < 0) return 0;
83199
83200 if (!todrop_rate[i]) return 0;
83201 diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
83202 index b95699f..5fee919 100644
83203 --- a/net/netfilter/ipvs/ip_vs_core.c
83204 +++ b/net/netfilter/ipvs/ip_vs_core.c
83205 @@ -485,7 +485,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
83206 ret = cp->packet_xmit(skb, cp, pp);
83207 /* do not touch skb anymore */
83208
83209 - atomic_inc(&cp->in_pkts);
83210 + atomic_inc_unchecked(&cp->in_pkts);
83211 ip_vs_conn_put(cp);
83212 return ret;
83213 }
83214 @@ -1357,7 +1357,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb,
83215 * Sync connection if it is about to close to
83216 * encorage the standby servers to update the connections timeout
83217 */
83218 - pkts = atomic_add_return(1, &cp->in_pkts);
83219 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
83220 if (af == AF_INET &&
83221 (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
83222 (((cp->protocol != IPPROTO_TCP ||
83223 diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
83224 index 02b2610..2d89424 100644
83225 --- a/net/netfilter/ipvs/ip_vs_ctl.c
83226 +++ b/net/netfilter/ipvs/ip_vs_ctl.c
83227 @@ -792,7 +792,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc,
83228 ip_vs_rs_hash(dest);
83229 write_unlock_bh(&__ip_vs_rs_lock);
83230 }
83231 - atomic_set(&dest->conn_flags, conn_flags);
83232 + atomic_set_unchecked(&dest->conn_flags, conn_flags);
83233
83234 /* bind the service */
83235 if (!dest->svc) {
83236 @@ -1888,7 +1888,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
83237 " %-7s %-6d %-10d %-10d\n",
83238 &dest->addr.in6,
83239 ntohs(dest->port),
83240 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
83241 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
83242 atomic_read(&dest->weight),
83243 atomic_read(&dest->activeconns),
83244 atomic_read(&dest->inactconns));
83245 @@ -1899,7 +1899,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
83246 "%-7s %-6d %-10d %-10d\n",
83247 ntohl(dest->addr.ip),
83248 ntohs(dest->port),
83249 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
83250 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
83251 atomic_read(&dest->weight),
83252 atomic_read(&dest->activeconns),
83253 atomic_read(&dest->inactconns));
83254 @@ -1927,7 +1927,7 @@ static const struct file_operations ip_vs_info_fops = {
83255 .open = ip_vs_info_open,
83256 .read = seq_read,
83257 .llseek = seq_lseek,
83258 - .release = seq_release_private,
83259 + .release = seq_release_net,
83260 };
83261
83262 #endif
83263 @@ -1976,7 +1976,7 @@ static const struct file_operations ip_vs_stats_fops = {
83264 .open = ip_vs_stats_seq_open,
83265 .read = seq_read,
83266 .llseek = seq_lseek,
83267 - .release = single_release,
83268 + .release = single_release_net,
83269 };
83270
83271 #endif
83272 @@ -2292,7 +2292,7 @@ __ip_vs_get_dest_entries(const struct ip_vs_get_dests *get,
83273
83274 entry.addr = dest->addr.ip;
83275 entry.port = dest->port;
83276 - entry.conn_flags = atomic_read(&dest->conn_flags);
83277 + entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
83278 entry.weight = atomic_read(&dest->weight);
83279 entry.u_threshold = dest->u_threshold;
83280 entry.l_threshold = dest->l_threshold;
83281 @@ -2353,6 +2353,8 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
83282 unsigned char arg[128];
83283 int ret = 0;
83284
83285 + pax_track_stack();
83286 +
83287 if (!capable(CAP_NET_ADMIN))
83288 return -EPERM;
83289
83290 @@ -2802,7 +2804,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
83291 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
83292
83293 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
83294 - atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
83295 + atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
83296 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
83297 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
83298 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
83299 diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
83300 index e177f0d..55e8581 100644
83301 --- a/net/netfilter/ipvs/ip_vs_sync.c
83302 +++ b/net/netfilter/ipvs/ip_vs_sync.c
83303 @@ -438,7 +438,7 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen)
83304
83305 if (opt)
83306 memcpy(&cp->in_seq, opt, sizeof(*opt));
83307 - atomic_set(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
83308 + atomic_set_unchecked(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
83309 cp->state = state;
83310 cp->old_state = cp->state;
83311 /*
83312 diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
83313 index 30b3189..e2e4b55 100644
83314 --- a/net/netfilter/ipvs/ip_vs_xmit.c
83315 +++ b/net/netfilter/ipvs/ip_vs_xmit.c
83316 @@ -875,7 +875,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
83317 else
83318 rc = NF_ACCEPT;
83319 /* do not touch skb anymore */
83320 - atomic_inc(&cp->in_pkts);
83321 + atomic_inc_unchecked(&cp->in_pkts);
83322 goto out;
83323 }
83324
83325 @@ -949,7 +949,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
83326 else
83327 rc = NF_ACCEPT;
83328 /* do not touch skb anymore */
83329 - atomic_inc(&cp->in_pkts);
83330 + atomic_inc_unchecked(&cp->in_pkts);
83331 goto out;
83332 }
83333
83334 diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
83335 index d521718..d0fd7a1 100644
83336 --- a/net/netfilter/nf_conntrack_netlink.c
83337 +++ b/net/netfilter/nf_conntrack_netlink.c
83338 @@ -706,7 +706,7 @@ ctnetlink_parse_tuple_proto(struct nlattr *attr,
83339 static int
83340 ctnetlink_parse_tuple(const struct nlattr * const cda[],
83341 struct nf_conntrack_tuple *tuple,
83342 - enum ctattr_tuple type, u_int8_t l3num)
83343 + enum ctattr_type type, u_int8_t l3num)
83344 {
83345 struct nlattr *tb[CTA_TUPLE_MAX+1];
83346 int err;
83347 diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
83348 index f900dc3..5e45346 100644
83349 --- a/net/netfilter/nfnetlink_log.c
83350 +++ b/net/netfilter/nfnetlink_log.c
83351 @@ -68,7 +68,7 @@ struct nfulnl_instance {
83352 };
83353
83354 static DEFINE_RWLOCK(instances_lock);
83355 -static atomic_t global_seq;
83356 +static atomic_unchecked_t global_seq;
83357
83358 #define INSTANCE_BUCKETS 16
83359 static struct hlist_head instance_table[INSTANCE_BUCKETS];
83360 @@ -493,7 +493,7 @@ __build_packet_message(struct nfulnl_instance *inst,
83361 /* global sequence number */
83362 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
83363 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
83364 - htonl(atomic_inc_return(&global_seq)));
83365 + htonl(atomic_inc_return_unchecked(&global_seq)));
83366
83367 if (data_len) {
83368 struct nlattr *nla;
83369 diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
83370 new file mode 100644
83371 index 0000000..b1bac76
83372 --- /dev/null
83373 +++ b/net/netfilter/xt_gradm.c
83374 @@ -0,0 +1,51 @@
83375 +/*
83376 + * gradm match for netfilter
83377 + * Copyright © Zbigniew Krzystolik, 2010
83378 + *
83379 + * This program is free software; you can redistribute it and/or modify
83380 + * it under the terms of the GNU General Public License; either version
83381 + * 2 or 3 as published by the Free Software Foundation.
83382 + */
83383 +#include <linux/module.h>
83384 +#include <linux/moduleparam.h>
83385 +#include <linux/skbuff.h>
83386 +#include <linux/netfilter/x_tables.h>
83387 +#include <linux/grsecurity.h>
83388 +#include <linux/netfilter/xt_gradm.h>
83389 +
83390 +static bool
83391 +gradm_mt(const struct sk_buff *skb, const struct xt_match_param *par)
83392 +{
83393 + const struct xt_gradm_mtinfo *info = par->matchinfo;
83394 + bool retval = false;
83395 + if (gr_acl_is_enabled())
83396 + retval = true;
83397 + return retval ^ info->invflags;
83398 +}
83399 +
83400 +static struct xt_match gradm_mt_reg __read_mostly = {
83401 + .name = "gradm",
83402 + .revision = 0,
83403 + .family = NFPROTO_UNSPEC,
83404 + .match = gradm_mt,
83405 + .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
83406 + .me = THIS_MODULE,
83407 +};
83408 +
83409 +static int __init gradm_mt_init(void)
83410 +{
83411 + return xt_register_match(&gradm_mt_reg);
83412 +}
83413 +
83414 +static void __exit gradm_mt_exit(void)
83415 +{
83416 + xt_unregister_match(&gradm_mt_reg);
83417 +}
83418 +
83419 +module_init(gradm_mt_init);
83420 +module_exit(gradm_mt_exit);
83421 +MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
83422 +MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
83423 +MODULE_LICENSE("GPL");
83424 +MODULE_ALIAS("ipt_gradm");
83425 +MODULE_ALIAS("ip6t_gradm");
83426 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
83427 index 5a7dcdf..24a3578 100644
83428 --- a/net/netlink/af_netlink.c
83429 +++ b/net/netlink/af_netlink.c
83430 @@ -733,7 +733,7 @@ static void netlink_overrun(struct sock *sk)
83431 sk->sk_error_report(sk);
83432 }
83433 }
83434 - atomic_inc(&sk->sk_drops);
83435 + atomic_inc_unchecked(&sk->sk_drops);
83436 }
83437
83438 static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
83439 @@ -1964,15 +1964,23 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
83440 struct netlink_sock *nlk = nlk_sk(s);
83441
83442 seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %-8d %-8d\n",
83443 +#ifdef CONFIG_GRKERNSEC_HIDESYM
83444 + NULL,
83445 +#else
83446 s,
83447 +#endif
83448 s->sk_protocol,
83449 nlk->pid,
83450 nlk->groups ? (u32)nlk->groups[0] : 0,
83451 sk_rmem_alloc_get(s),
83452 sk_wmem_alloc_get(s),
83453 +#ifdef CONFIG_GRKERNSEC_HIDESYM
83454 + NULL,
83455 +#else
83456 nlk->cb,
83457 +#endif
83458 atomic_read(&s->sk_refcnt),
83459 - atomic_read(&s->sk_drops)
83460 + atomic_read_unchecked(&s->sk_drops)
83461 );
83462
83463 }
83464 diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
83465 index 7a83495..ab0062f 100644
83466 --- a/net/netrom/af_netrom.c
83467 +++ b/net/netrom/af_netrom.c
83468 @@ -838,6 +838,7 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
83469 struct sock *sk = sock->sk;
83470 struct nr_sock *nr = nr_sk(sk);
83471
83472 + memset(sax, 0, sizeof(*sax));
83473 lock_sock(sk);
83474 if (peer != 0) {
83475 if (sk->sk_state != TCP_ESTABLISHED) {
83476 @@ -852,7 +853,6 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
83477 *uaddr_len = sizeof(struct full_sockaddr_ax25);
83478 } else {
83479 sax->fsa_ax25.sax25_family = AF_NETROM;
83480 - sax->fsa_ax25.sax25_ndigis = 0;
83481 sax->fsa_ax25.sax25_call = nr->source_addr;
83482 *uaddr_len = sizeof(struct sockaddr_ax25);
83483 }
83484 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
83485 index 35cfa79..4e78ff7 100644
83486 --- a/net/packet/af_packet.c
83487 +++ b/net/packet/af_packet.c
83488 @@ -2429,7 +2429,11 @@ static int packet_seq_show(struct seq_file *seq, void *v)
83489
83490 seq_printf(seq,
83491 "%p %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
83492 +#ifdef CONFIG_GRKERNSEC_HIDESYM
83493 + NULL,
83494 +#else
83495 s,
83496 +#endif
83497 atomic_read(&s->sk_refcnt),
83498 s->sk_type,
83499 ntohs(po->num),
83500 diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
83501 index 519ff9d..a422a90 100644
83502 --- a/net/phonet/af_phonet.c
83503 +++ b/net/phonet/af_phonet.c
83504 @@ -41,7 +41,7 @@ static struct phonet_protocol *phonet_proto_get(int protocol)
83505 {
83506 struct phonet_protocol *pp;
83507
83508 - if (protocol >= PHONET_NPROTO)
83509 + if (protocol < 0 || protocol >= PHONET_NPROTO)
83510 return NULL;
83511
83512 spin_lock(&proto_tab_lock);
83513 @@ -402,7 +402,7 @@ int __init_or_module phonet_proto_register(int protocol,
83514 {
83515 int err = 0;
83516
83517 - if (protocol >= PHONET_NPROTO)
83518 + if (protocol < 0 || protocol >= PHONET_NPROTO)
83519 return -EINVAL;
83520
83521 err = proto_register(pp->prot, 1);
83522 diff --git a/net/phonet/datagram.c b/net/phonet/datagram.c
83523 index ef5c75c..2b6c2fa 100644
83524 --- a/net/phonet/datagram.c
83525 +++ b/net/phonet/datagram.c
83526 @@ -162,7 +162,7 @@ static int pn_backlog_rcv(struct sock *sk, struct sk_buff *skb)
83527 if (err < 0) {
83528 kfree_skb(skb);
83529 if (err == -ENOMEM)
83530 - atomic_inc(&sk->sk_drops);
83531 + atomic_inc_unchecked(&sk->sk_drops);
83532 }
83533 return err ? NET_RX_DROP : NET_RX_SUCCESS;
83534 }
83535 diff --git a/net/phonet/pep.c b/net/phonet/pep.c
83536 index 9cdd35e..16cd850 100644
83537 --- a/net/phonet/pep.c
83538 +++ b/net/phonet/pep.c
83539 @@ -348,7 +348,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
83540
83541 case PNS_PEP_CTRL_REQ:
83542 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
83543 - atomic_inc(&sk->sk_drops);
83544 + atomic_inc_unchecked(&sk->sk_drops);
83545 break;
83546 }
83547 __skb_pull(skb, 4);
83548 @@ -362,12 +362,12 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
83549 if (!err)
83550 return 0;
83551 if (err == -ENOMEM)
83552 - atomic_inc(&sk->sk_drops);
83553 + atomic_inc_unchecked(&sk->sk_drops);
83554 break;
83555 }
83556
83557 if (pn->rx_credits == 0) {
83558 - atomic_inc(&sk->sk_drops);
83559 + atomic_inc_unchecked(&sk->sk_drops);
83560 err = -ENOBUFS;
83561 break;
83562 }
83563 diff --git a/net/phonet/socket.c b/net/phonet/socket.c
83564 index aa5b5a9..c09b4f8 100644
83565 --- a/net/phonet/socket.c
83566 +++ b/net/phonet/socket.c
83567 @@ -482,8 +482,13 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v)
83568 sk->sk_state,
83569 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
83570 sock_i_uid(sk), sock_i_ino(sk),
83571 - atomic_read(&sk->sk_refcnt), sk,
83572 - atomic_read(&sk->sk_drops), &len);
83573 + atomic_read(&sk->sk_refcnt),
83574 +#ifdef CONFIG_GRKERNSEC_HIDESYM
83575 + NULL,
83576 +#else
83577 + sk,
83578 +#endif
83579 + atomic_read_unchecked(&sk->sk_drops), &len);
83580 }
83581 seq_printf(seq, "%*s\n", 127 - len, "");
83582 return 0;
83583 diff --git a/net/rds/Kconfig b/net/rds/Kconfig
83584 index ec753b3..821187c 100644
83585 --- a/net/rds/Kconfig
83586 +++ b/net/rds/Kconfig
83587 @@ -1,7 +1,7 @@
83588
83589 config RDS
83590 tristate "The RDS Protocol (EXPERIMENTAL)"
83591 - depends on INET && EXPERIMENTAL
83592 + depends on INET && EXPERIMENTAL && BROKEN
83593 ---help---
83594 The RDS (Reliable Datagram Sockets) protocol provides reliable,
83595 sequenced delivery of datagrams over Infiniband, iWARP,
83596 diff --git a/net/rds/cong.c b/net/rds/cong.c
83597 index dd2711d..1c7ed12 100644
83598 --- a/net/rds/cong.c
83599 +++ b/net/rds/cong.c
83600 @@ -77,7 +77,7 @@
83601 * finds that the saved generation number is smaller than the global generation
83602 * number, it wakes up the process.
83603 */
83604 -static atomic_t rds_cong_generation = ATOMIC_INIT(0);
83605 +static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
83606
83607 /*
83608 * Congestion monitoring
83609 @@ -232,7 +232,7 @@ void rds_cong_map_updated(struct rds_cong_map *map, uint64_t portmask)
83610 rdsdebug("waking map %p for %pI4\n",
83611 map, &map->m_addr);
83612 rds_stats_inc(s_cong_update_received);
83613 - atomic_inc(&rds_cong_generation);
83614 + atomic_inc_unchecked(&rds_cong_generation);
83615 if (waitqueue_active(&map->m_waitq))
83616 wake_up(&map->m_waitq);
83617 if (waitqueue_active(&rds_poll_waitq))
83618 @@ -258,7 +258,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
83619
83620 int rds_cong_updated_since(unsigned long *recent)
83621 {
83622 - unsigned long gen = atomic_read(&rds_cong_generation);
83623 + unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
83624
83625 if (likely(*recent == gen))
83626 return 0;
83627 diff --git a/net/rds/iw_rdma.c b/net/rds/iw_rdma.c
83628 index de4a1b1..94ec861 100644
83629 --- a/net/rds/iw_rdma.c
83630 +++ b/net/rds/iw_rdma.c
83631 @@ -181,6 +181,8 @@ int rds_iw_update_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_i
83632 struct rdma_cm_id *pcm_id;
83633 int rc;
83634
83635 + pax_track_stack();
83636 +
83637 src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr;
83638 dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr;
83639
83640 diff --git a/net/rds/tcp.c b/net/rds/tcp.c
83641 index b5198ae..8b9fb90 100644
83642 --- a/net/rds/tcp.c
83643 +++ b/net/rds/tcp.c
83644 @@ -57,7 +57,7 @@ void rds_tcp_nonagle(struct socket *sock)
83645 int val = 1;
83646
83647 set_fs(KERNEL_DS);
83648 - sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __user *)&val,
83649 + sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __force_user *)&val,
83650 sizeof(val));
83651 set_fs(oldfs);
83652 }
83653 diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
83654 index ab545e0..4079b3b 100644
83655 --- a/net/rds/tcp_send.c
83656 +++ b/net/rds/tcp_send.c
83657 @@ -43,7 +43,7 @@ static void rds_tcp_cork(struct socket *sock, int val)
83658
83659 oldfs = get_fs();
83660 set_fs(KERNEL_DS);
83661 - sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __user *)&val,
83662 + sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __force_user *)&val,
83663 sizeof(val));
83664 set_fs(oldfs);
83665 }
83666 diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
83667 index a86afce..8657bce 100644
83668 --- a/net/rxrpc/af_rxrpc.c
83669 +++ b/net/rxrpc/af_rxrpc.c
83670 @@ -38,7 +38,7 @@ static const struct proto_ops rxrpc_rpc_ops;
83671 __be32 rxrpc_epoch;
83672
83673 /* current debugging ID */
83674 -atomic_t rxrpc_debug_id;
83675 +atomic_unchecked_t rxrpc_debug_id;
83676
83677 /* count of skbs currently in use */
83678 atomic_t rxrpc_n_skbs;
83679 diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
83680 index b4a2209..539106c 100644
83681 --- a/net/rxrpc/ar-ack.c
83682 +++ b/net/rxrpc/ar-ack.c
83683 @@ -174,7 +174,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
83684
83685 _enter("{%d,%d,%d,%d},",
83686 call->acks_hard, call->acks_unacked,
83687 - atomic_read(&call->sequence),
83688 + atomic_read_unchecked(&call->sequence),
83689 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
83690
83691 stop = 0;
83692 @@ -198,7 +198,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
83693
83694 /* each Tx packet has a new serial number */
83695 sp->hdr.serial =
83696 - htonl(atomic_inc_return(&call->conn->serial));
83697 + htonl(atomic_inc_return_unchecked(&call->conn->serial));
83698
83699 hdr = (struct rxrpc_header *) txb->head;
83700 hdr->serial = sp->hdr.serial;
83701 @@ -401,7 +401,7 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard)
83702 */
83703 static void rxrpc_clear_tx_window(struct rxrpc_call *call)
83704 {
83705 - rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
83706 + rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
83707 }
83708
83709 /*
83710 @@ -627,7 +627,7 @@ process_further:
83711
83712 latest = ntohl(sp->hdr.serial);
83713 hard = ntohl(ack.firstPacket);
83714 - tx = atomic_read(&call->sequence);
83715 + tx = atomic_read_unchecked(&call->sequence);
83716
83717 _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
83718 latest,
83719 @@ -840,6 +840,8 @@ void rxrpc_process_call(struct work_struct *work)
83720 u32 abort_code = RX_PROTOCOL_ERROR;
83721 u8 *acks = NULL;
83722
83723 + pax_track_stack();
83724 +
83725 //printk("\n--------------------\n");
83726 _enter("{%d,%s,%lx} [%lu]",
83727 call->debug_id, rxrpc_call_states[call->state], call->events,
83728 @@ -1159,7 +1161,7 @@ void rxrpc_process_call(struct work_struct *work)
83729 goto maybe_reschedule;
83730
83731 send_ACK_with_skew:
83732 - ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
83733 + ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
83734 ntohl(ack.serial));
83735 send_ACK:
83736 mtu = call->conn->trans->peer->if_mtu;
83737 @@ -1171,7 +1173,7 @@ send_ACK:
83738 ackinfo.rxMTU = htonl(5692);
83739 ackinfo.jumbo_max = htonl(4);
83740
83741 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
83742 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
83743 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
83744 ntohl(hdr.serial),
83745 ntohs(ack.maxSkew),
83746 @@ -1189,7 +1191,7 @@ send_ACK:
83747 send_message:
83748 _debug("send message");
83749
83750 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
83751 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
83752 _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
83753 send_message_2:
83754
83755 diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c
83756 index bc0019f..e1b4b24 100644
83757 --- a/net/rxrpc/ar-call.c
83758 +++ b/net/rxrpc/ar-call.c
83759 @@ -82,7 +82,7 @@ static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
83760 spin_lock_init(&call->lock);
83761 rwlock_init(&call->state_lock);
83762 atomic_set(&call->usage, 1);
83763 - call->debug_id = atomic_inc_return(&rxrpc_debug_id);
83764 + call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
83765 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
83766
83767 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
83768 diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c
83769 index 9f1ce84..ff8d061 100644
83770 --- a/net/rxrpc/ar-connection.c
83771 +++ b/net/rxrpc/ar-connection.c
83772 @@ -205,7 +205,7 @@ static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
83773 rwlock_init(&conn->lock);
83774 spin_lock_init(&conn->state_lock);
83775 atomic_set(&conn->usage, 1);
83776 - conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
83777 + conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
83778 conn->avail_calls = RXRPC_MAXCALLS;
83779 conn->size_align = 4;
83780 conn->header_size = sizeof(struct rxrpc_header);
83781 diff --git a/net/rxrpc/ar-connevent.c b/net/rxrpc/ar-connevent.c
83782 index 0505cdc..f0748ce 100644
83783 --- a/net/rxrpc/ar-connevent.c
83784 +++ b/net/rxrpc/ar-connevent.c
83785 @@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
83786
83787 len = iov[0].iov_len + iov[1].iov_len;
83788
83789 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
83790 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
83791 _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
83792
83793 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
83794 diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
83795 index f98c802..9e8488e 100644
83796 --- a/net/rxrpc/ar-input.c
83797 +++ b/net/rxrpc/ar-input.c
83798 @@ -339,9 +339,9 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
83799 /* track the latest serial number on this connection for ACK packet
83800 * information */
83801 serial = ntohl(sp->hdr.serial);
83802 - hi_serial = atomic_read(&call->conn->hi_serial);
83803 + hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
83804 while (serial > hi_serial)
83805 - hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
83806 + hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
83807 serial);
83808
83809 /* request ACK generation for any ACK or DATA packet that requests
83810 diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
83811 index 7043b29..06edcdf 100644
83812 --- a/net/rxrpc/ar-internal.h
83813 +++ b/net/rxrpc/ar-internal.h
83814 @@ -272,8 +272,8 @@ struct rxrpc_connection {
83815 int error; /* error code for local abort */
83816 int debug_id; /* debug ID for printks */
83817 unsigned call_counter; /* call ID counter */
83818 - atomic_t serial; /* packet serial number counter */
83819 - atomic_t hi_serial; /* highest serial number received */
83820 + atomic_unchecked_t serial; /* packet serial number counter */
83821 + atomic_unchecked_t hi_serial; /* highest serial number received */
83822 u8 avail_calls; /* number of calls available */
83823 u8 size_align; /* data size alignment (for security) */
83824 u8 header_size; /* rxrpc + security header size */
83825 @@ -346,7 +346,7 @@ struct rxrpc_call {
83826 spinlock_t lock;
83827 rwlock_t state_lock; /* lock for state transition */
83828 atomic_t usage;
83829 - atomic_t sequence; /* Tx data packet sequence counter */
83830 + atomic_unchecked_t sequence; /* Tx data packet sequence counter */
83831 u32 abort_code; /* local/remote abort code */
83832 enum { /* current state of call */
83833 RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
83834 @@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(struct rxrpc_call *call, u32 abort_code)
83835 */
83836 extern atomic_t rxrpc_n_skbs;
83837 extern __be32 rxrpc_epoch;
83838 -extern atomic_t rxrpc_debug_id;
83839 +extern atomic_unchecked_t rxrpc_debug_id;
83840 extern struct workqueue_struct *rxrpc_workqueue;
83841
83842 /*
83843 diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c
83844 index 74697b2..10f9b77 100644
83845 --- a/net/rxrpc/ar-key.c
83846 +++ b/net/rxrpc/ar-key.c
83847 @@ -88,11 +88,11 @@ static int rxrpc_instantiate_xdr_rxkad(struct key *key, const __be32 *xdr,
83848 return ret;
83849
83850 plen -= sizeof(*token);
83851 - token = kmalloc(sizeof(*token), GFP_KERNEL);
83852 + token = kzalloc(sizeof(*token), GFP_KERNEL);
83853 if (!token)
83854 return -ENOMEM;
83855
83856 - token->kad = kmalloc(plen, GFP_KERNEL);
83857 + token->kad = kzalloc(plen, GFP_KERNEL);
83858 if (!token->kad) {
83859 kfree(token);
83860 return -ENOMEM;
83861 @@ -730,10 +730,10 @@ static int rxrpc_instantiate(struct key *key, const void *data, size_t datalen)
83862 goto error;
83863
83864 ret = -ENOMEM;
83865 - token = kmalloc(sizeof(*token), GFP_KERNEL);
83866 + token = kzalloc(sizeof(*token), GFP_KERNEL);
83867 if (!token)
83868 goto error;
83869 - token->kad = kmalloc(plen, GFP_KERNEL);
83870 + token->kad = kzalloc(plen, GFP_KERNEL);
83871 if (!token->kad)
83872 goto error_free;
83873
83874 diff --git a/net/rxrpc/ar-local.c b/net/rxrpc/ar-local.c
83875 index 807535f..5b7f19e 100644
83876 --- a/net/rxrpc/ar-local.c
83877 +++ b/net/rxrpc/ar-local.c
83878 @@ -44,7 +44,7 @@ struct rxrpc_local *rxrpc_alloc_local(struct sockaddr_rxrpc *srx)
83879 spin_lock_init(&local->lock);
83880 rwlock_init(&local->services_lock);
83881 atomic_set(&local->usage, 1);
83882 - local->debug_id = atomic_inc_return(&rxrpc_debug_id);
83883 + local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
83884 memcpy(&local->srx, srx, sizeof(*srx));
83885 }
83886
83887 diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
83888 index cc9102c..7d3888e 100644
83889 --- a/net/rxrpc/ar-output.c
83890 +++ b/net/rxrpc/ar-output.c
83891 @@ -680,9 +680,9 @@ static int rxrpc_send_data(struct kiocb *iocb,
83892 sp->hdr.cid = call->cid;
83893 sp->hdr.callNumber = call->call_id;
83894 sp->hdr.seq =
83895 - htonl(atomic_inc_return(&call->sequence));
83896 + htonl(atomic_inc_return_unchecked(&call->sequence));
83897 sp->hdr.serial =
83898 - htonl(atomic_inc_return(&conn->serial));
83899 + htonl(atomic_inc_return_unchecked(&conn->serial));
83900 sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
83901 sp->hdr.userStatus = 0;
83902 sp->hdr.securityIndex = conn->security_ix;
83903 diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c
83904 index edc026c..4bd4e2d 100644
83905 --- a/net/rxrpc/ar-peer.c
83906 +++ b/net/rxrpc/ar-peer.c
83907 @@ -86,7 +86,7 @@ static struct rxrpc_peer *rxrpc_alloc_peer(struct sockaddr_rxrpc *srx,
83908 INIT_LIST_HEAD(&peer->error_targets);
83909 spin_lock_init(&peer->lock);
83910 atomic_set(&peer->usage, 1);
83911 - peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
83912 + peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
83913 memcpy(&peer->srx, srx, sizeof(*srx));
83914
83915 rxrpc_assess_MTU_size(peer);
83916 diff --git a/net/rxrpc/ar-proc.c b/net/rxrpc/ar-proc.c
83917 index 38047f7..9f48511 100644
83918 --- a/net/rxrpc/ar-proc.c
83919 +++ b/net/rxrpc/ar-proc.c
83920 @@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
83921 atomic_read(&conn->usage),
83922 rxrpc_conn_states[conn->state],
83923 key_serial(conn->key),
83924 - atomic_read(&conn->serial),
83925 - atomic_read(&conn->hi_serial));
83926 + atomic_read_unchecked(&conn->serial),
83927 + atomic_read_unchecked(&conn->hi_serial));
83928
83929 return 0;
83930 }
83931 diff --git a/net/rxrpc/ar-transport.c b/net/rxrpc/ar-transport.c
83932 index 0936e1a..437c640 100644
83933 --- a/net/rxrpc/ar-transport.c
83934 +++ b/net/rxrpc/ar-transport.c
83935 @@ -46,7 +46,7 @@ static struct rxrpc_transport *rxrpc_alloc_transport(struct rxrpc_local *local,
83936 spin_lock_init(&trans->client_lock);
83937 rwlock_init(&trans->conn_lock);
83938 atomic_set(&trans->usage, 1);
83939 - trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
83940 + trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
83941
83942 if (peer->srx.transport.family == AF_INET) {
83943 switch (peer->srx.transport_type) {
83944 diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
83945 index 713ac59..306f6ae 100644
83946 --- a/net/rxrpc/rxkad.c
83947 +++ b/net/rxrpc/rxkad.c
83948 @@ -210,6 +210,8 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
83949 u16 check;
83950 int nsg;
83951
83952 + pax_track_stack();
83953 +
83954 sp = rxrpc_skb(skb);
83955
83956 _enter("");
83957 @@ -337,6 +339,8 @@ static int rxkad_verify_packet_auth(const struct rxrpc_call *call,
83958 u16 check;
83959 int nsg;
83960
83961 + pax_track_stack();
83962 +
83963 _enter("");
83964
83965 sp = rxrpc_skb(skb);
83966 @@ -609,7 +613,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
83967
83968 len = iov[0].iov_len + iov[1].iov_len;
83969
83970 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
83971 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
83972 _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
83973
83974 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
83975 @@ -659,7 +663,7 @@ static int rxkad_send_response(struct rxrpc_connection *conn,
83976
83977 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
83978
83979 - hdr->serial = htonl(atomic_inc_return(&conn->serial));
83980 + hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
83981 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
83982
83983 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
83984 diff --git a/net/sctp/auth.c b/net/sctp/auth.c
83985 index 914c419..7a16d2c 100644
83986 --- a/net/sctp/auth.c
83987 +++ b/net/sctp/auth.c
83988 @@ -81,7 +81,7 @@ static struct sctp_auth_bytes *sctp_auth_create_key(__u32 key_len, gfp_t gfp)
83989 struct sctp_auth_bytes *key;
83990
83991 /* Verify that we are not going to overflow INT_MAX */
83992 - if ((INT_MAX - key_len) < sizeof(struct sctp_auth_bytes))
83993 + if (key_len > (INT_MAX - sizeof(struct sctp_auth_bytes)))
83994 return NULL;
83995
83996 /* Allocate the shared key */
83997 diff --git a/net/sctp/proc.c b/net/sctp/proc.c
83998 index d093cbf..9fc36fc 100644
83999 --- a/net/sctp/proc.c
84000 +++ b/net/sctp/proc.c
84001 @@ -213,7 +213,12 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v)
84002 sctp_for_each_hentry(epb, node, &head->chain) {
84003 ep = sctp_ep(epb);
84004 sk = epb->sk;
84005 - seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk,
84006 + seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ",
84007 +#ifdef CONFIG_GRKERNSEC_HIDESYM
84008 + NULL, NULL,
84009 +#else
84010 + ep, sk,
84011 +#endif
84012 sctp_sk(sk)->type, sk->sk_state, hash,
84013 epb->bind_addr.port,
84014 sock_i_uid(sk), sock_i_ino(sk));
84015 @@ -320,7 +325,12 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
84016 seq_printf(seq,
84017 "%8p %8p %-3d %-3d %-2d %-4d "
84018 "%4d %8d %8d %7d %5lu %-5d %5d ",
84019 - assoc, sk, sctp_sk(sk)->type, sk->sk_state,
84020 +#ifdef CONFIG_GRKERNSEC_HIDESYM
84021 + NULL, NULL,
84022 +#else
84023 + assoc, sk,
84024 +#endif
84025 + sctp_sk(sk)->type, sk->sk_state,
84026 assoc->state, hash,
84027 assoc->assoc_id,
84028 assoc->sndbuf_used,
84029 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
84030 index 3a95fcb..c40fc1d 100644
84031 --- a/net/sctp/socket.c
84032 +++ b/net/sctp/socket.c
84033 @@ -5802,7 +5802,6 @@ pp_found:
84034 */
84035 int reuse = sk->sk_reuse;
84036 struct sock *sk2;
84037 - struct hlist_node *node;
84038
84039 SCTP_DEBUG_PRINTK("sctp_get_port() found a possible match\n");
84040 if (pp->fastreuse && sk->sk_reuse &&
84041 diff --git a/net/socket.c b/net/socket.c
84042 index d449812..4ac08d3c 100644
84043 --- a/net/socket.c
84044 +++ b/net/socket.c
84045 @@ -87,6 +87,7 @@
84046 #include <linux/wireless.h>
84047 #include <linux/nsproxy.h>
84048 #include <linux/magic.h>
84049 +#include <linux/in.h>
84050
84051 #include <asm/uaccess.h>
84052 #include <asm/unistd.h>
84053 @@ -97,6 +98,21 @@
84054 #include <net/sock.h>
84055 #include <linux/netfilter.h>
84056
84057 +extern void gr_attach_curr_ip(const struct sock *sk);
84058 +extern int gr_handle_sock_all(const int family, const int type,
84059 + const int protocol);
84060 +extern int gr_handle_sock_server(const struct sockaddr *sck);
84061 +extern int gr_handle_sock_server_other(const struct sock *sck);
84062 +extern int gr_handle_sock_client(const struct sockaddr *sck);
84063 +extern int gr_search_connect(struct socket * sock,
84064 + struct sockaddr_in * addr);
84065 +extern int gr_search_bind(struct socket * sock,
84066 + struct sockaddr_in * addr);
84067 +extern int gr_search_listen(struct socket * sock);
84068 +extern int gr_search_accept(struct socket * sock);
84069 +extern int gr_search_socket(const int domain, const int type,
84070 + const int protocol);
84071 +
84072 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
84073 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
84074 unsigned long nr_segs, loff_t pos);
84075 @@ -298,7 +314,7 @@ static int sockfs_get_sb(struct file_system_type *fs_type,
84076 mnt);
84077 }
84078
84079 -static struct vfsmount *sock_mnt __read_mostly;
84080 +struct vfsmount *sock_mnt __read_mostly;
84081
84082 static struct file_system_type sock_fs_type = {
84083 .name = "sockfs",
84084 @@ -1154,6 +1170,8 @@ static int __sock_create(struct net *net, int family, int type, int protocol,
84085 return -EAFNOSUPPORT;
84086 if (type < 0 || type >= SOCK_MAX)
84087 return -EINVAL;
84088 + if (protocol < 0)
84089 + return -EINVAL;
84090
84091 /* Compatibility.
84092
84093 @@ -1283,6 +1301,16 @@ SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
84094 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
84095 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
84096
84097 + if(!gr_search_socket(family, type, protocol)) {
84098 + retval = -EACCES;
84099 + goto out;
84100 + }
84101 +
84102 + if (gr_handle_sock_all(family, type, protocol)) {
84103 + retval = -EACCES;
84104 + goto out;
84105 + }
84106 +
84107 retval = sock_create(family, type, protocol, &sock);
84108 if (retval < 0)
84109 goto out;
84110 @@ -1415,6 +1443,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
84111 if (sock) {
84112 err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
84113 if (err >= 0) {
84114 + if (gr_handle_sock_server((struct sockaddr *)&address)) {
84115 + err = -EACCES;
84116 + goto error;
84117 + }
84118 + err = gr_search_bind(sock, (struct sockaddr_in *)&address);
84119 + if (err)
84120 + goto error;
84121 +
84122 err = security_socket_bind(sock,
84123 (struct sockaddr *)&address,
84124 addrlen);
84125 @@ -1423,6 +1459,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
84126 (struct sockaddr *)
84127 &address, addrlen);
84128 }
84129 +error:
84130 fput_light(sock->file, fput_needed);
84131 }
84132 return err;
84133 @@ -1446,10 +1483,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
84134 if ((unsigned)backlog > somaxconn)
84135 backlog = somaxconn;
84136
84137 + if (gr_handle_sock_server_other(sock->sk)) {
84138 + err = -EPERM;
84139 + goto error;
84140 + }
84141 +
84142 + err = gr_search_listen(sock);
84143 + if (err)
84144 + goto error;
84145 +
84146 err = security_socket_listen(sock, backlog);
84147 if (!err)
84148 err = sock->ops->listen(sock, backlog);
84149
84150 +error:
84151 fput_light(sock->file, fput_needed);
84152 }
84153 return err;
84154 @@ -1492,6 +1539,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
84155 newsock->type = sock->type;
84156 newsock->ops = sock->ops;
84157
84158 + if (gr_handle_sock_server_other(sock->sk)) {
84159 + err = -EPERM;
84160 + sock_release(newsock);
84161 + goto out_put;
84162 + }
84163 +
84164 + err = gr_search_accept(sock);
84165 + if (err) {
84166 + sock_release(newsock);
84167 + goto out_put;
84168 + }
84169 +
84170 /*
84171 * We don't need try_module_get here, as the listening socket (sock)
84172 * has the protocol module (sock->ops->owner) held.
84173 @@ -1534,6 +1593,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
84174 fd_install(newfd, newfile);
84175 err = newfd;
84176
84177 + gr_attach_curr_ip(newsock->sk);
84178 +
84179 out_put:
84180 fput_light(sock->file, fput_needed);
84181 out:
84182 @@ -1571,6 +1632,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
84183 int, addrlen)
84184 {
84185 struct socket *sock;
84186 + struct sockaddr *sck;
84187 struct sockaddr_storage address;
84188 int err, fput_needed;
84189
84190 @@ -1581,6 +1643,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
84191 if (err < 0)
84192 goto out_put;
84193
84194 + sck = (struct sockaddr *)&address;
84195 +
84196 + if (gr_handle_sock_client(sck)) {
84197 + err = -EACCES;
84198 + goto out_put;
84199 + }
84200 +
84201 + err = gr_search_connect(sock, (struct sockaddr_in *)sck);
84202 + if (err)
84203 + goto out_put;
84204 +
84205 err =
84206 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
84207 if (err)
84208 @@ -1882,6 +1955,8 @@ SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned, flags)
84209 int err, ctl_len, iov_size, total_len;
84210 int fput_needed;
84211
84212 + pax_track_stack();
84213 +
84214 err = -EFAULT;
84215 if (MSG_CMSG_COMPAT & flags) {
84216 if (get_compat_msghdr(&msg_sys, msg_compat))
84217 @@ -2022,7 +2097,7 @@ SYSCALL_DEFINE3(recvmsg, int, fd, struct msghdr __user *, msg,
84218 * kernel msghdr to use the kernel address space)
84219 */
84220
84221 - uaddr = (__force void __user *)msg_sys.msg_name;
84222 + uaddr = (void __force_user *)msg_sys.msg_name;
84223 uaddr_len = COMPAT_NAMELEN(msg);
84224 if (MSG_CMSG_COMPAT & flags) {
84225 err = verify_compat_iovec(&msg_sys, iov,
84226 diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
84227 index ac94477..8afe5c3 100644
84228 --- a/net/sunrpc/sched.c
84229 +++ b/net/sunrpc/sched.c
84230 @@ -234,10 +234,10 @@ static int rpc_wait_bit_killable(void *word)
84231 #ifdef RPC_DEBUG
84232 static void rpc_task_set_debuginfo(struct rpc_task *task)
84233 {
84234 - static atomic_t rpc_pid;
84235 + static atomic_unchecked_t rpc_pid;
84236
84237 task->tk_magic = RPC_TASK_MAGIC_ID;
84238 - task->tk_pid = atomic_inc_return(&rpc_pid);
84239 + task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
84240 }
84241 #else
84242 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
84243 diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c
84244 index 35fb68b..236a8bf 100644
84245 --- a/net/sunrpc/xprtrdma/svc_rdma.c
84246 +++ b/net/sunrpc/xprtrdma/svc_rdma.c
84247 @@ -59,15 +59,15 @@ unsigned int svcrdma_max_req_size = RPCRDMA_MAX_REQ_SIZE;
84248 static unsigned int min_max_inline = 4096;
84249 static unsigned int max_max_inline = 65536;
84250
84251 -atomic_t rdma_stat_recv;
84252 -atomic_t rdma_stat_read;
84253 -atomic_t rdma_stat_write;
84254 -atomic_t rdma_stat_sq_starve;
84255 -atomic_t rdma_stat_rq_starve;
84256 -atomic_t rdma_stat_rq_poll;
84257 -atomic_t rdma_stat_rq_prod;
84258 -atomic_t rdma_stat_sq_poll;
84259 -atomic_t rdma_stat_sq_prod;
84260 +atomic_unchecked_t rdma_stat_recv;
84261 +atomic_unchecked_t rdma_stat_read;
84262 +atomic_unchecked_t rdma_stat_write;
84263 +atomic_unchecked_t rdma_stat_sq_starve;
84264 +atomic_unchecked_t rdma_stat_rq_starve;
84265 +atomic_unchecked_t rdma_stat_rq_poll;
84266 +atomic_unchecked_t rdma_stat_rq_prod;
84267 +atomic_unchecked_t rdma_stat_sq_poll;
84268 +atomic_unchecked_t rdma_stat_sq_prod;
84269
84270 /* Temporary NFS request map and context caches */
84271 struct kmem_cache *svc_rdma_map_cachep;
84272 @@ -105,7 +105,7 @@ static int read_reset_stat(ctl_table *table, int write,
84273 len -= *ppos;
84274 if (len > *lenp)
84275 len = *lenp;
84276 - if (len && copy_to_user(buffer, str_buf, len))
84277 + if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
84278 return -EFAULT;
84279 *lenp = len;
84280 *ppos += len;
84281 @@ -149,63 +149,63 @@ static ctl_table svcrdma_parm_table[] = {
84282 {
84283 .procname = "rdma_stat_read",
84284 .data = &rdma_stat_read,
84285 - .maxlen = sizeof(atomic_t),
84286 + .maxlen = sizeof(atomic_unchecked_t),
84287 .mode = 0644,
84288 .proc_handler = &read_reset_stat,
84289 },
84290 {
84291 .procname = "rdma_stat_recv",
84292 .data = &rdma_stat_recv,
84293 - .maxlen = sizeof(atomic_t),
84294 + .maxlen = sizeof(atomic_unchecked_t),
84295 .mode = 0644,
84296 .proc_handler = &read_reset_stat,
84297 },
84298 {
84299 .procname = "rdma_stat_write",
84300 .data = &rdma_stat_write,
84301 - .maxlen = sizeof(atomic_t),
84302 + .maxlen = sizeof(atomic_unchecked_t),
84303 .mode = 0644,
84304 .proc_handler = &read_reset_stat,
84305 },
84306 {
84307 .procname = "rdma_stat_sq_starve",
84308 .data = &rdma_stat_sq_starve,
84309 - .maxlen = sizeof(atomic_t),
84310 + .maxlen = sizeof(atomic_unchecked_t),
84311 .mode = 0644,
84312 .proc_handler = &read_reset_stat,
84313 },
84314 {
84315 .procname = "rdma_stat_rq_starve",
84316 .data = &rdma_stat_rq_starve,
84317 - .maxlen = sizeof(atomic_t),
84318 + .maxlen = sizeof(atomic_unchecked_t),
84319 .mode = 0644,
84320 .proc_handler = &read_reset_stat,
84321 },
84322 {
84323 .procname = "rdma_stat_rq_poll",
84324 .data = &rdma_stat_rq_poll,
84325 - .maxlen = sizeof(atomic_t),
84326 + .maxlen = sizeof(atomic_unchecked_t),
84327 .mode = 0644,
84328 .proc_handler = &read_reset_stat,
84329 },
84330 {
84331 .procname = "rdma_stat_rq_prod",
84332 .data = &rdma_stat_rq_prod,
84333 - .maxlen = sizeof(atomic_t),
84334 + .maxlen = sizeof(atomic_unchecked_t),
84335 .mode = 0644,
84336 .proc_handler = &read_reset_stat,
84337 },
84338 {
84339 .procname = "rdma_stat_sq_poll",
84340 .data = &rdma_stat_sq_poll,
84341 - .maxlen = sizeof(atomic_t),
84342 + .maxlen = sizeof(atomic_unchecked_t),
84343 .mode = 0644,
84344 .proc_handler = &read_reset_stat,
84345 },
84346 {
84347 .procname = "rdma_stat_sq_prod",
84348 .data = &rdma_stat_sq_prod,
84349 - .maxlen = sizeof(atomic_t),
84350 + .maxlen = sizeof(atomic_unchecked_t),
84351 .mode = 0644,
84352 .proc_handler = &read_reset_stat,
84353 },
84354 diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
84355 index 9e88438..8ed5cf0 100644
84356 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
84357 +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
84358 @@ -495,7 +495,7 @@ next_sge:
84359 svc_rdma_put_context(ctxt, 0);
84360 goto out;
84361 }
84362 - atomic_inc(&rdma_stat_read);
84363 + atomic_inc_unchecked(&rdma_stat_read);
84364
84365 if (read_wr.num_sge < chl_map->ch[ch_no].count) {
84366 chl_map->ch[ch_no].count -= read_wr.num_sge;
84367 @@ -606,7 +606,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
84368 dto_q);
84369 list_del_init(&ctxt->dto_q);
84370 } else {
84371 - atomic_inc(&rdma_stat_rq_starve);
84372 + atomic_inc_unchecked(&rdma_stat_rq_starve);
84373 clear_bit(XPT_DATA, &xprt->xpt_flags);
84374 ctxt = NULL;
84375 }
84376 @@ -626,7 +626,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
84377 dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
84378 ctxt, rdma_xprt, rqstp, ctxt->wc_status);
84379 BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
84380 - atomic_inc(&rdma_stat_recv);
84381 + atomic_inc_unchecked(&rdma_stat_recv);
84382
84383 /* Build up the XDR from the receive buffers. */
84384 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
84385 diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
84386 index f11be72..7aad4e8 100644
84387 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
84388 +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
84389 @@ -328,7 +328,7 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
84390 write_wr.wr.rdma.remote_addr = to;
84391
84392 /* Post It */
84393 - atomic_inc(&rdma_stat_write);
84394 + atomic_inc_unchecked(&rdma_stat_write);
84395 if (svc_rdma_send(xprt, &write_wr))
84396 goto err;
84397 return 0;
84398 diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
84399 index 3fa5751..030ba89 100644
84400 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
84401 +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
84402 @@ -292,7 +292,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
84403 return;
84404
84405 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
84406 - atomic_inc(&rdma_stat_rq_poll);
84407 + atomic_inc_unchecked(&rdma_stat_rq_poll);
84408
84409 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
84410 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
84411 @@ -314,7 +314,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
84412 }
84413
84414 if (ctxt)
84415 - atomic_inc(&rdma_stat_rq_prod);
84416 + atomic_inc_unchecked(&rdma_stat_rq_prod);
84417
84418 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
84419 /*
84420 @@ -386,7 +386,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
84421 return;
84422
84423 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
84424 - atomic_inc(&rdma_stat_sq_poll);
84425 + atomic_inc_unchecked(&rdma_stat_sq_poll);
84426 while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
84427 if (wc.status != IB_WC_SUCCESS)
84428 /* Close the transport */
84429 @@ -404,7 +404,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
84430 }
84431
84432 if (ctxt)
84433 - atomic_inc(&rdma_stat_sq_prod);
84434 + atomic_inc_unchecked(&rdma_stat_sq_prod);
84435 }
84436
84437 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
84438 @@ -1260,7 +1260,7 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
84439 spin_lock_bh(&xprt->sc_lock);
84440 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
84441 spin_unlock_bh(&xprt->sc_lock);
84442 - atomic_inc(&rdma_stat_sq_starve);
84443 + atomic_inc_unchecked(&rdma_stat_sq_starve);
84444
84445 /* See if we can opportunistically reap SQ WR to make room */
84446 sq_cq_reap(xprt);
84447 diff --git a/net/sysctl_net.c b/net/sysctl_net.c
84448 index 0b15d72..7934fbb 100644
84449 --- a/net/sysctl_net.c
84450 +++ b/net/sysctl_net.c
84451 @@ -46,7 +46,7 @@ static int net_ctl_permissions(struct ctl_table_root *root,
84452 struct ctl_table *table)
84453 {
84454 /* Allow network administrator to have same access as root. */
84455 - if (capable(CAP_NET_ADMIN)) {
84456 + if (capable_nolog(CAP_NET_ADMIN)) {
84457 int mode = (table->mode >> 6) & 7;
84458 return (mode << 6) | (mode << 3) | mode;
84459 }
84460 diff --git a/net/tipc/link.c b/net/tipc/link.c
84461 index dd4c18b..f40d38d 100644
84462 --- a/net/tipc/link.c
84463 +++ b/net/tipc/link.c
84464 @@ -1418,7 +1418,7 @@ again:
84465
84466 if (!sect_rest) {
84467 sect_rest = msg_sect[++curr_sect].iov_len;
84468 - sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
84469 + sect_crs = (const unchar __user *)msg_sect[curr_sect].iov_base;
84470 }
84471
84472 if (sect_rest < fragm_rest)
84473 @@ -1437,7 +1437,7 @@ error:
84474 }
84475 } else
84476 skb_copy_to_linear_data_offset(buf, fragm_crs,
84477 - sect_crs, sz);
84478 + (const void __force_kernel *)sect_crs, sz);
84479 sect_crs += sz;
84480 sect_rest -= sz;
84481 fragm_crs += sz;
84482 diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
84483 index 0747d8a..e8bf3f3 100644
84484 --- a/net/tipc/subscr.c
84485 +++ b/net/tipc/subscr.c
84486 @@ -104,7 +104,7 @@ static void subscr_send_event(struct subscription *sub,
84487 {
84488 struct iovec msg_sect;
84489
84490 - msg_sect.iov_base = (void *)&sub->evt;
84491 + msg_sect.iov_base = (void __force_user *)&sub->evt;
84492 msg_sect.iov_len = sizeof(struct tipc_event);
84493
84494 sub->evt.event = htohl(event, sub->swap);
84495 diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
84496 index db8d51a..608692d 100644
84497 --- a/net/unix/af_unix.c
84498 +++ b/net/unix/af_unix.c
84499 @@ -745,6 +745,12 @@ static struct sock *unix_find_other(struct net *net,
84500 err = -ECONNREFUSED;
84501 if (!S_ISSOCK(inode->i_mode))
84502 goto put_fail;
84503 +
84504 + if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
84505 + err = -EACCES;
84506 + goto put_fail;
84507 + }
84508 +
84509 u = unix_find_socket_byinode(net, inode);
84510 if (!u)
84511 goto put_fail;
84512 @@ -765,6 +771,13 @@ static struct sock *unix_find_other(struct net *net,
84513 if (u) {
84514 struct dentry *dentry;
84515 dentry = unix_sk(u)->dentry;
84516 +
84517 + if (!gr_handle_chroot_unix(u->sk_peercred.pid)) {
84518 + err = -EPERM;
84519 + sock_put(u);
84520 + goto fail;
84521 + }
84522 +
84523 if (dentry)
84524 touch_atime(unix_sk(u)->mnt, dentry);
84525 } else
84526 @@ -850,11 +863,18 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
84527 err = security_path_mknod(&nd.path, dentry, mode, 0);
84528 if (err)
84529 goto out_mknod_drop_write;
84530 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
84531 + err = -EACCES;
84532 + goto out_mknod_drop_write;
84533 + }
84534 err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0);
84535 out_mknod_drop_write:
84536 mnt_drop_write(nd.path.mnt);
84537 if (err)
84538 goto out_mknod_dput;
84539 +
84540 + gr_handle_create(dentry, nd.path.mnt);
84541 +
84542 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
84543 dput(nd.path.dentry);
84544 nd.path.dentry = dentry;
84545 @@ -2211,7 +2231,11 @@ static int unix_seq_show(struct seq_file *seq, void *v)
84546 unix_state_lock(s);
84547
84548 seq_printf(seq, "%p: %08X %08X %08X %04X %02X %5lu",
84549 +#ifdef CONFIG_GRKERNSEC_HIDESYM
84550 + NULL,
84551 +#else
84552 s,
84553 +#endif
84554 atomic_read(&s->sk_refcnt),
84555 0,
84556 s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
84557 diff --git a/net/wireless/core.h b/net/wireless/core.h
84558 index 376798f..109a61f 100644
84559 --- a/net/wireless/core.h
84560 +++ b/net/wireless/core.h
84561 @@ -27,7 +27,7 @@ struct cfg80211_registered_device {
84562 struct mutex mtx;
84563
84564 /* rfkill support */
84565 - struct rfkill_ops rfkill_ops;
84566 + rfkill_ops_no_const rfkill_ops;
84567 struct rfkill *rfkill;
84568 struct work_struct rfkill_sync;
84569
84570 diff --git a/net/wireless/wext.c b/net/wireless/wext.c
84571 index a2e4c60..0979cbe 100644
84572 --- a/net/wireless/wext.c
84573 +++ b/net/wireless/wext.c
84574 @@ -816,8 +816,7 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
84575 */
84576
84577 /* Support for very large requests */
84578 - if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
84579 - (user_length > descr->max_tokens)) {
84580 + if (user_length > descr->max_tokens) {
84581 /* Allow userspace to GET more than max so
84582 * we can support any size GET requests.
84583 * There is still a limit : -ENOMEM.
84584 @@ -854,22 +853,6 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
84585 }
84586 }
84587
84588 - if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
84589 - /*
84590 - * If this is a GET, but not NOMAX, it means that the extra
84591 - * data is not bounded by userspace, but by max_tokens. Thus
84592 - * set the length to max_tokens. This matches the extra data
84593 - * allocation.
84594 - * The driver should fill it with the number of tokens it
84595 - * provided, and it may check iwp->length rather than having
84596 - * knowledge of max_tokens. If the driver doesn't change the
84597 - * iwp->length, this ioctl just copies back max_token tokens
84598 - * filled with zeroes. Hopefully the driver isn't claiming
84599 - * them to be valid data.
84600 - */
84601 - iwp->length = descr->max_tokens;
84602 - }
84603 -
84604 err = handler(dev, info, (union iwreq_data *) iwp, extra);
84605
84606 iwp->length += essid_compat;
84607 diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
84608 index cb81ca3..e15d49a 100644
84609 --- a/net/xfrm/xfrm_policy.c
84610 +++ b/net/xfrm/xfrm_policy.c
84611 @@ -586,7 +586,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
84612 hlist_add_head(&policy->bydst, chain);
84613 xfrm_pol_hold(policy);
84614 net->xfrm.policy_count[dir]++;
84615 - atomic_inc(&flow_cache_genid);
84616 + atomic_inc_unchecked(&flow_cache_genid);
84617 if (delpol)
84618 __xfrm_policy_unlink(delpol, dir);
84619 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
84620 @@ -669,7 +669,7 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u8 type, int dir,
84621 write_unlock_bh(&xfrm_policy_lock);
84622
84623 if (ret && delete) {
84624 - atomic_inc(&flow_cache_genid);
84625 + atomic_inc_unchecked(&flow_cache_genid);
84626 xfrm_policy_kill(ret);
84627 }
84628 return ret;
84629 @@ -710,7 +710,7 @@ struct xfrm_policy *xfrm_policy_byid(struct net *net, u8 type, int dir, u32 id,
84630 write_unlock_bh(&xfrm_policy_lock);
84631
84632 if (ret && delete) {
84633 - atomic_inc(&flow_cache_genid);
84634 + atomic_inc_unchecked(&flow_cache_genid);
84635 xfrm_policy_kill(ret);
84636 }
84637 return ret;
84638 @@ -824,7 +824,7 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
84639 }
84640
84641 }
84642 - atomic_inc(&flow_cache_genid);
84643 + atomic_inc_unchecked(&flow_cache_genid);
84644 out:
84645 write_unlock_bh(&xfrm_policy_lock);
84646 return err;
84647 @@ -1088,7 +1088,7 @@ int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
84648 write_unlock_bh(&xfrm_policy_lock);
84649 if (pol) {
84650 if (dir < XFRM_POLICY_MAX)
84651 - atomic_inc(&flow_cache_genid);
84652 + atomic_inc_unchecked(&flow_cache_genid);
84653 xfrm_policy_kill(pol);
84654 return 0;
84655 }
84656 @@ -1477,7 +1477,7 @@ free_dst:
84657 goto out;
84658 }
84659
84660 -static int inline
84661 +static inline int
84662 xfrm_dst_alloc_copy(void **target, void *src, int size)
84663 {
84664 if (!*target) {
84665 @@ -1489,7 +1489,7 @@ xfrm_dst_alloc_copy(void **target, void *src, int size)
84666 return 0;
84667 }
84668
84669 -static int inline
84670 +static inline int
84671 xfrm_dst_update_parent(struct dst_entry *dst, struct xfrm_selector *sel)
84672 {
84673 #ifdef CONFIG_XFRM_SUB_POLICY
84674 @@ -1501,7 +1501,7 @@ xfrm_dst_update_parent(struct dst_entry *dst, struct xfrm_selector *sel)
84675 #endif
84676 }
84677
84678 -static int inline
84679 +static inline int
84680 xfrm_dst_update_origin(struct dst_entry *dst, struct flowi *fl)
84681 {
84682 #ifdef CONFIG_XFRM_SUB_POLICY
84683 @@ -1537,7 +1537,7 @@ int __xfrm_lookup(struct net *net, struct dst_entry **dst_p, struct flowi *fl,
84684 u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
84685
84686 restart:
84687 - genid = atomic_read(&flow_cache_genid);
84688 + genid = atomic_read_unchecked(&flow_cache_genid);
84689 policy = NULL;
84690 for (pi = 0; pi < ARRAY_SIZE(pols); pi++)
84691 pols[pi] = NULL;
84692 @@ -1680,7 +1680,7 @@ restart:
84693 goto error;
84694 }
84695 if (nx == -EAGAIN ||
84696 - genid != atomic_read(&flow_cache_genid)) {
84697 + genid != atomic_read_unchecked(&flow_cache_genid)) {
84698 xfrm_pols_put(pols, npols);
84699 goto restart;
84700 }
84701 diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
84702 index b95a2d6..85c4d78 100644
84703 --- a/net/xfrm/xfrm_user.c
84704 +++ b/net/xfrm/xfrm_user.c
84705 @@ -1169,6 +1169,8 @@ static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
84706 struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
84707 int i;
84708
84709 + pax_track_stack();
84710 +
84711 if (xp->xfrm_nr == 0)
84712 return 0;
84713
84714 @@ -1784,6 +1786,8 @@ static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
84715 int err;
84716 int n = 0;
84717
84718 + pax_track_stack();
84719 +
84720 if (attrs[XFRMA_MIGRATE] == NULL)
84721 return -EINVAL;
84722
84723 diff --git a/samples/kobject/kset-example.c b/samples/kobject/kset-example.c
84724 index 45b7d56..19e828c 100644
84725 --- a/samples/kobject/kset-example.c
84726 +++ b/samples/kobject/kset-example.c
84727 @@ -87,7 +87,7 @@ static ssize_t foo_attr_store(struct kobject *kobj,
84728 }
84729
84730 /* Our custom sysfs_ops that we will associate with our ktype later on */
84731 -static struct sysfs_ops foo_sysfs_ops = {
84732 +static const struct sysfs_ops foo_sysfs_ops = {
84733 .show = foo_attr_show,
84734 .store = foo_attr_store,
84735 };
84736 diff --git a/scripts/Makefile.build b/scripts/Makefile.build
84737 index 341b589..405aed3 100644
84738 --- a/scripts/Makefile.build
84739 +++ b/scripts/Makefile.build
84740 @@ -59,7 +59,7 @@ endif
84741 endif
84742
84743 # Do not include host rules unless needed
84744 -ifneq ($(hostprogs-y)$(hostprogs-m),)
84745 +ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m),)
84746 include scripts/Makefile.host
84747 endif
84748
84749 diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean
84750 index 6f89fbb..53adc9c 100644
84751 --- a/scripts/Makefile.clean
84752 +++ b/scripts/Makefile.clean
84753 @@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subdir-ymn))
84754 __clean-files := $(extra-y) $(always) \
84755 $(targets) $(clean-files) \
84756 $(host-progs) \
84757 - $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
84758 + $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
84759 + $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
84760
84761 # as clean-files is given relative to the current directory, this adds
84762 # a $(obj) prefix, except for absolute paths
84763 diff --git a/scripts/Makefile.host b/scripts/Makefile.host
84764 index 1ac414f..a1c1451 100644
84765 --- a/scripts/Makefile.host
84766 +++ b/scripts/Makefile.host
84767 @@ -31,6 +31,7 @@
84768 # Note: Shared libraries consisting of C++ files are not supported
84769
84770 __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
84771 +__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
84772
84773 # C code
84774 # Executables compiled from a single .c file
84775 @@ -54,6 +55,7 @@ host-cxxobjs := $(sort $(foreach m,$(host-cxxmulti),$($(m)-cxxobjs)))
84776 # Shared libaries (only .c supported)
84777 # Shared libraries (.so) - all .so files referenced in "xxx-objs"
84778 host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
84779 +host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
84780 # Remove .so files from "xxx-objs"
84781 host-cobjs := $(filter-out %.so,$(host-cobjs))
84782
84783 diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c
84784 index 6bf21f8..c0546b3 100644
84785 --- a/scripts/basic/fixdep.c
84786 +++ b/scripts/basic/fixdep.c
84787 @@ -162,7 +162,7 @@ static void grow_config(int len)
84788 /*
84789 * Lookup a value in the configuration string.
84790 */
84791 -static int is_defined_config(const char * name, int len)
84792 +static int is_defined_config(const char * name, unsigned int len)
84793 {
84794 const char * pconfig;
84795 const char * plast = str_config + len_config - len;
84796 @@ -199,7 +199,7 @@ static void clear_config(void)
84797 /*
84798 * Record the use of a CONFIG_* word.
84799 */
84800 -static void use_config(char *m, int slen)
84801 +static void use_config(char *m, unsigned int slen)
84802 {
84803 char s[PATH_MAX];
84804 char *p;
84805 @@ -222,9 +222,9 @@ static void use_config(char *m, int slen)
84806
84807 static void parse_config_file(char *map, size_t len)
84808 {
84809 - int *end = (int *) (map + len);
84810 + unsigned int *end = (unsigned int *) (map + len);
84811 /* start at +1, so that p can never be < map */
84812 - int *m = (int *) map + 1;
84813 + unsigned int *m = (unsigned int *) map + 1;
84814 char *p, *q;
84815
84816 for (; m < end; m++) {
84817 @@ -371,7 +371,7 @@ static void print_deps(void)
84818 static void traps(void)
84819 {
84820 static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
84821 - int *p = (int *)test;
84822 + unsigned int *p = (unsigned int *)test;
84823
84824 if (*p != INT_CONF) {
84825 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
84826 diff --git a/scripts/gcc-plugin.sh b/scripts/gcc-plugin.sh
84827 new file mode 100644
84828 index 0000000..8729101
84829 --- /dev/null
84830 +++ b/scripts/gcc-plugin.sh
84831 @@ -0,0 +1,2 @@
84832 +#!/bin/sh
84833 +echo -e "#include \"gcc-plugin.h\"\n#include \"tree.h\"\n#include \"tm.h\"\n#include \"rtl.h\"" | $1 -x c -shared - -o /dev/null -I`$2 -print-file-name=plugin`/include >/dev/null 2>&1 && echo "y"
84834 diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
84835 index 62a9025..65b82ad 100644
84836 --- a/scripts/mod/file2alias.c
84837 +++ b/scripts/mod/file2alias.c
84838 @@ -72,7 +72,7 @@ static void device_id_check(const char *modname, const char *device_id,
84839 unsigned long size, unsigned long id_size,
84840 void *symval)
84841 {
84842 - int i;
84843 + unsigned int i;
84844
84845 if (size % id_size || size < id_size) {
84846 if (cross_build != 0)
84847 @@ -102,7 +102,7 @@ static void device_id_check(const char *modname, const char *device_id,
84848 /* USB is special because the bcdDevice can be matched against a numeric range */
84849 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
84850 static void do_usb_entry(struct usb_device_id *id,
84851 - unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
84852 + unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
84853 unsigned char range_lo, unsigned char range_hi,
84854 struct module *mod)
84855 {
84856 @@ -151,7 +151,7 @@ static void do_usb_entry_multi(struct usb_device_id *id, struct module *mod)
84857 {
84858 unsigned int devlo, devhi;
84859 unsigned char chi, clo;
84860 - int ndigits;
84861 + unsigned int ndigits;
84862
84863 id->match_flags = TO_NATIVE(id->match_flags);
84864 id->idVendor = TO_NATIVE(id->idVendor);
84865 @@ -368,7 +368,7 @@ static void do_pnp_device_entry(void *symval, unsigned long size,
84866 for (i = 0; i < count; i++) {
84867 const char *id = (char *)devs[i].id;
84868 char acpi_id[sizeof(devs[0].id)];
84869 - int j;
84870 + unsigned int j;
84871
84872 buf_printf(&mod->dev_table_buf,
84873 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
84874 @@ -398,7 +398,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
84875
84876 for (j = 0; j < PNP_MAX_DEVICES; j++) {
84877 const char *id = (char *)card->devs[j].id;
84878 - int i2, j2;
84879 + unsigned int i2, j2;
84880 int dup = 0;
84881
84882 if (!id[0])
84883 @@ -424,7 +424,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
84884 /* add an individual alias for every device entry */
84885 if (!dup) {
84886 char acpi_id[sizeof(card->devs[0].id)];
84887 - int k;
84888 + unsigned int k;
84889
84890 buf_printf(&mod->dev_table_buf,
84891 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
84892 @@ -699,7 +699,7 @@ static void dmi_ascii_filter(char *d, const char *s)
84893 static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
84894 char *alias)
84895 {
84896 - int i, j;
84897 + unsigned int i, j;
84898
84899 sprintf(alias, "dmi*");
84900
84901 diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
84902 index 03efeab..0888989 100644
84903 --- a/scripts/mod/modpost.c
84904 +++ b/scripts/mod/modpost.c
84905 @@ -835,6 +835,7 @@ enum mismatch {
84906 INIT_TO_EXIT,
84907 EXIT_TO_INIT,
84908 EXPORT_TO_INIT_EXIT,
84909 + DATA_TO_TEXT
84910 };
84911
84912 struct sectioncheck {
84913 @@ -920,6 +921,12 @@ const struct sectioncheck sectioncheck[] = {
84914 .fromsec = { "__ksymtab*", NULL },
84915 .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
84916 .mismatch = EXPORT_TO_INIT_EXIT
84917 +},
84918 +/* Do not reference code from writable data */
84919 +{
84920 + .fromsec = { DATA_SECTIONS, NULL },
84921 + .tosec = { TEXT_SECTIONS, NULL },
84922 + .mismatch = DATA_TO_TEXT
84923 }
84924 };
84925
84926 @@ -1024,10 +1031,10 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
84927 continue;
84928 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
84929 continue;
84930 - if (sym->st_value == addr)
84931 - return sym;
84932 /* Find a symbol nearby - addr are maybe negative */
84933 d = sym->st_value - addr;
84934 + if (d == 0)
84935 + return sym;
84936 if (d < 0)
84937 d = addr - sym->st_value;
84938 if (d < distance) {
84939 @@ -1268,6 +1275,14 @@ static void report_sec_mismatch(const char *modname, enum mismatch mismatch,
84940 "Fix this by removing the %sannotation of %s "
84941 "or drop the export.\n",
84942 tosym, sec2annotation(tosec), sec2annotation(tosec), tosym);
84943 + case DATA_TO_TEXT:
84944 +/*
84945 + fprintf(stderr,
84946 + "The variable %s references\n"
84947 + "the %s %s%s%s\n",
84948 + fromsym, to, sec2annotation(tosec), tosym, to_p);
84949 +*/
84950 + break;
84951 case NO_MISMATCH:
84952 /* To get warnings on missing members */
84953 break;
84954 @@ -1495,7 +1510,7 @@ static void section_rel(const char *modname, struct elf_info *elf,
84955 static void check_sec_ref(struct module *mod, const char *modname,
84956 struct elf_info *elf)
84957 {
84958 - int i;
84959 + unsigned int i;
84960 Elf_Shdr *sechdrs = elf->sechdrs;
84961
84962 /* Walk through all sections */
84963 @@ -1651,7 +1666,7 @@ void __attribute__((format(printf, 2, 3))) buf_printf(struct buffer *buf,
84964 va_end(ap);
84965 }
84966
84967 -void buf_write(struct buffer *buf, const char *s, int len)
84968 +void buf_write(struct buffer *buf, const char *s, unsigned int len)
84969 {
84970 if (buf->size - buf->pos < len) {
84971 buf->size += len + SZ;
84972 @@ -1863,7 +1878,7 @@ static void write_if_changed(struct buffer *b, const char *fname)
84973 if (fstat(fileno(file), &st) < 0)
84974 goto close_write;
84975
84976 - if (st.st_size != b->pos)
84977 + if (st.st_size != (off_t)b->pos)
84978 goto close_write;
84979
84980 tmp = NOFAIL(malloc(b->pos));
84981 diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
84982 index 09f58e3..4b66092 100644
84983 --- a/scripts/mod/modpost.h
84984 +++ b/scripts/mod/modpost.h
84985 @@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *expr);
84986
84987 struct buffer {
84988 char *p;
84989 - int pos;
84990 - int size;
84991 + unsigned int pos;
84992 + unsigned int size;
84993 };
84994
84995 void __attribute__((format(printf, 2, 3)))
84996 buf_printf(struct buffer *buf, const char *fmt, ...);
84997
84998 void
84999 -buf_write(struct buffer *buf, const char *s, int len);
85000 +buf_write(struct buffer *buf, const char *s, unsigned int len);
85001
85002 struct module {
85003 struct module *next;
85004 diff --git a/scripts/mod/sumversion.c b/scripts/mod/sumversion.c
85005 index ecf9c7d..d52b38e 100644
85006 --- a/scripts/mod/sumversion.c
85007 +++ b/scripts/mod/sumversion.c
85008 @@ -455,7 +455,7 @@ static void write_version(const char *filename, const char *sum,
85009 goto out;
85010 }
85011
85012 - if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
85013 + if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
85014 warn("writing sum in %s failed: %s\n",
85015 filename, strerror(errno));
85016 goto out;
85017 diff --git a/scripts/package/mkspec b/scripts/package/mkspec
85018 index 47bdd2f..d4d4e93 100755
85019 --- a/scripts/package/mkspec
85020 +++ b/scripts/package/mkspec
85021 @@ -70,7 +70,7 @@ echo 'mkdir -p $RPM_BUILD_ROOT/boot $RPM_BUILD_ROOT/lib/modules'
85022 echo 'mkdir -p $RPM_BUILD_ROOT/lib/firmware'
85023 echo "%endif"
85024
85025 -echo 'INSTALL_MOD_PATH=$RPM_BUILD_ROOT make %{_smp_mflags} KBUILD_SRC= modules_install'
85026 +echo 'INSTALL_MOD_PATH=$RPM_BUILD_ROOT make %{?_smp_mflags} KBUILD_SRC= modules_install'
85027 echo "%ifarch ia64"
85028 echo 'cp $KBUILD_IMAGE $RPM_BUILD_ROOT'"/boot/efi/vmlinuz-$KERNELRELEASE"
85029 echo 'ln -s '"efi/vmlinuz-$KERNELRELEASE" '$RPM_BUILD_ROOT'"/boot/"
85030 diff --git a/scripts/pnmtologo.c b/scripts/pnmtologo.c
85031 index 5c11312..72742b5 100644
85032 --- a/scripts/pnmtologo.c
85033 +++ b/scripts/pnmtologo.c
85034 @@ -237,14 +237,14 @@ static void write_header(void)
85035 fprintf(out, " * Linux logo %s\n", logoname);
85036 fputs(" */\n\n", out);
85037 fputs("#include <linux/linux_logo.h>\n\n", out);
85038 - fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
85039 + fprintf(out, "static unsigned char %s_data[] = {\n",
85040 logoname);
85041 }
85042
85043 static void write_footer(void)
85044 {
85045 fputs("\n};\n\n", out);
85046 - fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
85047 + fprintf(out, "const struct linux_logo %s = {\n", logoname);
85048 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
85049 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
85050 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
85051 @@ -374,7 +374,7 @@ static void write_logo_clut224(void)
85052 fputs("\n};\n\n", out);
85053
85054 /* write logo clut */
85055 - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
85056 + fprintf(out, "static unsigned char %s_clut[] = {\n",
85057 logoname);
85058 write_hex_cnt = 0;
85059 for (i = 0; i < logo_clutsize; i++) {
85060 diff --git a/scripts/tags.sh b/scripts/tags.sh
85061 index d52f7a0..269eb1b 100755
85062 --- a/scripts/tags.sh
85063 +++ b/scripts/tags.sh
85064 @@ -93,6 +93,11 @@ docscope()
85065 cscope -b -f cscope.out
85066 }
85067
85068 +dogtags()
85069 +{
85070 + all_sources | gtags -f -
85071 +}
85072 +
85073 exuberant()
85074 {
85075 all_sources | xargs $1 -a \
85076 @@ -164,6 +169,10 @@ case "$1" in
85077 docscope
85078 ;;
85079
85080 + "gtags")
85081 + dogtags
85082 + ;;
85083 +
85084 "tags")
85085 rm -f tags
85086 xtags ctags
85087 diff --git a/security/Kconfig b/security/Kconfig
85088 index fb363cd..886ace4 100644
85089 --- a/security/Kconfig
85090 +++ b/security/Kconfig
85091 @@ -4,6 +4,626 @@
85092
85093 menu "Security options"
85094
85095 +source grsecurity/Kconfig
85096 +
85097 +menu "PaX"
85098 +
85099 + config ARCH_TRACK_EXEC_LIMIT
85100 + bool
85101 +
85102 + config PAX_KERNEXEC_PLUGIN
85103 + bool
85104 +
85105 + config PAX_PER_CPU_PGD
85106 + bool
85107 +
85108 + config TASK_SIZE_MAX_SHIFT
85109 + int
85110 + depends on X86_64
85111 + default 47 if !PAX_PER_CPU_PGD
85112 + default 42 if PAX_PER_CPU_PGD
85113 +
85114 + config PAX_ENABLE_PAE
85115 + bool
85116 + default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
85117 +
85118 +config PAX
85119 + bool "Enable various PaX features"
85120 + depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
85121 + help
85122 + This allows you to enable various PaX features. PaX adds
85123 + intrusion prevention mechanisms to the kernel that reduce
85124 + the risks posed by exploitable memory corruption bugs.
85125 +
85126 +menu "PaX Control"
85127 + depends on PAX
85128 +
85129 +config PAX_SOFTMODE
85130 + bool 'Support soft mode'
85131 + help
85132 + Enabling this option will allow you to run PaX in soft mode, that
85133 + is, PaX features will not be enforced by default, only on executables
85134 + marked explicitly. You must also enable PT_PAX_FLAGS or XATTR_PAX_FLAGS
85135 + support as they are the only way to mark executables for soft mode use.
85136 +
85137 + Soft mode can be activated by using the "pax_softmode=1" kernel command
85138 + line option on boot. Furthermore you can control various PaX features
85139 + at runtime via the entries in /proc/sys/kernel/pax.
85140 +
85141 +config PAX_EI_PAX
85142 + bool 'Use legacy ELF header marking'
85143 + help
85144 + Enabling this option will allow you to control PaX features on
85145 + a per executable basis via the 'chpax' utility available at
85146 + http://pax.grsecurity.net/. The control flags will be read from
85147 + an otherwise reserved part of the ELF header. This marking has
85148 + numerous drawbacks (no support for soft-mode, toolchain does not
85149 + know about the non-standard use of the ELF header) therefore it
85150 + has been deprecated in favour of PT_PAX_FLAGS and XATTR_PAX_FLAGS
85151 + support.
85152 +
85153 + If you have applications not marked by the PT_PAX_FLAGS ELF program
85154 + header and you cannot use XATTR_PAX_FLAGS then you MUST enable this
85155 + option otherwise they will not get any protection.
85156 +
85157 + Note that if you enable PT_PAX_FLAGS or XATTR_PAX_FLAGS marking
85158 + support as well, they will override the legacy EI_PAX marks.
85159 +
85160 +config PAX_PT_PAX_FLAGS
85161 + bool 'Use ELF program header marking'
85162 + help
85163 + Enabling this option will allow you to control PaX features on
85164 + a per executable basis via the 'paxctl' utility available at
85165 + http://pax.grsecurity.net/. The control flags will be read from
85166 + a PaX specific ELF program header (PT_PAX_FLAGS). This marking
85167 + has the benefits of supporting both soft mode and being fully
85168 + integrated into the toolchain (the binutils patch is available
85169 + from http://pax.grsecurity.net).
85170 +
85171 + If you have applications not marked by the PT_PAX_FLAGS ELF program
85172 + header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking
85173 + support otherwise they will not get any protection.
85174 +
85175 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
85176 + must make sure that the marks are the same if a binary has both marks.
85177 +
85178 + Note that if you enable the legacy EI_PAX marking support as well,
85179 + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
85180 +
85181 +config PAX_XATTR_PAX_FLAGS
85182 + bool 'Use filesystem extended attributes marking'
85183 + depends on EXPERT
85184 + select CIFS_XATTR if CIFS
85185 + select EXT2_FS_XATTR if EXT2_FS
85186 + select EXT3_FS_XATTR if EXT3_FS
85187 + select EXT4_FS_XATTR if EXT4_FS
85188 + select JFFS2_FS_XATTR if JFFS2_FS
85189 + select REISERFS_FS_XATTR if REISERFS_FS
85190 + select UBIFS_FS_XATTR if UBIFS_FS
85191 + help
85192 + Enabling this option will allow you to control PaX features on
85193 + a per executable basis via the 'setfattr' utility. The control
85194 + flags will be read from the user.pax.flags extended attribute of
85195 + the file. This marking has the benefit of supporting binary-only
85196 + applications that self-check themselves (e.g., skype) and would
85197 + not tolerate chpax/paxctl changes. The main drawback is that
85198 + extended attributes are not supported by some filesystems (e.g.,
85199 + isofs, squashfs, tmpfs, udf, vfat) so copying files through such
85200 + filesystems will lose the extended attributes and these PaX markings.
85201 +
85202 + If you have applications not marked by the PT_PAX_FLAGS ELF program
85203 + header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking
85204 + support otherwise they will not get any protection.
85205 +
85206 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
85207 + must make sure that the marks are the same if a binary has both marks.
85208 +
85209 + Note that if you enable the legacy EI_PAX marking support as well,
85210 + the EI_PAX marks will be overridden by the XATTR_PAX_FLAGS marks.
85211 +
85212 +choice
85213 + prompt 'MAC system integration'
85214 + default PAX_HAVE_ACL_FLAGS
85215 + help
85216 + Mandatory Access Control systems have the option of controlling
85217 + PaX flags on a per executable basis, choose the method supported
85218 + by your particular system.
85219 +
85220 + - "none": if your MAC system does not interact with PaX,
85221 + - "direct": if your MAC system defines pax_set_initial_flags() itself,
85222 + - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
85223 +
85224 + NOTE: this option is for developers/integrators only.
85225 +
85226 + config PAX_NO_ACL_FLAGS
85227 + bool 'none'
85228 +
85229 + config PAX_HAVE_ACL_FLAGS
85230 + bool 'direct'
85231 +
85232 + config PAX_HOOK_ACL_FLAGS
85233 + bool 'hook'
85234 +endchoice
85235 +
85236 +endmenu
85237 +
85238 +menu "Non-executable pages"
85239 + depends on PAX
85240 +
85241 +config PAX_NOEXEC
85242 + bool "Enforce non-executable pages"
85243 + depends on ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86
85244 + help
85245 + By design some architectures do not allow for protecting memory
85246 + pages against execution or even if they do, Linux does not make
85247 + use of this feature. In practice this means that if a page is
85248 + readable (such as the stack or heap) it is also executable.
85249 +
85250 + There is a well known exploit technique that makes use of this
85251 + fact and a common programming mistake where an attacker can
85252 + introduce code of his choice somewhere in the attacked program's
85253 + memory (typically the stack or the heap) and then execute it.
85254 +
85255 + If the attacked program was running with different (typically
85256 + higher) privileges than that of the attacker, then he can elevate
85257 + his own privilege level (e.g. get a root shell, write to files for
85258 + which he does not have write access to, etc).
85259 +
85260 + Enabling this option will let you choose from various features
85261 + that prevent the injection and execution of 'foreign' code in
85262 + a program.
85263 +
85264 + This will also break programs that rely on the old behaviour and
85265 + expect that dynamically allocated memory via the malloc() family
85266 + of functions is executable (which it is not). Notable examples
85267 + are the XFree86 4.x server, the java runtime and wine.
85268 +
85269 +config PAX_PAGEEXEC
85270 + bool "Paging based non-executable pages"
85271 + depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
85272 + select S390_SWITCH_AMODE if S390
85273 + select S390_EXEC_PROTECT if S390
85274 + select ARCH_TRACK_EXEC_LIMIT if X86_32
85275 + help
85276 + This implementation is based on the paging feature of the CPU.
85277 + On i386 without hardware non-executable bit support there is a
85278 + variable but usually low performance impact, however on Intel's
85279 + P4 core based CPUs it is very high so you should not enable this
85280 + for kernels meant to be used on such CPUs.
85281 +
85282 + On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
85283 + with hardware non-executable bit support there is no performance
85284 + impact, on ppc the impact is negligible.
85285 +
85286 + Note that several architectures require various emulations due to
85287 + badly designed userland ABIs, this will cause a performance impact
85288 + but will disappear as soon as userland is fixed. For example, ppc
85289 + userland MUST have been built with secure-plt by a recent toolchain.
85290 +
85291 +config PAX_SEGMEXEC
85292 + bool "Segmentation based non-executable pages"
85293 + depends on PAX_NOEXEC && X86_32
85294 + help
85295 + This implementation is based on the segmentation feature of the
85296 + CPU and has a very small performance impact, however applications
85297 + will be limited to a 1.5 GB address space instead of the normal
85298 + 3 GB.
85299 +
85300 +config PAX_EMUTRAMP
85301 + bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
85302 + default y if PARISC
85303 + help
85304 + There are some programs and libraries that for one reason or
85305 + another attempt to execute special small code snippets from
85306 + non-executable memory pages. Most notable examples are the
85307 + signal handler return code generated by the kernel itself and
85308 + the GCC trampolines.
85309 +
85310 + If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
85311 + such programs will no longer work under your kernel.
85312 +
85313 + As a remedy you can say Y here and use the 'chpax' or 'paxctl'
85314 + utilities to enable trampoline emulation for the affected programs
85315 + yet still have the protection provided by the non-executable pages.
85316 +
85317 + On parisc you MUST enable this option and EMUSIGRT as well, otherwise
85318 + your system will not even boot.
85319 +
85320 + Alternatively you can say N here and use the 'chpax' or 'paxctl'
85321 + utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
85322 + for the affected files.
85323 +
85324 + NOTE: enabling this feature *may* open up a loophole in the
85325 + protection provided by non-executable pages that an attacker
85326 + could abuse. Therefore the best solution is to not have any
85327 + files on your system that would require this option. This can
85328 + be achieved by not using libc5 (which relies on the kernel
85329 + signal handler return code) and not using or rewriting programs
85330 + that make use of the nested function implementation of GCC.
85331 + Skilled users can just fix GCC itself so that it implements
85332 + nested function calls in a way that does not interfere with PaX.
85333 +
85334 +config PAX_EMUSIGRT
85335 + bool "Automatically emulate sigreturn trampolines"
85336 + depends on PAX_EMUTRAMP && PARISC
85337 + default y
85338 + help
85339 + Enabling this option will have the kernel automatically detect
85340 + and emulate signal return trampolines executing on the stack
85341 + that would otherwise lead to task termination.
85342 +
85343 + This solution is intended as a temporary one for users with
85344 + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
85345 + Modula-3 runtime, etc) or executables linked to such, basically
85346 + everything that does not specify its own SA_RESTORER function in
85347 + normal executable memory like glibc 2.1+ does.
85348 +
85349 + On parisc you MUST enable this option, otherwise your system will
85350 + not even boot.
85351 +
85352 + NOTE: this feature cannot be disabled on a per executable basis
85353 + and since it *does* open up a loophole in the protection provided
85354 + by non-executable pages, the best solution is to not have any
85355 + files on your system that would require this option.
85356 +
85357 +config PAX_MPROTECT
85358 + bool "Restrict mprotect()"
85359 + depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
85360 + help
85361 + Enabling this option will prevent programs from
85362 + - changing the executable status of memory pages that were
85363 + not originally created as executable,
85364 + - making read-only executable pages writable again,
85365 + - creating executable pages from anonymous memory,
85366 + - making read-only-after-relocations (RELRO) data pages writable again.
85367 +
85368 + You should say Y here to complete the protection provided by
85369 + the enforcement of non-executable pages.
85370 +
85371 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
85372 + this feature on a per file basis.
85373 +
85374 +config PAX_MPROTECT_COMPAT
85375 + bool "Use legacy/compat protection demoting (read help)"
85376 + depends on PAX_MPROTECT
85377 + default n
85378 + help
85379 + The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
85380 + by sending the proper error code to the application. For some broken
85381 + userland, this can cause problems with Python or other applications. The
85382 + current implementation however allows for applications like clamav to
85383 + detect if JIT compilation/execution is allowed and to fall back gracefully
85384 + to an interpreter-based mode if it does not. While we encourage everyone
85385 + to use the current implementation as-is and push upstream to fix broken
85386 + userland (note that the RWX logging option can assist with this), in some
85387 + environments this may not be possible. Having to disable MPROTECT
85388 + completely on certain binaries reduces the security benefit of PaX,
85389 + so this option is provided for those environments to revert to the old
85390 + behavior.
85391 +
85392 +config PAX_ELFRELOCS
85393 + bool "Allow ELF text relocations (read help)"
85394 + depends on PAX_MPROTECT
85395 + default n
85396 + help
85397 + Non-executable pages and mprotect() restrictions are effective
85398 + in preventing the introduction of new executable code into an
85399 + attacked task's address space. There remain only two venues
85400 + for this kind of attack: if the attacker can execute already
85401 + existing code in the attacked task then he can either have it
85402 + create and mmap() a file containing his code or have it mmap()
85403 + an already existing ELF library that does not have position
85404 + independent code in it and use mprotect() on it to make it
85405 + writable and copy his code there. While protecting against
85406 + the former approach is beyond PaX, the latter can be prevented
85407 + by having only PIC ELF libraries on one's system (which do not
85408 + need to relocate their code). If you are sure this is your case,
85409 + as is the case with all modern Linux distributions, then leave
85410 + this option disabled. You should say 'n' here.
85411 +
85412 +config PAX_ETEXECRELOCS
85413 + bool "Allow ELF ET_EXEC text relocations"
85414 + depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
85415 + select PAX_ELFRELOCS
85416 + default y
85417 + help
85418 + On some architectures there are incorrectly created applications
85419 + that require text relocations and would not work without enabling
85420 + this option. If you are an alpha, ia64 or parisc user, you should
85421 + enable this option and disable it once you have made sure that
85422 + none of your applications need it.
85423 +
85424 +config PAX_EMUPLT
85425 + bool "Automatically emulate ELF PLT"
85426 + depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
85427 + default y
85428 + help
85429 + Enabling this option will have the kernel automatically detect
85430 + and emulate the Procedure Linkage Table entries in ELF files.
85431 + On some architectures such entries are in writable memory, and
85432 + become non-executable leading to task termination. Therefore
85433 + it is mandatory that you enable this option on alpha, parisc,
85434 + sparc and sparc64, otherwise your system would not even boot.
85435 +
85436 + NOTE: this feature *does* open up a loophole in the protection
85437 + provided by the non-executable pages, therefore the proper
85438 + solution is to modify the toolchain to produce a PLT that does
85439 + not need to be writable.
85440 +
85441 +config PAX_DLRESOLVE
85442 + bool 'Emulate old glibc resolver stub'
85443 + depends on PAX_EMUPLT && SPARC
85444 + default n
85445 + help
85446 + This option is needed if userland has an old glibc (before 2.4)
85447 + that puts a 'save' instruction into the runtime generated resolver
85448 + stub that needs special emulation.
85449 +
85450 +config PAX_KERNEXEC
85451 + bool "Enforce non-executable kernel pages"
85452 + depends on (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN
85453 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
85454 + select PAX_KERNEXEC_PLUGIN if X86_64
85455 + help
85456 + This is the kernel land equivalent of PAGEEXEC and MPROTECT,
85457 + that is, enabling this option will make it harder to inject
85458 + and execute 'foreign' code in kernel memory itself.
85459 +
85460 + Note that on x86_64 kernels there is a known regression when
85461 + this feature and KVM/VMX are both enabled in the host kernel.
85462 +
85463 +choice
85464 + prompt "Return Address Instrumentation Method"
85465 + default PAX_KERNEXEC_PLUGIN_METHOD_BTS
85466 + depends on PAX_KERNEXEC_PLUGIN
85467 + help
85468 + Select the method used to instrument function pointer dereferences.
85469 + Note that binary modules cannot be instrumented by this approach.
85470 +
85471 + config PAX_KERNEXEC_PLUGIN_METHOD_BTS
85472 + bool "bts"
85473 + help
85474 + This method is compatible with binary only modules but has
85475 + a higher runtime overhead.
85476 +
85477 + config PAX_KERNEXEC_PLUGIN_METHOD_OR
85478 + bool "or"
85479 + depends on !PARAVIRT
85480 + help
85481 + This method is incompatible with binary only modules but has
85482 + a lower runtime overhead.
85483 +endchoice
85484 +
85485 +config PAX_KERNEXEC_PLUGIN_METHOD
85486 + string
85487 + default "bts" if PAX_KERNEXEC_PLUGIN_METHOD_BTS
85488 + default "or" if PAX_KERNEXEC_PLUGIN_METHOD_OR
85489 + default ""
85490 +
85491 +config PAX_KERNEXEC_MODULE_TEXT
85492 + int "Minimum amount of memory reserved for module code"
85493 + default "4"
85494 + depends on PAX_KERNEXEC && X86_32 && MODULES
85495 + help
85496 + Due to implementation details the kernel must reserve a fixed
85497 + amount of memory for module code at compile time that cannot be
85498 + changed at runtime. Here you can specify the minimum amount
85499 + in MB that will be reserved. Due to the same implementation
85500 + details this size will always be rounded up to the next 2/4 MB
85501 + boundary (depends on PAE) so the actually available memory for
85502 + module code will usually be more than this minimum.
85503 +
85504 + The default 4 MB should be enough for most users but if you have
85505 + an excessive number of modules (e.g., most distribution configs
85506 + compile many drivers as modules) or use huge modules such as
85507 + nvidia's kernel driver, you will need to adjust this amount.
85508 + A good rule of thumb is to look at your currently loaded kernel
85509 + modules and add up their sizes.
85510 +
85511 +endmenu
85512 +
85513 +menu "Address Space Layout Randomization"
85514 + depends on PAX
85515 +
85516 +config PAX_ASLR
85517 + bool "Address Space Layout Randomization"
85518 + help
85519 + Many if not most exploit techniques rely on the knowledge of
85520 + certain addresses in the attacked program. The following options
85521 + will allow the kernel to apply a certain amount of randomization
85522 + to specific parts of the program thereby forcing an attacker to
85523 + guess them in most cases. Any failed guess will most likely crash
85524 + the attacked program which allows the kernel to detect such attempts
85525 + and react on them. PaX itself provides no reaction mechanisms,
85526 + instead it is strongly encouraged that you make use of Nergal's
85527 + segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
85528 + (http://www.grsecurity.net/) built-in crash detection features or
85529 + develop one yourself.
85530 +
85531 + By saying Y here you can choose to randomize the following areas:
85532 + - top of the task's kernel stack
85533 + - top of the task's userland stack
85534 + - base address for mmap() requests that do not specify one
85535 + (this includes all libraries)
85536 + - base address of the main executable
85537 +
85538 + It is strongly recommended to say Y here as address space layout
85539 + randomization has negligible impact on performance yet it provides
85540 + a very effective protection.
85541 +
85542 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
85543 + this feature on a per file basis.
85544 +
85545 +config PAX_RANDKSTACK
85546 + bool "Randomize kernel stack base"
85547 + depends on X86_TSC && X86
85548 + help
85549 + By saying Y here the kernel will randomize every task's kernel
85550 + stack on every system call. This will not only force an attacker
85551 + to guess it but also prevent him from making use of possible
85552 + leaked information about it.
85553 +
85554 + Since the kernel stack is a rather scarce resource, randomization
85555 + may cause unexpected stack overflows, therefore you should very
85556 + carefully test your system. Note that once enabled in the kernel
85557 + configuration, this feature cannot be disabled on a per file basis.
85558 +
85559 +config PAX_RANDUSTACK
85560 + bool "Randomize user stack base"
85561 + depends on PAX_ASLR
85562 + help
85563 + By saying Y here the kernel will randomize every task's userland
85564 + stack. The randomization is done in two steps where the second
85565 + one may apply a big amount of shift to the top of the stack and
85566 + cause problems for programs that want to use lots of memory (more
85567 + than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
85568 + For this reason the second step can be controlled by 'chpax' or
85569 + 'paxctl' on a per file basis.
85570 +
85571 +config PAX_RANDMMAP
85572 + bool "Randomize mmap() base"
85573 + depends on PAX_ASLR
85574 + help
85575 + By saying Y here the kernel will use a randomized base address for
85576 + mmap() requests that do not specify one themselves. As a result
85577 + all dynamically loaded libraries will appear at random addresses
85578 + and therefore be harder to exploit by a technique where an attacker
85579 + attempts to execute library code for his purposes (e.g. spawn a
85580 + shell from an exploited program that is running at an elevated
85581 + privilege level).
85582 +
85583 + Furthermore, if a program is relinked as a dynamic ELF file, its
85584 + base address will be randomized as well, completing the full
85585 + randomization of the address space layout. Attacking such programs
85586 + becomes a guess game. You can find an example of doing this at
85587 + http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
85588 + http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
85589 +
85590 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
85591 + feature on a per file basis.
85592 +
85593 +endmenu
85594 +
85595 +menu "Miscellaneous hardening features"
85596 +
85597 +config PAX_MEMORY_SANITIZE
85598 + bool "Sanitize all freed memory"
85599 + depends on !HIBERNATION
85600 + help
85601 + By saying Y here the kernel will erase memory pages as soon as they
85602 + are freed. This in turn reduces the lifetime of data stored in the
85603 + pages, making it less likely that sensitive information such as
85604 + passwords, cryptographic secrets, etc stay in memory for too long.
85605 +
85606 + This is especially useful for programs whose runtime is short, long
85607 + lived processes and the kernel itself benefit from this as long as
85608 + they operate on whole memory pages and ensure timely freeing of pages
85609 + that may hold sensitive information.
85610 +
85611 + The tradeoff is performance impact, on a single CPU system kernel
85612 + compilation sees a 3% slowdown, other systems and workloads may vary
85613 + and you are advised to test this feature on your expected workload
85614 + before deploying it.
85615 +
85616 + Note that this feature does not protect data stored in live pages,
85617 + e.g., process memory swapped to disk may stay there for a long time.
85618 +
85619 +config PAX_MEMORY_STACKLEAK
85620 + bool "Sanitize kernel stack"
85621 + depends on X86
85622 + help
85623 + By saying Y here the kernel will erase the kernel stack before it
85624 + returns from a system call. This in turn reduces the information
85625 + that a kernel stack leak bug can reveal.
85626 +
85627 + Note that such a bug can still leak information that was put on
85628 + the stack by the current system call (the one eventually triggering
85629 + the bug) but traces of earlier system calls on the kernel stack
85630 + cannot leak anymore.
85631 +
85632 + The tradeoff is performance impact, on a single CPU system kernel
85633 + compilation sees a 1% slowdown, other systems and workloads may vary
85634 + and you are advised to test this feature on your expected workload
85635 + before deploying it.
85636 +
85637 + Note: full support for this feature requires gcc with plugin support
85638 + so make sure your compiler is at least gcc 4.5.0. Using older gcc
85639 + versions means that functions with large enough stack frames may
85640 + leave uninitialized memory behind that may be exposed to a later
85641 + syscall leaking the stack.
85642 +
85643 +config PAX_MEMORY_UDEREF
85644 + bool "Prevent invalid userland pointer dereference"
85645 + depends on X86 && !UML_X86 && !XEN
85646 + select PAX_PER_CPU_PGD if X86_64
85647 + help
85648 + By saying Y here the kernel will be prevented from dereferencing
85649 + userland pointers in contexts where the kernel expects only kernel
85650 + pointers. This is both a useful runtime debugging feature and a
85651 + security measure that prevents exploiting a class of kernel bugs.
85652 +
85653 + The tradeoff is that some virtualization solutions may experience
85654 + a huge slowdown and therefore you should not enable this feature
85655 + for kernels meant to run in such environments. Whether a given VM
85656 + solution is affected or not is best determined by simply trying it
85657 + out, the performance impact will be obvious right on boot as this
85658 + mechanism engages from very early on. A good rule of thumb is that
85659 + VMs running on CPUs without hardware virtualization support (i.e.,
85660 + the majority of IA-32 CPUs) will likely experience the slowdown.
85661 +
85662 +config PAX_REFCOUNT
85663 + bool "Prevent various kernel object reference counter overflows"
85664 + depends on GRKERNSEC && (X86 || SPARC64)
85665 + help
85666 + By saying Y here the kernel will detect and prevent overflowing
85667 + various (but not all) kinds of object reference counters. Such
85668 + overflows can normally occur due to bugs only and are often, if
85669 + not always, exploitable.
85670 +
85671 + The tradeoff is that data structures protected by an overflowed
85672 + refcount will never be freed and therefore will leak memory. Note
85673 + that this leak also happens even without this protection but in
85674 + that case the overflow can eventually trigger the freeing of the
85675 + data structure while it is still being used elsewhere, resulting
85676 + in the exploitable situation that this feature prevents.
85677 +
85678 + Since this has a negligible performance impact, you should enable
85679 + this feature.
85680 +
85681 +config PAX_USERCOPY
85682 + bool "Harden heap object copies between kernel and userland"
85683 + depends on X86 || PPC || SPARC || ARM
85684 + depends on GRKERNSEC && (SLAB || SLUB || SLOB)
85685 + help
85686 + By saying Y here the kernel will enforce the size of heap objects
85687 + when they are copied in either direction between the kernel and
85688 + userland, even if only a part of the heap object is copied.
85689 +
85690 + Specifically, this checking prevents information leaking from the
85691 + kernel heap during kernel to userland copies (if the kernel heap
85692 + object is otherwise fully initialized) and prevents kernel heap
85693 + overflows during userland to kernel copies.
85694 +
85695 + Note that the current implementation provides the strictest bounds
85696 + checks for the SLUB allocator.
85697 +
85698 + Enabling this option also enables per-slab cache protection against
85699 + data in a given cache being copied into/out of via userland
85700 + accessors. Though the whitelist of regions will be reduced over
85701 + time, it notably protects important data structures like task structs.
85702 +
85703 +
85704 + If frame pointers are enabled on x86, this option will also
85705 + restrict copies into and out of the kernel stack to local variables
85706 + within a single frame.
85707 +
85708 + Since this has a negligible performance impact, you should enable
85709 + this feature.
85710 +
85711 +endmenu
85712 +
85713 +endmenu
85714 +
85715 config KEYS
85716 bool "Enable access key retention support"
85717 help
85718 @@ -146,7 +766,7 @@ config INTEL_TXT
85719 config LSM_MMAP_MIN_ADDR
85720 int "Low address space for LSM to protect from user allocation"
85721 depends on SECURITY && SECURITY_SELINUX
85722 - default 65536
85723 + default 32768
85724 help
85725 This is the portion of low virtual memory which should be protected
85726 from userspace allocation. Keeping a user from writing to low pages
85727 diff --git a/security/capability.c b/security/capability.c
85728 index fce07a7..5f12858 100644
85729 --- a/security/capability.c
85730 +++ b/security/capability.c
85731 @@ -890,7 +890,7 @@ static void cap_audit_rule_free(void *lsmrule)
85732 }
85733 #endif /* CONFIG_AUDIT */
85734
85735 -struct security_operations default_security_ops = {
85736 +struct security_operations default_security_ops __read_only = {
85737 .name = "default",
85738 };
85739
85740 diff --git a/security/commoncap.c b/security/commoncap.c
85741 index fe30751..aaba312 100644
85742 --- a/security/commoncap.c
85743 +++ b/security/commoncap.c
85744 @@ -27,6 +27,8 @@
85745 #include <linux/sched.h>
85746 #include <linux/prctl.h>
85747 #include <linux/securebits.h>
85748 +#include <linux/syslog.h>
85749 +#include <net/sock.h>
85750
85751 /*
85752 * If a non-root user executes a setuid-root binary in
85753 @@ -50,9 +52,18 @@ static void warn_setuid_and_fcaps_mixed(char *fname)
85754 }
85755 }
85756
85757 +#ifdef CONFIG_NET
85758 +extern kernel_cap_t gr_cap_rtnetlink(struct sock *sk);
85759 +#endif
85760 +
85761 int cap_netlink_send(struct sock *sk, struct sk_buff *skb)
85762 {
85763 +#ifdef CONFIG_NET
85764 + NETLINK_CB(skb).eff_cap = gr_cap_rtnetlink(sk);
85765 +#else
85766 NETLINK_CB(skb).eff_cap = current_cap();
85767 +#endif
85768 +
85769 return 0;
85770 }
85771
85772 @@ -582,6 +593,9 @@ int cap_bprm_secureexec(struct linux_binprm *bprm)
85773 {
85774 const struct cred *cred = current_cred();
85775
85776 + if (gr_acl_enable_at_secure())
85777 + return 1;
85778 +
85779 if (cred->uid != 0) {
85780 if (bprm->cap_effective)
85781 return 1;
85782 @@ -956,13 +970,18 @@ error:
85783 /**
85784 * cap_syslog - Determine whether syslog function is permitted
85785 * @type: Function requested
85786 + * @from_file: Whether this request came from an open file (i.e. /proc)
85787 *
85788 * Determine whether the current process is permitted to use a particular
85789 * syslog function, returning 0 if permission is granted, -ve if not.
85790 */
85791 -int cap_syslog(int type)
85792 +int cap_syslog(int type, bool from_file)
85793 {
85794 - if ((type != 3 && type != 10) && !capable(CAP_SYS_ADMIN))
85795 + /* /proc/kmsg can be opened by CAP_SYS_ADMIN */
85796 + if (type != SYSLOG_ACTION_OPEN && from_file)
85797 + return 0;
85798 + if ((type != SYSLOG_ACTION_READ_ALL &&
85799 + type != SYSLOG_ACTION_SIZE_BUFFER) && !capable(CAP_SYS_ADMIN))
85800 return -EPERM;
85801 return 0;
85802 }
85803 diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
85804 index 165eb53..b1db4eb 100644
85805 --- a/security/integrity/ima/ima.h
85806 +++ b/security/integrity/ima/ima.h
85807 @@ -84,8 +84,8 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
85808 extern spinlock_t ima_queue_lock;
85809
85810 struct ima_h_table {
85811 - atomic_long_t len; /* number of stored measurements in the list */
85812 - atomic_long_t violations;
85813 + atomic_long_unchecked_t len; /* number of stored measurements in the list */
85814 + atomic_long_unchecked_t violations;
85815 struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
85816 };
85817 extern struct ima_h_table ima_htable;
85818 diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
85819 index 852bf85..35d6df3 100644
85820 --- a/security/integrity/ima/ima_api.c
85821 +++ b/security/integrity/ima/ima_api.c
85822 @@ -74,7 +74,7 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
85823 int result;
85824
85825 /* can overflow, only indicator */
85826 - atomic_long_inc(&ima_htable.violations);
85827 + atomic_long_inc_unchecked(&ima_htable.violations);
85828
85829 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
85830 if (!entry) {
85831 diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
85832 index 0c72c9c..433e29b 100644
85833 --- a/security/integrity/ima/ima_fs.c
85834 +++ b/security/integrity/ima/ima_fs.c
85835 @@ -27,12 +27,12 @@
85836 static int valid_policy = 1;
85837 #define TMPBUFLEN 12
85838 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
85839 - loff_t *ppos, atomic_long_t *val)
85840 + loff_t *ppos, atomic_long_unchecked_t *val)
85841 {
85842 char tmpbuf[TMPBUFLEN];
85843 ssize_t len;
85844
85845 - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
85846 + len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
85847 return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
85848 }
85849
85850 diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
85851 index e19316d..339f7ae 100644
85852 --- a/security/integrity/ima/ima_queue.c
85853 +++ b/security/integrity/ima/ima_queue.c
85854 @@ -78,7 +78,7 @@ static int ima_add_digest_entry(struct ima_template_entry *entry)
85855 INIT_LIST_HEAD(&qe->later);
85856 list_add_tail_rcu(&qe->later, &ima_measurements);
85857
85858 - atomic_long_inc(&ima_htable.len);
85859 + atomic_long_inc_unchecked(&ima_htable.len);
85860 key = ima_hash_key(entry->digest);
85861 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
85862 return 0;
85863 diff --git a/security/keys/keyring.c b/security/keys/keyring.c
85864 index e031952..c9a535d 100644
85865 --- a/security/keys/keyring.c
85866 +++ b/security/keys/keyring.c
85867 @@ -214,15 +214,15 @@ static long keyring_read(const struct key *keyring,
85868 ret = -EFAULT;
85869
85870 for (loop = 0; loop < klist->nkeys; loop++) {
85871 + key_serial_t serial;
85872 key = klist->keys[loop];
85873 + serial = key->serial;
85874
85875 tmp = sizeof(key_serial_t);
85876 if (tmp > buflen)
85877 tmp = buflen;
85878
85879 - if (copy_to_user(buffer,
85880 - &key->serial,
85881 - tmp) != 0)
85882 + if (copy_to_user(buffer, &serial, tmp))
85883 goto error;
85884
85885 buflen -= tmp;
85886 diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
85887 index 931cfda..e71808a 100644
85888 --- a/security/keys/process_keys.c
85889 +++ b/security/keys/process_keys.c
85890 @@ -208,7 +208,7 @@ static int install_process_keyring(void)
85891 ret = install_process_keyring_to_cred(new);
85892 if (ret < 0) {
85893 abort_creds(new);
85894 - return ret != -EEXIST ?: 0;
85895 + return ret != -EEXIST ? ret : 0;
85896 }
85897
85898 return commit_creds(new);
85899 diff --git a/security/min_addr.c b/security/min_addr.c
85900 index d9f9425..c28cef4 100644
85901 --- a/security/min_addr.c
85902 +++ b/security/min_addr.c
85903 @@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
85904 */
85905 static void update_mmap_min_addr(void)
85906 {
85907 +#ifndef SPARC
85908 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
85909 if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
85910 mmap_min_addr = dac_mmap_min_addr;
85911 @@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
85912 #else
85913 mmap_min_addr = dac_mmap_min_addr;
85914 #endif
85915 +#endif
85916 }
85917
85918 /*
85919 diff --git a/security/root_plug.c b/security/root_plug.c
85920 index 2f7ffa6..0455400 100644
85921 --- a/security/root_plug.c
85922 +++ b/security/root_plug.c
85923 @@ -70,7 +70,7 @@ static int rootplug_bprm_check_security (struct linux_binprm *bprm)
85924 return 0;
85925 }
85926
85927 -static struct security_operations rootplug_security_ops = {
85928 +static struct security_operations rootplug_security_ops __read_only = {
85929 .bprm_check_security = rootplug_bprm_check_security,
85930 };
85931
85932 diff --git a/security/security.c b/security/security.c
85933 index c4c6732..7abf13b 100644
85934 --- a/security/security.c
85935 +++ b/security/security.c
85936 @@ -24,7 +24,7 @@ static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1];
85937 extern struct security_operations default_security_ops;
85938 extern void security_fixup_ops(struct security_operations *ops);
85939
85940 -struct security_operations *security_ops; /* Initialized to NULL */
85941 +struct security_operations *security_ops __read_only; /* Initialized to NULL */
85942
85943 static inline int verify(struct security_operations *ops)
85944 {
85945 @@ -106,7 +106,7 @@ int __init security_module_enable(struct security_operations *ops)
85946 * If there is already a security module registered with the kernel,
85947 * an error will be returned. Otherwise %0 is returned on success.
85948 */
85949 -int register_security(struct security_operations *ops)
85950 +int __init register_security(struct security_operations *ops)
85951 {
85952 if (verify(ops)) {
85953 printk(KERN_DEBUG "%s could not verify "
85954 @@ -199,9 +199,9 @@ int security_quota_on(struct dentry *dentry)
85955 return security_ops->quota_on(dentry);
85956 }
85957
85958 -int security_syslog(int type)
85959 +int security_syslog(int type, bool from_file)
85960 {
85961 - return security_ops->syslog(type);
85962 + return security_ops->syslog(type, from_file);
85963 }
85964
85965 int security_settime(struct timespec *ts, struct timezone *tz)
85966 diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
85967 index a106754..ca3a589 100644
85968 --- a/security/selinux/hooks.c
85969 +++ b/security/selinux/hooks.c
85970 @@ -76,6 +76,7 @@
85971 #include <linux/selinux.h>
85972 #include <linux/mutex.h>
85973 #include <linux/posix-timers.h>
85974 +#include <linux/syslog.h>
85975
85976 #include "avc.h"
85977 #include "objsec.h"
85978 @@ -131,7 +132,7 @@ int selinux_enabled = 1;
85979 * Minimal support for a secondary security module,
85980 * just to allow the use of the capability module.
85981 */
85982 -static struct security_operations *secondary_ops;
85983 +static struct security_operations *secondary_ops __read_only;
85984
85985 /* Lists of inode and superblock security structures initialized
85986 before the policy was loaded. */
85987 @@ -2050,29 +2051,30 @@ static int selinux_quota_on(struct dentry *dentry)
85988 return dentry_has_perm(cred, NULL, dentry, FILE__QUOTAON);
85989 }
85990
85991 -static int selinux_syslog(int type)
85992 +static int selinux_syslog(int type, bool from_file)
85993 {
85994 int rc;
85995
85996 - rc = cap_syslog(type);
85997 + rc = cap_syslog(type, from_file);
85998 if (rc)
85999 return rc;
86000
86001 switch (type) {
86002 - case 3: /* Read last kernel messages */
86003 - case 10: /* Return size of the log buffer */
86004 + case SYSLOG_ACTION_READ_ALL: /* Read last kernel messages */
86005 + case SYSLOG_ACTION_SIZE_BUFFER: /* Return size of the log buffer */
86006 rc = task_has_system(current, SYSTEM__SYSLOG_READ);
86007 break;
86008 - case 6: /* Disable logging to console */
86009 - case 7: /* Enable logging to console */
86010 - case 8: /* Set level of messages printed to console */
86011 + case SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging to console */
86012 + case SYSLOG_ACTION_CONSOLE_ON: /* Enable logging to console */
86013 + /* Set level of messages printed to console */
86014 + case SYSLOG_ACTION_CONSOLE_LEVEL:
86015 rc = task_has_system(current, SYSTEM__SYSLOG_CONSOLE);
86016 break;
86017 - case 0: /* Close log */
86018 - case 1: /* Open log */
86019 - case 2: /* Read from log */
86020 - case 4: /* Read/clear last kernel messages */
86021 - case 5: /* Clear ring buffer */
86022 + case SYSLOG_ACTION_CLOSE: /* Close log */
86023 + case SYSLOG_ACTION_OPEN: /* Open log */
86024 + case SYSLOG_ACTION_READ: /* Read from log */
86025 + case SYSLOG_ACTION_READ_CLEAR: /* Read/clear last kernel messages */
86026 + case SYSLOG_ACTION_CLEAR: /* Clear ring buffer */
86027 default:
86028 rc = task_has_system(current, SYSTEM__SYSLOG_MOD);
86029 break;
86030 @@ -5457,7 +5459,7 @@ static int selinux_key_getsecurity(struct key *key, char **_buffer)
86031
86032 #endif
86033
86034 -static struct security_operations selinux_ops = {
86035 +static struct security_operations selinux_ops __read_only = {
86036 .name = "selinux",
86037
86038 .ptrace_access_check = selinux_ptrace_access_check,
86039 @@ -5841,7 +5843,9 @@ int selinux_disable(void)
86040 avc_disable();
86041
86042 /* Reset security_ops to the secondary module, dummy or capability. */
86043 + pax_open_kernel();
86044 security_ops = secondary_ops;
86045 + pax_close_kernel();
86046
86047 /* Unregister netfilter hooks. */
86048 selinux_nf_ip_exit();
86049 diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
86050 index 13128f9..c23c736 100644
86051 --- a/security/selinux/include/xfrm.h
86052 +++ b/security/selinux/include/xfrm.h
86053 @@ -48,7 +48,7 @@ int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall);
86054
86055 static inline void selinux_xfrm_notify_policyload(void)
86056 {
86057 - atomic_inc(&flow_cache_genid);
86058 + atomic_inc_unchecked(&flow_cache_genid);
86059 }
86060 #else
86061 static inline int selinux_xfrm_enabled(void)
86062 diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
86063 index ff17820..d68084c 100644
86064 --- a/security/selinux/ss/services.c
86065 +++ b/security/selinux/ss/services.c
86066 @@ -1715,6 +1715,8 @@ int security_load_policy(void *data, size_t len)
86067 int rc = 0;
86068 struct policy_file file = { data, len }, *fp = &file;
86069
86070 + pax_track_stack();
86071 +
86072 if (!ss_initialized) {
86073 avtab_cache_init();
86074 if (policydb_read(&policydb, fp)) {
86075 diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
86076 index c33b6bb..b51f19e 100644
86077 --- a/security/smack/smack_lsm.c
86078 +++ b/security/smack/smack_lsm.c
86079 @@ -157,12 +157,12 @@ static int smack_ptrace_traceme(struct task_struct *ptp)
86080 *
86081 * Returns 0 on success, error code otherwise.
86082 */
86083 -static int smack_syslog(int type)
86084 +static int smack_syslog(int type, bool from_file)
86085 {
86086 int rc;
86087 char *sp = current_security();
86088
86089 - rc = cap_syslog(type);
86090 + rc = cap_syslog(type, from_file);
86091 if (rc != 0)
86092 return rc;
86093
86094 @@ -3073,7 +3073,7 @@ static int smack_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
86095 return 0;
86096 }
86097
86098 -struct security_operations smack_ops = {
86099 +struct security_operations smack_ops __read_only = {
86100 .name = "smack",
86101
86102 .ptrace_access_check = smack_ptrace_access_check,
86103 diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
86104 index 9548a09..9a5f384 100644
86105 --- a/security/tomoyo/tomoyo.c
86106 +++ b/security/tomoyo/tomoyo.c
86107 @@ -275,7 +275,7 @@ static int tomoyo_dentry_open(struct file *f, const struct cred *cred)
86108 * tomoyo_security_ops is a "struct security_operations" which is used for
86109 * registering TOMOYO.
86110 */
86111 -static struct security_operations tomoyo_security_ops = {
86112 +static struct security_operations tomoyo_security_ops __read_only = {
86113 .name = "tomoyo",
86114 .cred_alloc_blank = tomoyo_cred_alloc_blank,
86115 .cred_prepare = tomoyo_cred_prepare,
86116 diff --git a/sound/aoa/codecs/onyx.c b/sound/aoa/codecs/onyx.c
86117 index 84bb07d..c2ab6b6 100644
86118 --- a/sound/aoa/codecs/onyx.c
86119 +++ b/sound/aoa/codecs/onyx.c
86120 @@ -53,7 +53,7 @@ struct onyx {
86121 spdif_locked:1,
86122 analog_locked:1,
86123 original_mute:2;
86124 - int open_count;
86125 + local_t open_count;
86126 struct codec_info *codec_info;
86127
86128 /* mutex serializes concurrent access to the device
86129 @@ -752,7 +752,7 @@ static int onyx_open(struct codec_info_item *cii,
86130 struct onyx *onyx = cii->codec_data;
86131
86132 mutex_lock(&onyx->mutex);
86133 - onyx->open_count++;
86134 + local_inc(&onyx->open_count);
86135 mutex_unlock(&onyx->mutex);
86136
86137 return 0;
86138 @@ -764,8 +764,7 @@ static int onyx_close(struct codec_info_item *cii,
86139 struct onyx *onyx = cii->codec_data;
86140
86141 mutex_lock(&onyx->mutex);
86142 - onyx->open_count--;
86143 - if (!onyx->open_count)
86144 + if (local_dec_and_test(&onyx->open_count))
86145 onyx->spdif_locked = onyx->analog_locked = 0;
86146 mutex_unlock(&onyx->mutex);
86147
86148 diff --git a/sound/aoa/codecs/onyx.h b/sound/aoa/codecs/onyx.h
86149 index ffd2025..df062c9 100644
86150 --- a/sound/aoa/codecs/onyx.h
86151 +++ b/sound/aoa/codecs/onyx.h
86152 @@ -11,6 +11,7 @@
86153 #include <linux/i2c.h>
86154 #include <asm/pmac_low_i2c.h>
86155 #include <asm/prom.h>
86156 +#include <asm/local.h>
86157
86158 /* PCM3052 register definitions */
86159
86160 diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
86161 index d9c9635..bc0a5a2 100644
86162 --- a/sound/core/oss/pcm_oss.c
86163 +++ b/sound/core/oss/pcm_oss.c
86164 @@ -1395,7 +1395,7 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
86165 }
86166 } else {
86167 tmp = snd_pcm_oss_write2(substream,
86168 - (const char __force *)buf,
86169 + (const char __force_kernel *)buf,
86170 runtime->oss.period_bytes, 0);
86171 if (tmp <= 0)
86172 goto err;
86173 @@ -1483,7 +1483,7 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
86174 xfer += tmp;
86175 runtime->oss.buffer_used -= tmp;
86176 } else {
86177 - tmp = snd_pcm_oss_read2(substream, (char __force *)buf,
86178 + tmp = snd_pcm_oss_read2(substream, (char __force_kernel *)buf,
86179 runtime->oss.period_bytes, 0);
86180 if (tmp <= 0)
86181 goto err;
86182 diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
86183 index 038232d..7dd9e5c 100644
86184 --- a/sound/core/pcm_compat.c
86185 +++ b/sound/core/pcm_compat.c
86186 @@ -30,7 +30,7 @@ static int snd_pcm_ioctl_delay_compat(struct snd_pcm_substream *substream,
86187 int err;
86188
86189 fs = snd_enter_user();
86190 - err = snd_pcm_delay(substream, &delay);
86191 + err = snd_pcm_delay(substream, (snd_pcm_sframes_t __force_user *)&delay);
86192 snd_leave_user(fs);
86193 if (err < 0)
86194 return err;
86195 diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
86196 index e6d2d97..4843949 100644
86197 --- a/sound/core/pcm_native.c
86198 +++ b/sound/core/pcm_native.c
86199 @@ -2747,11 +2747,11 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
86200 switch (substream->stream) {
86201 case SNDRV_PCM_STREAM_PLAYBACK:
86202 result = snd_pcm_playback_ioctl1(NULL, substream, cmd,
86203 - (void __user *)arg);
86204 + (void __force_user *)arg);
86205 break;
86206 case SNDRV_PCM_STREAM_CAPTURE:
86207 result = snd_pcm_capture_ioctl1(NULL, substream, cmd,
86208 - (void __user *)arg);
86209 + (void __force_user *)arg);
86210 break;
86211 default:
86212 result = -EINVAL;
86213 diff --git a/sound/core/seq/seq_device.c b/sound/core/seq/seq_device.c
86214 index 1f99767..14636533 100644
86215 --- a/sound/core/seq/seq_device.c
86216 +++ b/sound/core/seq/seq_device.c
86217 @@ -63,7 +63,7 @@ struct ops_list {
86218 int argsize; /* argument size */
86219
86220 /* operators */
86221 - struct snd_seq_dev_ops ops;
86222 + struct snd_seq_dev_ops *ops;
86223
86224 /* registred devices */
86225 struct list_head dev_list; /* list of devices */
86226 @@ -332,7 +332,7 @@ int snd_seq_device_register_driver(char *id, struct snd_seq_dev_ops *entry,
86227
86228 mutex_lock(&ops->reg_mutex);
86229 /* copy driver operators */
86230 - ops->ops = *entry;
86231 + ops->ops = entry;
86232 ops->driver |= DRIVER_LOADED;
86233 ops->argsize = argsize;
86234
86235 @@ -462,7 +462,7 @@ static int init_device(struct snd_seq_device *dev, struct ops_list *ops)
86236 dev->name, ops->id, ops->argsize, dev->argsize);
86237 return -EINVAL;
86238 }
86239 - if (ops->ops.init_device(dev) >= 0) {
86240 + if (ops->ops->init_device(dev) >= 0) {
86241 dev->status = SNDRV_SEQ_DEVICE_REGISTERED;
86242 ops->num_init_devices++;
86243 } else {
86244 @@ -489,7 +489,7 @@ static int free_device(struct snd_seq_device *dev, struct ops_list *ops)
86245 dev->name, ops->id, ops->argsize, dev->argsize);
86246 return -EINVAL;
86247 }
86248 - if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) {
86249 + if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) {
86250 dev->status = SNDRV_SEQ_DEVICE_FREE;
86251 dev->driver_data = NULL;
86252 ops->num_init_devices--;
86253 diff --git a/sound/drivers/mts64.c b/sound/drivers/mts64.c
86254 index 9284829..ac8e8b2 100644
86255 --- a/sound/drivers/mts64.c
86256 +++ b/sound/drivers/mts64.c
86257 @@ -27,6 +27,7 @@
86258 #include <sound/initval.h>
86259 #include <sound/rawmidi.h>
86260 #include <sound/control.h>
86261 +#include <asm/local.h>
86262
86263 #define CARD_NAME "Miditerminal 4140"
86264 #define DRIVER_NAME "MTS64"
86265 @@ -65,7 +66,7 @@ struct mts64 {
86266 struct pardevice *pardev;
86267 int pardev_claimed;
86268
86269 - int open_count;
86270 + local_t open_count;
86271 int current_midi_output_port;
86272 int current_midi_input_port;
86273 u8 mode[MTS64_NUM_INPUT_PORTS];
86274 @@ -695,7 +696,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
86275 {
86276 struct mts64 *mts = substream->rmidi->private_data;
86277
86278 - if (mts->open_count == 0) {
86279 + if (local_read(&mts->open_count) == 0) {
86280 /* We don't need a spinlock here, because this is just called
86281 if the device has not been opened before.
86282 So there aren't any IRQs from the device */
86283 @@ -703,7 +704,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
86284
86285 msleep(50);
86286 }
86287 - ++(mts->open_count);
86288 + local_inc(&mts->open_count);
86289
86290 return 0;
86291 }
86292 @@ -713,8 +714,7 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
86293 struct mts64 *mts = substream->rmidi->private_data;
86294 unsigned long flags;
86295
86296 - --(mts->open_count);
86297 - if (mts->open_count == 0) {
86298 + if (local_dec_return(&mts->open_count) == 0) {
86299 /* We need the spinlock_irqsave here because we can still
86300 have IRQs at this point */
86301 spin_lock_irqsave(&mts->lock, flags);
86302 @@ -723,8 +723,8 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
86303
86304 msleep(500);
86305
86306 - } else if (mts->open_count < 0)
86307 - mts->open_count = 0;
86308 + } else if (local_read(&mts->open_count) < 0)
86309 + local_set(&mts->open_count, 0);
86310
86311 return 0;
86312 }
86313 diff --git a/sound/drivers/opl4/opl4_lib.c b/sound/drivers/opl4/opl4_lib.c
86314 index 01997f2..cbc1195 100644
86315 --- a/sound/drivers/opl4/opl4_lib.c
86316 +++ b/sound/drivers/opl4/opl4_lib.c
86317 @@ -27,7 +27,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
86318 MODULE_DESCRIPTION("OPL4 driver");
86319 MODULE_LICENSE("GPL");
86320
86321 -static void inline snd_opl4_wait(struct snd_opl4 *opl4)
86322 +static inline void snd_opl4_wait(struct snd_opl4 *opl4)
86323 {
86324 int timeout = 10;
86325 while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0)
86326 diff --git a/sound/drivers/portman2x4.c b/sound/drivers/portman2x4.c
86327 index 60158e2..0a0cc1a 100644
86328 --- a/sound/drivers/portman2x4.c
86329 +++ b/sound/drivers/portman2x4.c
86330 @@ -46,6 +46,7 @@
86331 #include <sound/initval.h>
86332 #include <sound/rawmidi.h>
86333 #include <sound/control.h>
86334 +#include <asm/local.h>
86335
86336 #define CARD_NAME "Portman 2x4"
86337 #define DRIVER_NAME "portman"
86338 @@ -83,7 +84,7 @@ struct portman {
86339 struct pardevice *pardev;
86340 int pardev_claimed;
86341
86342 - int open_count;
86343 + local_t open_count;
86344 int mode[PORTMAN_NUM_INPUT_PORTS];
86345 struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
86346 };
86347 diff --git a/sound/isa/cmi8330.c b/sound/isa/cmi8330.c
86348 index 02f79d2..8691d43 100644
86349 --- a/sound/isa/cmi8330.c
86350 +++ b/sound/isa/cmi8330.c
86351 @@ -173,7 +173,7 @@ struct snd_cmi8330 {
86352
86353 struct snd_pcm *pcm;
86354 struct snd_cmi8330_stream {
86355 - struct snd_pcm_ops ops;
86356 + snd_pcm_ops_no_const ops;
86357 snd_pcm_open_callback_t open;
86358 void *private_data; /* sb or wss */
86359 } streams[2];
86360 diff --git a/sound/oss/sb_audio.c b/sound/oss/sb_audio.c
86361 index 733b014..56ce96f 100644
86362 --- a/sound/oss/sb_audio.c
86363 +++ b/sound/oss/sb_audio.c
86364 @@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
86365 buf16 = (signed short *)(localbuf + localoffs);
86366 while (c)
86367 {
86368 - locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
86369 + locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
86370 if (copy_from_user(lbuf8,
86371 userbuf+useroffs + p,
86372 locallen))
86373 diff --git a/sound/oss/swarm_cs4297a.c b/sound/oss/swarm_cs4297a.c
86374 index 3136c88..28ad950 100644
86375 --- a/sound/oss/swarm_cs4297a.c
86376 +++ b/sound/oss/swarm_cs4297a.c
86377 @@ -2577,7 +2577,6 @@ static int __init cs4297a_init(void)
86378 {
86379 struct cs4297a_state *s;
86380 u32 pwr, id;
86381 - mm_segment_t fs;
86382 int rval;
86383 #ifndef CONFIG_BCM_CS4297A_CSWARM
86384 u64 cfg;
86385 @@ -2667,22 +2666,23 @@ static int __init cs4297a_init(void)
86386 if (!rval) {
86387 char *sb1250_duart_present;
86388
86389 +#if 0
86390 + mm_segment_t fs;
86391 fs = get_fs();
86392 set_fs(KERNEL_DS);
86393 -#if 0
86394 val = SOUND_MASK_LINE;
86395 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
86396 for (i = 0; i < ARRAY_SIZE(initvol); i++) {
86397 val = initvol[i].vol;
86398 mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
86399 }
86400 + set_fs(fs);
86401 // cs4297a_write_ac97(s, 0x18, 0x0808);
86402 #else
86403 // cs4297a_write_ac97(s, 0x5e, 0x180);
86404 cs4297a_write_ac97(s, 0x02, 0x0808);
86405 cs4297a_write_ac97(s, 0x18, 0x0808);
86406 #endif
86407 - set_fs(fs);
86408
86409 list_add(&s->list, &cs4297a_devs);
86410
86411 diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c
86412 index 78288db..0406809 100644
86413 --- a/sound/pci/ac97/ac97_codec.c
86414 +++ b/sound/pci/ac97/ac97_codec.c
86415 @@ -1952,7 +1952,7 @@ static int snd_ac97_dev_disconnect(struct snd_device *device)
86416 }
86417
86418 /* build_ops to do nothing */
86419 -static struct snd_ac97_build_ops null_build_ops;
86420 +static const struct snd_ac97_build_ops null_build_ops;
86421
86422 #ifdef CONFIG_SND_AC97_POWER_SAVE
86423 static void do_update_power(struct work_struct *work)
86424 diff --git a/sound/pci/ac97/ac97_patch.c b/sound/pci/ac97/ac97_patch.c
86425 index eeb2e23..82bf625 100644
86426 --- a/sound/pci/ac97/ac97_patch.c
86427 +++ b/sound/pci/ac97/ac97_patch.c
86428 @@ -371,7 +371,7 @@ static int patch_yamaha_ymf743_build_spdif(struct snd_ac97 *ac97)
86429 return 0;
86430 }
86431
86432 -static struct snd_ac97_build_ops patch_yamaha_ymf743_ops = {
86433 +static const struct snd_ac97_build_ops patch_yamaha_ymf743_ops = {
86434 .build_spdif = patch_yamaha_ymf743_build_spdif,
86435 .build_3d = patch_yamaha_ymf7x3_3d,
86436 };
86437 @@ -455,7 +455,7 @@ static int patch_yamaha_ymf753_post_spdif(struct snd_ac97 * ac97)
86438 return 0;
86439 }
86440
86441 -static struct snd_ac97_build_ops patch_yamaha_ymf753_ops = {
86442 +static const struct snd_ac97_build_ops patch_yamaha_ymf753_ops = {
86443 .build_3d = patch_yamaha_ymf7x3_3d,
86444 .build_post_spdif = patch_yamaha_ymf753_post_spdif
86445 };
86446 @@ -502,7 +502,7 @@ static int patch_wolfson_wm9703_specific(struct snd_ac97 * ac97)
86447 return 0;
86448 }
86449
86450 -static struct snd_ac97_build_ops patch_wolfson_wm9703_ops = {
86451 +static const struct snd_ac97_build_ops patch_wolfson_wm9703_ops = {
86452 .build_specific = patch_wolfson_wm9703_specific,
86453 };
86454
86455 @@ -533,7 +533,7 @@ static int patch_wolfson_wm9704_specific(struct snd_ac97 * ac97)
86456 return 0;
86457 }
86458
86459 -static struct snd_ac97_build_ops patch_wolfson_wm9704_ops = {
86460 +static const struct snd_ac97_build_ops patch_wolfson_wm9704_ops = {
86461 .build_specific = patch_wolfson_wm9704_specific,
86462 };
86463
86464 @@ -555,7 +555,7 @@ static int patch_wolfson_wm9705_specific(struct snd_ac97 * ac97)
86465 return 0;
86466 }
86467
86468 -static struct snd_ac97_build_ops patch_wolfson_wm9705_ops = {
86469 +static const struct snd_ac97_build_ops patch_wolfson_wm9705_ops = {
86470 .build_specific = patch_wolfson_wm9705_specific,
86471 };
86472
86473 @@ -692,7 +692,7 @@ static int patch_wolfson_wm9711_specific(struct snd_ac97 * ac97)
86474 return 0;
86475 }
86476
86477 -static struct snd_ac97_build_ops patch_wolfson_wm9711_ops = {
86478 +static const struct snd_ac97_build_ops patch_wolfson_wm9711_ops = {
86479 .build_specific = patch_wolfson_wm9711_specific,
86480 };
86481
86482 @@ -886,7 +886,7 @@ static void patch_wolfson_wm9713_resume (struct snd_ac97 * ac97)
86483 }
86484 #endif
86485
86486 -static struct snd_ac97_build_ops patch_wolfson_wm9713_ops = {
86487 +static const struct snd_ac97_build_ops patch_wolfson_wm9713_ops = {
86488 .build_specific = patch_wolfson_wm9713_specific,
86489 .build_3d = patch_wolfson_wm9713_3d,
86490 #ifdef CONFIG_PM
86491 @@ -991,7 +991,7 @@ static int patch_sigmatel_stac97xx_specific(struct snd_ac97 * ac97)
86492 return 0;
86493 }
86494
86495 -static struct snd_ac97_build_ops patch_sigmatel_stac9700_ops = {
86496 +static const struct snd_ac97_build_ops patch_sigmatel_stac9700_ops = {
86497 .build_3d = patch_sigmatel_stac9700_3d,
86498 .build_specific = patch_sigmatel_stac97xx_specific
86499 };
86500 @@ -1038,7 +1038,7 @@ static int patch_sigmatel_stac9708_specific(struct snd_ac97 *ac97)
86501 return patch_sigmatel_stac97xx_specific(ac97);
86502 }
86503
86504 -static struct snd_ac97_build_ops patch_sigmatel_stac9708_ops = {
86505 +static const struct snd_ac97_build_ops patch_sigmatel_stac9708_ops = {
86506 .build_3d = patch_sigmatel_stac9708_3d,
86507 .build_specific = patch_sigmatel_stac9708_specific
86508 };
86509 @@ -1267,7 +1267,7 @@ static int patch_sigmatel_stac9758_specific(struct snd_ac97 *ac97)
86510 return 0;
86511 }
86512
86513 -static struct snd_ac97_build_ops patch_sigmatel_stac9758_ops = {
86514 +static const struct snd_ac97_build_ops patch_sigmatel_stac9758_ops = {
86515 .build_3d = patch_sigmatel_stac9700_3d,
86516 .build_specific = patch_sigmatel_stac9758_specific
86517 };
86518 @@ -1342,7 +1342,7 @@ static int patch_cirrus_build_spdif(struct snd_ac97 * ac97)
86519 return 0;
86520 }
86521
86522 -static struct snd_ac97_build_ops patch_cirrus_ops = {
86523 +static const struct snd_ac97_build_ops patch_cirrus_ops = {
86524 .build_spdif = patch_cirrus_build_spdif
86525 };
86526
86527 @@ -1399,7 +1399,7 @@ static int patch_conexant_build_spdif(struct snd_ac97 * ac97)
86528 return 0;
86529 }
86530
86531 -static struct snd_ac97_build_ops patch_conexant_ops = {
86532 +static const struct snd_ac97_build_ops patch_conexant_ops = {
86533 .build_spdif = patch_conexant_build_spdif
86534 };
86535
86536 @@ -1575,7 +1575,7 @@ static void patch_ad1881_chained(struct snd_ac97 * ac97, int unchained_idx, int
86537 }
86538 }
86539
86540 -static struct snd_ac97_build_ops patch_ad1881_build_ops = {
86541 +static const struct snd_ac97_build_ops patch_ad1881_build_ops = {
86542 #ifdef CONFIG_PM
86543 .resume = ad18xx_resume
86544 #endif
86545 @@ -1662,7 +1662,7 @@ static int patch_ad1885_specific(struct snd_ac97 * ac97)
86546 return 0;
86547 }
86548
86549 -static struct snd_ac97_build_ops patch_ad1885_build_ops = {
86550 +static const struct snd_ac97_build_ops patch_ad1885_build_ops = {
86551 .build_specific = &patch_ad1885_specific,
86552 #ifdef CONFIG_PM
86553 .resume = ad18xx_resume
86554 @@ -1689,7 +1689,7 @@ static int patch_ad1886_specific(struct snd_ac97 * ac97)
86555 return 0;
86556 }
86557
86558 -static struct snd_ac97_build_ops patch_ad1886_build_ops = {
86559 +static const struct snd_ac97_build_ops patch_ad1886_build_ops = {
86560 .build_specific = &patch_ad1886_specific,
86561 #ifdef CONFIG_PM
86562 .resume = ad18xx_resume
86563 @@ -1896,7 +1896,7 @@ static int patch_ad1981a_specific(struct snd_ac97 * ac97)
86564 ARRAY_SIZE(snd_ac97_ad1981x_jack_sense));
86565 }
86566
86567 -static struct snd_ac97_build_ops patch_ad1981a_build_ops = {
86568 +static const struct snd_ac97_build_ops patch_ad1981a_build_ops = {
86569 .build_post_spdif = patch_ad198x_post_spdif,
86570 .build_specific = patch_ad1981a_specific,
86571 #ifdef CONFIG_PM
86572 @@ -1952,7 +1952,7 @@ static int patch_ad1981b_specific(struct snd_ac97 *ac97)
86573 ARRAY_SIZE(snd_ac97_ad1981x_jack_sense));
86574 }
86575
86576 -static struct snd_ac97_build_ops patch_ad1981b_build_ops = {
86577 +static const struct snd_ac97_build_ops patch_ad1981b_build_ops = {
86578 .build_post_spdif = patch_ad198x_post_spdif,
86579 .build_specific = patch_ad1981b_specific,
86580 #ifdef CONFIG_PM
86581 @@ -2091,7 +2091,7 @@ static int patch_ad1888_specific(struct snd_ac97 *ac97)
86582 return patch_build_controls(ac97, snd_ac97_ad1888_controls, ARRAY_SIZE(snd_ac97_ad1888_controls));
86583 }
86584
86585 -static struct snd_ac97_build_ops patch_ad1888_build_ops = {
86586 +static const struct snd_ac97_build_ops patch_ad1888_build_ops = {
86587 .build_post_spdif = patch_ad198x_post_spdif,
86588 .build_specific = patch_ad1888_specific,
86589 #ifdef CONFIG_PM
86590 @@ -2140,7 +2140,7 @@ static int patch_ad1980_specific(struct snd_ac97 *ac97)
86591 return patch_build_controls(ac97, &snd_ac97_ad198x_2cmic, 1);
86592 }
86593
86594 -static struct snd_ac97_build_ops patch_ad1980_build_ops = {
86595 +static const struct snd_ac97_build_ops patch_ad1980_build_ops = {
86596 .build_post_spdif = patch_ad198x_post_spdif,
86597 .build_specific = patch_ad1980_specific,
86598 #ifdef CONFIG_PM
86599 @@ -2255,7 +2255,7 @@ static int patch_ad1985_specific(struct snd_ac97 *ac97)
86600 ARRAY_SIZE(snd_ac97_ad1985_controls));
86601 }
86602
86603 -static struct snd_ac97_build_ops patch_ad1985_build_ops = {
86604 +static const struct snd_ac97_build_ops patch_ad1985_build_ops = {
86605 .build_post_spdif = patch_ad198x_post_spdif,
86606 .build_specific = patch_ad1985_specific,
86607 #ifdef CONFIG_PM
86608 @@ -2547,7 +2547,7 @@ static int patch_ad1986_specific(struct snd_ac97 *ac97)
86609 ARRAY_SIZE(snd_ac97_ad1985_controls));
86610 }
86611
86612 -static struct snd_ac97_build_ops patch_ad1986_build_ops = {
86613 +static const struct snd_ac97_build_ops patch_ad1986_build_ops = {
86614 .build_post_spdif = patch_ad198x_post_spdif,
86615 .build_specific = patch_ad1986_specific,
86616 #ifdef CONFIG_PM
86617 @@ -2652,7 +2652,7 @@ static int patch_alc650_specific(struct snd_ac97 * ac97)
86618 return 0;
86619 }
86620
86621 -static struct snd_ac97_build_ops patch_alc650_ops = {
86622 +static const struct snd_ac97_build_ops patch_alc650_ops = {
86623 .build_specific = patch_alc650_specific,
86624 .update_jacks = alc650_update_jacks
86625 };
86626 @@ -2804,7 +2804,7 @@ static int patch_alc655_specific(struct snd_ac97 * ac97)
86627 return 0;
86628 }
86629
86630 -static struct snd_ac97_build_ops patch_alc655_ops = {
86631 +static const struct snd_ac97_build_ops patch_alc655_ops = {
86632 .build_specific = patch_alc655_specific,
86633 .update_jacks = alc655_update_jacks
86634 };
86635 @@ -2916,7 +2916,7 @@ static int patch_alc850_specific(struct snd_ac97 *ac97)
86636 return 0;
86637 }
86638
86639 -static struct snd_ac97_build_ops patch_alc850_ops = {
86640 +static const struct snd_ac97_build_ops patch_alc850_ops = {
86641 .build_specific = patch_alc850_specific,
86642 .update_jacks = alc850_update_jacks
86643 };
86644 @@ -2978,7 +2978,7 @@ static int patch_cm9738_specific(struct snd_ac97 * ac97)
86645 return patch_build_controls(ac97, snd_ac97_cm9738_controls, ARRAY_SIZE(snd_ac97_cm9738_controls));
86646 }
86647
86648 -static struct snd_ac97_build_ops patch_cm9738_ops = {
86649 +static const struct snd_ac97_build_ops patch_cm9738_ops = {
86650 .build_specific = patch_cm9738_specific,
86651 .update_jacks = cm9738_update_jacks
86652 };
86653 @@ -3069,7 +3069,7 @@ static int patch_cm9739_post_spdif(struct snd_ac97 * ac97)
86654 return patch_build_controls(ac97, snd_ac97_cm9739_controls_spdif, ARRAY_SIZE(snd_ac97_cm9739_controls_spdif));
86655 }
86656
86657 -static struct snd_ac97_build_ops patch_cm9739_ops = {
86658 +static const struct snd_ac97_build_ops patch_cm9739_ops = {
86659 .build_specific = patch_cm9739_specific,
86660 .build_post_spdif = patch_cm9739_post_spdif,
86661 .update_jacks = cm9739_update_jacks
86662 @@ -3243,7 +3243,7 @@ static int patch_cm9761_specific(struct snd_ac97 * ac97)
86663 return patch_build_controls(ac97, snd_ac97_cm9761_controls, ARRAY_SIZE(snd_ac97_cm9761_controls));
86664 }
86665
86666 -static struct snd_ac97_build_ops patch_cm9761_ops = {
86667 +static const struct snd_ac97_build_ops patch_cm9761_ops = {
86668 .build_specific = patch_cm9761_specific,
86669 .build_post_spdif = patch_cm9761_post_spdif,
86670 .update_jacks = cm9761_update_jacks
86671 @@ -3339,7 +3339,7 @@ static int patch_cm9780_specific(struct snd_ac97 *ac97)
86672 return patch_build_controls(ac97, cm9780_controls, ARRAY_SIZE(cm9780_controls));
86673 }
86674
86675 -static struct snd_ac97_build_ops patch_cm9780_ops = {
86676 +static const struct snd_ac97_build_ops patch_cm9780_ops = {
86677 .build_specific = patch_cm9780_specific,
86678 .build_post_spdif = patch_cm9761_post_spdif /* identical with CM9761 */
86679 };
86680 @@ -3459,7 +3459,7 @@ static int patch_vt1616_specific(struct snd_ac97 * ac97)
86681 return 0;
86682 }
86683
86684 -static struct snd_ac97_build_ops patch_vt1616_ops = {
86685 +static const struct snd_ac97_build_ops patch_vt1616_ops = {
86686 .build_specific = patch_vt1616_specific
86687 };
86688
86689 @@ -3813,7 +3813,7 @@ static int patch_it2646_specific(struct snd_ac97 * ac97)
86690 return 0;
86691 }
86692
86693 -static struct snd_ac97_build_ops patch_it2646_ops = {
86694 +static const struct snd_ac97_build_ops patch_it2646_ops = {
86695 .build_specific = patch_it2646_specific,
86696 .update_jacks = it2646_update_jacks
86697 };
86698 @@ -3847,7 +3847,7 @@ static int patch_si3036_specific(struct snd_ac97 * ac97)
86699 return 0;
86700 }
86701
86702 -static struct snd_ac97_build_ops patch_si3036_ops = {
86703 +static const struct snd_ac97_build_ops patch_si3036_ops = {
86704 .build_specific = patch_si3036_specific,
86705 };
86706
86707 @@ -3914,7 +3914,7 @@ static int patch_ucb1400_specific(struct snd_ac97 * ac97)
86708 return 0;
86709 }
86710
86711 -static struct snd_ac97_build_ops patch_ucb1400_ops = {
86712 +static const struct snd_ac97_build_ops patch_ucb1400_ops = {
86713 .build_specific = patch_ucb1400_specific,
86714 };
86715
86716 diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
86717 index 99552fb..4dcc2c5 100644
86718 --- a/sound/pci/hda/hda_codec.h
86719 +++ b/sound/pci/hda/hda_codec.h
86720 @@ -580,7 +580,7 @@ struct hda_bus_ops {
86721 /* notify power-up/down from codec to controller */
86722 void (*pm_notify)(struct hda_bus *bus);
86723 #endif
86724 -};
86725 +} __no_const;
86726
86727 /* template to pass to the bus constructor */
86728 struct hda_bus_template {
86729 @@ -675,6 +675,7 @@ struct hda_codec_ops {
86730 int (*check_power_status)(struct hda_codec *codec, hda_nid_t nid);
86731 #endif
86732 };
86733 +typedef struct hda_codec_ops __no_const hda_codec_ops_no_const;
86734
86735 /* record for amp information cache */
86736 struct hda_cache_head {
86737 @@ -705,7 +706,7 @@ struct hda_pcm_ops {
86738 struct snd_pcm_substream *substream);
86739 int (*cleanup)(struct hda_pcm_stream *info, struct hda_codec *codec,
86740 struct snd_pcm_substream *substream);
86741 -};
86742 +} __no_const;
86743
86744 /* PCM information for each substream */
86745 struct hda_pcm_stream {
86746 @@ -760,7 +761,7 @@ struct hda_codec {
86747 const char *modelname; /* model name for preset */
86748
86749 /* set by patch */
86750 - struct hda_codec_ops patch_ops;
86751 + hda_codec_ops_no_const patch_ops;
86752
86753 /* PCM to create, set by patch_ops.build_pcms callback */
86754 unsigned int num_pcms;
86755 diff --git a/sound/pci/hda/patch_atihdmi.c b/sound/pci/hda/patch_atihdmi.c
86756 index fb684f0..2b11cea 100644
86757 --- a/sound/pci/hda/patch_atihdmi.c
86758 +++ b/sound/pci/hda/patch_atihdmi.c
86759 @@ -177,7 +177,7 @@ static int patch_atihdmi(struct hda_codec *codec)
86760 */
86761 spec->multiout.dig_out_nid = CVT_NID;
86762
86763 - codec->patch_ops = atihdmi_patch_ops;
86764 + memcpy((void *)&codec->patch_ops, &atihdmi_patch_ops, sizeof(atihdmi_patch_ops));
86765
86766 return 0;
86767 }
86768 diff --git a/sound/pci/hda/patch_intelhdmi.c b/sound/pci/hda/patch_intelhdmi.c
86769 index 7c23016..c5bfdd7 100644
86770 --- a/sound/pci/hda/patch_intelhdmi.c
86771 +++ b/sound/pci/hda/patch_intelhdmi.c
86772 @@ -511,10 +511,10 @@ static void hdmi_non_intrinsic_event(struct hda_codec *codec, unsigned int res)
86773 cp_ready);
86774
86775 /* TODO */
86776 - if (cp_state)
86777 - ;
86778 - if (cp_ready)
86779 - ;
86780 + if (cp_state) {
86781 + }
86782 + if (cp_ready) {
86783 + }
86784 }
86785
86786
86787 @@ -656,7 +656,7 @@ static int do_patch_intel_hdmi(struct hda_codec *codec)
86788 spec->multiout.dig_out_nid = cvt_nid;
86789
86790 codec->spec = spec;
86791 - codec->patch_ops = intel_hdmi_patch_ops;
86792 + memcpy((void *)&codec->patch_ops, &intel_hdmi_patch_ops, sizeof(intel_hdmi_patch_ops));
86793
86794 snd_hda_eld_proc_new(codec, &spec->sink_eld);
86795
86796 diff --git a/sound/pci/hda/patch_nvhdmi.c b/sound/pci/hda/patch_nvhdmi.c
86797 index 6afdab0..68ed352 100644
86798 --- a/sound/pci/hda/patch_nvhdmi.c
86799 +++ b/sound/pci/hda/patch_nvhdmi.c
86800 @@ -367,7 +367,7 @@ static int patch_nvhdmi_8ch(struct hda_codec *codec)
86801 spec->multiout.max_channels = 8;
86802 spec->multiout.dig_out_nid = Nv_Master_Convert_nid;
86803
86804 - codec->patch_ops = nvhdmi_patch_ops_8ch;
86805 + memcpy((void *)&codec->patch_ops, &nvhdmi_patch_ops_8ch, sizeof(nvhdmi_patch_ops_8ch));
86806
86807 return 0;
86808 }
86809 @@ -386,7 +386,7 @@ static int patch_nvhdmi_2ch(struct hda_codec *codec)
86810 spec->multiout.max_channels = 2;
86811 spec->multiout.dig_out_nid = Nv_Master_Convert_nid;
86812
86813 - codec->patch_ops = nvhdmi_patch_ops_2ch;
86814 + memcpy((void *)&codec->patch_ops, &nvhdmi_patch_ops_2ch, sizeof(nvhdmi_patch_ops_2ch));
86815
86816 return 0;
86817 }
86818 diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
86819 index 2fcd70d..a143eaf 100644
86820 --- a/sound/pci/hda/patch_sigmatel.c
86821 +++ b/sound/pci/hda/patch_sigmatel.c
86822 @@ -5220,7 +5220,7 @@ again:
86823 snd_hda_codec_write_cache(codec, nid, 0,
86824 AC_VERB_SET_CONNECT_SEL, num_dacs);
86825
86826 - codec->patch_ops = stac92xx_patch_ops;
86827 + memcpy((void *)&codec->patch_ops, &stac92xx_patch_ops, sizeof(stac92xx_patch_ops));
86828
86829 codec->proc_widget_hook = stac92hd_proc_hook;
86830
86831 @@ -5294,7 +5294,7 @@ static int patch_stac92hd71bxx(struct hda_codec *codec)
86832 return -ENOMEM;
86833
86834 codec->spec = spec;
86835 - codec->patch_ops = stac92xx_patch_ops;
86836 + memcpy((void *)&codec->patch_ops, &stac92xx_patch_ops, sizeof(stac92xx_patch_ops));
86837 spec->num_pins = STAC92HD71BXX_NUM_PINS;
86838 switch (codec->vendor_id) {
86839 case 0x111d76b6:
86840 diff --git a/sound/pci/ice1712/ice1712.h b/sound/pci/ice1712/ice1712.h
86841 index d063149..01599a4 100644
86842 --- a/sound/pci/ice1712/ice1712.h
86843 +++ b/sound/pci/ice1712/ice1712.h
86844 @@ -269,7 +269,7 @@ struct snd_ak4xxx_private {
86845 unsigned int mask_flags; /* total mask bits */
86846 struct snd_akm4xxx_ops {
86847 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
86848 - } ops;
86849 + } __no_const ops;
86850 };
86851
86852 struct snd_ice1712_spdif {
86853 @@ -285,7 +285,7 @@ struct snd_ice1712_spdif {
86854 int (*default_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
86855 void (*stream_get)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
86856 int (*stream_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
86857 - } ops;
86858 + } __no_const ops;
86859 };
86860
86861
86862 diff --git a/sound/pci/intel8x0m.c b/sound/pci/intel8x0m.c
86863 index 9e7d12e..3e3bc64 100644
86864 --- a/sound/pci/intel8x0m.c
86865 +++ b/sound/pci/intel8x0m.c
86866 @@ -1264,7 +1264,7 @@ static struct shortname_table {
86867 { 0x5455, "ALi M5455" },
86868 { 0x746d, "AMD AMD8111" },
86869 #endif
86870 - { 0 },
86871 + { 0, },
86872 };
86873
86874 static int __devinit snd_intel8x0m_probe(struct pci_dev *pci,
86875 diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c
86876 index 5518371..45cf7ac 100644
86877 --- a/sound/pci/ymfpci/ymfpci_main.c
86878 +++ b/sound/pci/ymfpci/ymfpci_main.c
86879 @@ -202,8 +202,8 @@ static void snd_ymfpci_hw_stop(struct snd_ymfpci *chip)
86880 if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
86881 break;
86882 }
86883 - if (atomic_read(&chip->interrupt_sleep_count)) {
86884 - atomic_set(&chip->interrupt_sleep_count, 0);
86885 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
86886 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
86887 wake_up(&chip->interrupt_sleep);
86888 }
86889 __end:
86890 @@ -787,7 +787,7 @@ static void snd_ymfpci_irq_wait(struct snd_ymfpci *chip)
86891 continue;
86892 init_waitqueue_entry(&wait, current);
86893 add_wait_queue(&chip->interrupt_sleep, &wait);
86894 - atomic_inc(&chip->interrupt_sleep_count);
86895 + atomic_inc_unchecked(&chip->interrupt_sleep_count);
86896 schedule_timeout_uninterruptible(msecs_to_jiffies(50));
86897 remove_wait_queue(&chip->interrupt_sleep, &wait);
86898 }
86899 @@ -825,8 +825,8 @@ static irqreturn_t snd_ymfpci_interrupt(int irq, void *dev_id)
86900 snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
86901 spin_unlock(&chip->reg_lock);
86902
86903 - if (atomic_read(&chip->interrupt_sleep_count)) {
86904 - atomic_set(&chip->interrupt_sleep_count, 0);
86905 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
86906 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
86907 wake_up(&chip->interrupt_sleep);
86908 }
86909 }
86910 @@ -2369,7 +2369,7 @@ int __devinit snd_ymfpci_create(struct snd_card *card,
86911 spin_lock_init(&chip->reg_lock);
86912 spin_lock_init(&chip->voice_lock);
86913 init_waitqueue_head(&chip->interrupt_sleep);
86914 - atomic_set(&chip->interrupt_sleep_count, 0);
86915 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
86916 chip->card = card;
86917 chip->pci = pci;
86918 chip->irq = -1;
86919 diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
86920 index 0a1b2f6..776bb19 100644
86921 --- a/sound/soc/soc-core.c
86922 +++ b/sound/soc/soc-core.c
86923 @@ -609,7 +609,7 @@ static int soc_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
86924 }
86925
86926 /* ASoC PCM operations */
86927 -static struct snd_pcm_ops soc_pcm_ops = {
86928 +static snd_pcm_ops_no_const soc_pcm_ops = {
86929 .open = soc_pcm_open,
86930 .close = soc_codec_close,
86931 .hw_params = soc_pcm_hw_params,
86932 diff --git a/sound/usb/usbaudio.c b/sound/usb/usbaudio.c
86933 index 79633ea..9732e90 100644
86934 --- a/sound/usb/usbaudio.c
86935 +++ b/sound/usb/usbaudio.c
86936 @@ -963,12 +963,12 @@ static int snd_usb_pcm_playback_trigger(struct snd_pcm_substream *substream,
86937 switch (cmd) {
86938 case SNDRV_PCM_TRIGGER_START:
86939 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
86940 - subs->ops.prepare = prepare_playback_urb;
86941 + *(void **)&subs->ops.prepare = prepare_playback_urb;
86942 return 0;
86943 case SNDRV_PCM_TRIGGER_STOP:
86944 return deactivate_urbs(subs, 0, 0);
86945 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
86946 - subs->ops.prepare = prepare_nodata_playback_urb;
86947 + *(void **)&subs->ops.prepare = prepare_nodata_playback_urb;
86948 return 0;
86949 default:
86950 return -EINVAL;
86951 @@ -985,15 +985,15 @@ static int snd_usb_pcm_capture_trigger(struct snd_pcm_substream *substream,
86952
86953 switch (cmd) {
86954 case SNDRV_PCM_TRIGGER_START:
86955 - subs->ops.retire = retire_capture_urb;
86956 + *(void **)&subs->ops.retire = retire_capture_urb;
86957 return start_urbs(subs, substream->runtime);
86958 case SNDRV_PCM_TRIGGER_STOP:
86959 return deactivate_urbs(subs, 0, 0);
86960 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
86961 - subs->ops.retire = retire_paused_capture_urb;
86962 + *(void **)&subs->ops.retire = retire_paused_capture_urb;
86963 return 0;
86964 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
86965 - subs->ops.retire = retire_capture_urb;
86966 + *(void **)&subs->ops.retire = retire_capture_urb;
86967 return 0;
86968 default:
86969 return -EINVAL;
86970 @@ -1542,7 +1542,7 @@ static int snd_usb_pcm_prepare(struct snd_pcm_substream *substream)
86971 /* for playback, submit the URBs now; otherwise, the first hwptr_done
86972 * updates for all URBs would happen at the same time when starting */
86973 if (subs->direction == SNDRV_PCM_STREAM_PLAYBACK) {
86974 - subs->ops.prepare = prepare_nodata_playback_urb;
86975 + *(void **)&subs->ops.prepare = prepare_nodata_playback_urb;
86976 return start_urbs(subs, runtime);
86977 } else
86978 return 0;
86979 @@ -2228,14 +2228,14 @@ static void init_substream(struct snd_usb_stream *as, int stream, struct audiofo
86980 subs->direction = stream;
86981 subs->dev = as->chip->dev;
86982 if (snd_usb_get_speed(subs->dev) == USB_SPEED_FULL) {
86983 - subs->ops = audio_urb_ops[stream];
86984 + memcpy((void *)&subs->ops, &audio_urb_ops[stream], sizeof(subs->ops));
86985 } else {
86986 - subs->ops = audio_urb_ops_high_speed[stream];
86987 + memcpy((void *)&subs->ops, &audio_urb_ops_high_speed[stream], sizeof(subs->ops));
86988 switch (as->chip->usb_id) {
86989 case USB_ID(0x041e, 0x3f02): /* E-Mu 0202 USB */
86990 case USB_ID(0x041e, 0x3f04): /* E-Mu 0404 USB */
86991 case USB_ID(0x041e, 0x3f0a): /* E-Mu Tracker Pre */
86992 - subs->ops.retire_sync = retire_playback_sync_urb_hs_emu;
86993 + *(void **)&subs->ops.retire_sync = retire_playback_sync_urb_hs_emu;
86994 break;
86995 }
86996 }
86997 diff --git a/tools/gcc/Makefile b/tools/gcc/Makefile
86998 new file mode 100644
86999 index 0000000..29b6b75
87000 --- /dev/null
87001 +++ b/tools/gcc/Makefile
87002 @@ -0,0 +1,21 @@
87003 +#CC := gcc
87004 +#PLUGIN_SOURCE_FILES := pax_plugin.c
87005 +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
87006 +GCCPLUGINS_DIR := $(shell $(CC) -print-file-name=plugin)
87007 +#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W -std=gnu99
87008 +
87009 +HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include -std=gnu99
87010 +
87011 +hostlibs-y := constify_plugin.so
87012 +hostlibs-$(CONFIG_PAX_MEMORY_STACKLEAK) += stackleak_plugin.so
87013 +hostlibs-$(CONFIG_KALLOCSTAT_PLUGIN) += kallocstat_plugin.so
87014 +hostlibs-$(CONFIG_PAX_KERNEXEC_PLUGIN) += kernexec_plugin.so
87015 +hostlibs-$(CONFIG_CHECKER_PLUGIN) += checker_plugin.so
87016 +
87017 +always := $(hostlibs-y)
87018 +
87019 +constify_plugin-objs := constify_plugin.o
87020 +stackleak_plugin-objs := stackleak_plugin.o
87021 +kallocstat_plugin-objs := kallocstat_plugin.o
87022 +kernexec_plugin-objs := kernexec_plugin.o
87023 +checker_plugin-objs := checker_plugin.o
87024 diff --git a/tools/gcc/checker_plugin.c b/tools/gcc/checker_plugin.c
87025 new file mode 100644
87026 index 0000000..d41b5af
87027 --- /dev/null
87028 +++ b/tools/gcc/checker_plugin.c
87029 @@ -0,0 +1,171 @@
87030 +/*
87031 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
87032 + * Licensed under the GPL v2
87033 + *
87034 + * Note: the choice of the license means that the compilation process is
87035 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
87036 + * but for the kernel it doesn't matter since it doesn't link against
87037 + * any of the gcc libraries
87038 + *
87039 + * gcc plugin to implement various sparse (source code checker) features
87040 + *
87041 + * TODO:
87042 + * - define separate __iomem, __percpu and __rcu address spaces (lots of code to patch)
87043 + *
87044 + * BUGS:
87045 + * - none known
87046 + */
87047 +#include "gcc-plugin.h"
87048 +#include "config.h"
87049 +#include "system.h"
87050 +#include "coretypes.h"
87051 +#include "tree.h"
87052 +#include "tree-pass.h"
87053 +#include "flags.h"
87054 +#include "intl.h"
87055 +#include "toplev.h"
87056 +#include "plugin.h"
87057 +//#include "expr.h" where are you...
87058 +#include "diagnostic.h"
87059 +#include "plugin-version.h"
87060 +#include "tm.h"
87061 +#include "function.h"
87062 +#include "basic-block.h"
87063 +#include "gimple.h"
87064 +#include "rtl.h"
87065 +#include "emit-rtl.h"
87066 +#include "tree-flow.h"
87067 +#include "target.h"
87068 +
87069 +extern void c_register_addr_space (const char *str, addr_space_t as);
87070 +extern enum machine_mode default_addr_space_pointer_mode (addr_space_t);
87071 +extern enum machine_mode default_addr_space_address_mode (addr_space_t);
87072 +extern bool default_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as);
87073 +extern bool default_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as);
87074 +extern rtx default_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as);
87075 +
87076 +extern void print_gimple_stmt(FILE *, gimple, int, int);
87077 +extern rtx emit_move_insn(rtx x, rtx y);
87078 +
87079 +int plugin_is_GPL_compatible;
87080 +
87081 +static struct plugin_info checker_plugin_info = {
87082 + .version = "201111150100",
87083 +};
87084 +
87085 +#define ADDR_SPACE_KERNEL 0
87086 +#define ADDR_SPACE_FORCE_KERNEL 1
87087 +#define ADDR_SPACE_USER 2
87088 +#define ADDR_SPACE_FORCE_USER 3
87089 +#define ADDR_SPACE_IOMEM 0
87090 +#define ADDR_SPACE_FORCE_IOMEM 0
87091 +#define ADDR_SPACE_PERCPU 0
87092 +#define ADDR_SPACE_FORCE_PERCPU 0
87093 +#define ADDR_SPACE_RCU 0
87094 +#define ADDR_SPACE_FORCE_RCU 0
87095 +
87096 +static enum machine_mode checker_addr_space_pointer_mode(addr_space_t addrspace)
87097 +{
87098 + return default_addr_space_pointer_mode(ADDR_SPACE_GENERIC);
87099 +}
87100 +
87101 +static enum machine_mode checker_addr_space_address_mode(addr_space_t addrspace)
87102 +{
87103 + return default_addr_space_address_mode(ADDR_SPACE_GENERIC);
87104 +}
87105 +
87106 +static bool checker_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as)
87107 +{
87108 + return default_addr_space_valid_pointer_mode(mode, as);
87109 +}
87110 +
87111 +static bool checker_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as)
87112 +{
87113 + return default_addr_space_legitimate_address_p(mode, mem, strict, ADDR_SPACE_GENERIC);
87114 +}
87115 +
87116 +static rtx checker_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as)
87117 +{
87118 + return default_addr_space_legitimize_address(x, oldx, mode, as);
87119 +}
87120 +
87121 +static bool checker_addr_space_subset_p(addr_space_t subset, addr_space_t superset)
87122 +{
87123 + if (subset == ADDR_SPACE_FORCE_KERNEL && superset == ADDR_SPACE_KERNEL)
87124 + return true;
87125 +
87126 + if (subset == ADDR_SPACE_FORCE_USER && superset == ADDR_SPACE_USER)
87127 + return true;
87128 +
87129 + if (subset == ADDR_SPACE_FORCE_IOMEM && superset == ADDR_SPACE_IOMEM)
87130 + return true;
87131 +
87132 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_USER)
87133 + return true;
87134 +
87135 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_IOMEM)
87136 + return true;
87137 +
87138 + if (subset == ADDR_SPACE_USER && superset == ADDR_SPACE_FORCE_KERNEL)
87139 + return true;
87140 +
87141 + if (subset == ADDR_SPACE_IOMEM && superset == ADDR_SPACE_FORCE_KERNEL)
87142 + return true;
87143 +
87144 + return subset == superset;
87145 +}
87146 +
87147 +static rtx checker_addr_space_convert(rtx op, tree from_type, tree to_type)
87148 +{
87149 +// addr_space_t from_as = TYPE_ADDR_SPACE(TREE_TYPE(from_type));
87150 +// addr_space_t to_as = TYPE_ADDR_SPACE(TREE_TYPE(to_type));
87151 +
87152 + return op;
87153 +}
87154 +
87155 +static void register_checker_address_spaces(void *event_data, void *data)
87156 +{
87157 + c_register_addr_space("__kernel", ADDR_SPACE_KERNEL);
87158 + c_register_addr_space("__force_kernel", ADDR_SPACE_FORCE_KERNEL);
87159 + c_register_addr_space("__user", ADDR_SPACE_USER);
87160 + c_register_addr_space("__force_user", ADDR_SPACE_FORCE_USER);
87161 +// c_register_addr_space("__iomem", ADDR_SPACE_IOMEM);
87162 +// c_register_addr_space("__force_iomem", ADDR_SPACE_FORCE_IOMEM);
87163 +// c_register_addr_space("__percpu", ADDR_SPACE_PERCPU);
87164 +// c_register_addr_space("__force_percpu", ADDR_SPACE_FORCE_PERCPU);
87165 +// c_register_addr_space("__rcu", ADDR_SPACE_RCU);
87166 +// c_register_addr_space("__force_rcu", ADDR_SPACE_FORCE_RCU);
87167 +
87168 + targetm.addr_space.pointer_mode = checker_addr_space_pointer_mode;
87169 + targetm.addr_space.address_mode = checker_addr_space_address_mode;
87170 + targetm.addr_space.valid_pointer_mode = checker_addr_space_valid_pointer_mode;
87171 + targetm.addr_space.legitimate_address_p = checker_addr_space_legitimate_address_p;
87172 +// targetm.addr_space.legitimize_address = checker_addr_space_legitimize_address;
87173 + targetm.addr_space.subset_p = checker_addr_space_subset_p;
87174 + targetm.addr_space.convert = checker_addr_space_convert;
87175 +}
87176 +
87177 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
87178 +{
87179 + const char * const plugin_name = plugin_info->base_name;
87180 + const int argc = plugin_info->argc;
87181 + const struct plugin_argument * const argv = plugin_info->argv;
87182 + int i;
87183 +
87184 + if (!plugin_default_version_check(version, &gcc_version)) {
87185 + error(G_("incompatible gcc/plugin versions"));
87186 + return 1;
87187 + }
87188 +
87189 + register_callback(plugin_name, PLUGIN_INFO, NULL, &checker_plugin_info);
87190 +
87191 + for (i = 0; i < argc; ++i)
87192 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
87193 +
87194 + if (TARGET_64BIT == 0)
87195 + return 0;
87196 +
87197 + register_callback(plugin_name, PLUGIN_PRAGMAS, register_checker_address_spaces, NULL);
87198 +
87199 + return 0;
87200 +}
87201 diff --git a/tools/gcc/constify_plugin.c b/tools/gcc/constify_plugin.c
87202 new file mode 100644
87203 index 0000000..704a564
87204 --- /dev/null
87205 +++ b/tools/gcc/constify_plugin.c
87206 @@ -0,0 +1,303 @@
87207 +/*
87208 + * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
87209 + * Copyright 2011 by PaX Team <pageexec@freemail.hu>
87210 + * Licensed under the GPL v2, or (at your option) v3
87211 + *
87212 + * This gcc plugin constifies all structures which contain only function pointers or are explicitly marked for constification.
87213 + *
87214 + * Homepage:
87215 + * http://www.grsecurity.net/~ephox/const_plugin/
87216 + *
87217 + * Usage:
87218 + * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c
87219 + * $ gcc -fplugin=constify_plugin.so test.c -O2
87220 + */
87221 +
87222 +#include "gcc-plugin.h"
87223 +#include "config.h"
87224 +#include "system.h"
87225 +#include "coretypes.h"
87226 +#include "tree.h"
87227 +#include "tree-pass.h"
87228 +#include "flags.h"
87229 +#include "intl.h"
87230 +#include "toplev.h"
87231 +#include "plugin.h"
87232 +#include "diagnostic.h"
87233 +#include "plugin-version.h"
87234 +#include "tm.h"
87235 +#include "function.h"
87236 +#include "basic-block.h"
87237 +#include "gimple.h"
87238 +#include "rtl.h"
87239 +#include "emit-rtl.h"
87240 +#include "tree-flow.h"
87241 +
87242 +#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1(TYPE)
87243 +
87244 +int plugin_is_GPL_compatible;
87245 +
87246 +static struct plugin_info const_plugin_info = {
87247 + .version = "201111150100",
87248 + .help = "no-constify\tturn off constification\n",
87249 +};
87250 +
87251 +static void constify_type(tree type);
87252 +static bool walk_struct(tree node);
87253 +
87254 +static tree deconstify_type(tree old_type)
87255 +{
87256 + tree new_type, field;
87257 +
87258 + new_type = build_qualified_type(old_type, TYPE_QUALS(old_type) & ~TYPE_QUAL_CONST);
87259 + TYPE_FIELDS(new_type) = copy_list(TYPE_FIELDS(new_type));
87260 + for (field = TYPE_FIELDS(new_type); field; field = TREE_CHAIN(field))
87261 + DECL_FIELD_CONTEXT(field) = new_type;
87262 + TYPE_READONLY(new_type) = 0;
87263 + C_TYPE_FIELDS_READONLY(new_type) = 0;
87264 + return new_type;
87265 +}
87266 +
87267 +static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
87268 +{
87269 + tree type;
87270 +
87271 + *no_add_attrs = true;
87272 + if (TREE_CODE(*node) == FUNCTION_DECL) {
87273 + error("%qE attribute does not apply to functions", name);
87274 + return NULL_TREE;
87275 + }
87276 +
87277 + if (TREE_CODE(*node) == VAR_DECL) {
87278 + error("%qE attribute does not apply to variables", name);
87279 + return NULL_TREE;
87280 + }
87281 +
87282 + if (TYPE_P(*node)) {
87283 + if (TREE_CODE(*node) == RECORD_TYPE || TREE_CODE(*node) == UNION_TYPE)
87284 + *no_add_attrs = false;
87285 + else
87286 + error("%qE attribute applies to struct and union types only", name);
87287 + return NULL_TREE;
87288 + }
87289 +
87290 + type = TREE_TYPE(*node);
87291 +
87292 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) {
87293 + error("%qE attribute applies to struct and union types only", name);
87294 + return NULL_TREE;
87295 + }
87296 +
87297 + if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(type))) {
87298 + error("%qE attribute is already applied to the type", name);
87299 + return NULL_TREE;
87300 + }
87301 +
87302 + if (TREE_CODE(*node) == TYPE_DECL && !TYPE_READONLY(type)) {
87303 + error("%qE attribute used on type that is not constified", name);
87304 + return NULL_TREE;
87305 + }
87306 +
87307 + if (TREE_CODE(*node) == TYPE_DECL) {
87308 + TREE_TYPE(*node) = deconstify_type(type);
87309 + TREE_READONLY(*node) = 0;
87310 + return NULL_TREE;
87311 + }
87312 +
87313 + return NULL_TREE;
87314 +}
87315 +
87316 +static tree handle_do_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
87317 +{
87318 + *no_add_attrs = true;
87319 + if (!TYPE_P(*node)) {
87320 + error("%qE attribute applies to types only", name);
87321 + return NULL_TREE;
87322 + }
87323 +
87324 + if (TREE_CODE(*node) != RECORD_TYPE && TREE_CODE(*node) != UNION_TYPE) {
87325 + error("%qE attribute applies to struct and union types only", name);
87326 + return NULL_TREE;
87327 + }
87328 +
87329 + *no_add_attrs = false;
87330 + constify_type(*node);
87331 + return NULL_TREE;
87332 +}
87333 +
87334 +static struct attribute_spec no_const_attr = {
87335 + .name = "no_const",
87336 + .min_length = 0,
87337 + .max_length = 0,
87338 + .decl_required = false,
87339 + .type_required = false,
87340 + .function_type_required = false,
87341 + .handler = handle_no_const_attribute,
87342 +#if BUILDING_GCC_VERSION >= 4007
87343 + .affects_type_identity = true
87344 +#endif
87345 +};
87346 +
87347 +static struct attribute_spec do_const_attr = {
87348 + .name = "do_const",
87349 + .min_length = 0,
87350 + .max_length = 0,
87351 + .decl_required = false,
87352 + .type_required = false,
87353 + .function_type_required = false,
87354 + .handler = handle_do_const_attribute,
87355 +#if BUILDING_GCC_VERSION >= 4007
87356 + .affects_type_identity = true
87357 +#endif
87358 +};
87359 +
87360 +static void register_attributes(void *event_data, void *data)
87361 +{
87362 + register_attribute(&no_const_attr);
87363 + register_attribute(&do_const_attr);
87364 +}
87365 +
87366 +static void constify_type(tree type)
87367 +{
87368 + TYPE_READONLY(type) = 1;
87369 + C_TYPE_FIELDS_READONLY(type) = 1;
87370 +}
87371 +
87372 +static bool is_fptr(tree field)
87373 +{
87374 + tree ptr = TREE_TYPE(field);
87375 +
87376 + if (TREE_CODE(ptr) != POINTER_TYPE)
87377 + return false;
87378 +
87379 + return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE;
87380 +}
87381 +
87382 +static bool walk_struct(tree node)
87383 +{
87384 + tree field;
87385 +
87386 + if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node)))
87387 + return false;
87388 +
87389 + if (TYPE_FIELDS(node) == NULL_TREE)
87390 + return false;
87391 +
87392 + for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
87393 + tree type = TREE_TYPE(field);
87394 + enum tree_code code = TREE_CODE(type);
87395 + if (code == RECORD_TYPE || code == UNION_TYPE) {
87396 + if (!(walk_struct(type)))
87397 + return false;
87398 + } else if (!is_fptr(field) && !TREE_READONLY(field))
87399 + return false;
87400 + }
87401 + return true;
87402 +}
87403 +
87404 +static void finish_type(void *event_data, void *data)
87405 +{
87406 + tree type = (tree)event_data;
87407 +
87408 + if (type == NULL_TREE)
87409 + return;
87410 +
87411 + if (TYPE_READONLY(type))
87412 + return;
87413 +
87414 + if (walk_struct(type))
87415 + constify_type(type);
87416 +}
87417 +
87418 +static unsigned int check_local_variables(void);
87419 +
87420 +struct gimple_opt_pass pass_local_variable = {
87421 + {
87422 + .type = GIMPLE_PASS,
87423 + .name = "check_local_variables",
87424 + .gate = NULL,
87425 + .execute = check_local_variables,
87426 + .sub = NULL,
87427 + .next = NULL,
87428 + .static_pass_number = 0,
87429 + .tv_id = TV_NONE,
87430 + .properties_required = 0,
87431 + .properties_provided = 0,
87432 + .properties_destroyed = 0,
87433 + .todo_flags_start = 0,
87434 + .todo_flags_finish = 0
87435 + }
87436 +};
87437 +
87438 +static unsigned int check_local_variables(void)
87439 +{
87440 + tree var;
87441 + referenced_var_iterator rvi;
87442 +
87443 +#if BUILDING_GCC_VERSION == 4005
87444 + FOR_EACH_REFERENCED_VAR(var, rvi) {
87445 +#else
87446 + FOR_EACH_REFERENCED_VAR(cfun, var, rvi) {
87447 +#endif
87448 + tree type = TREE_TYPE(var);
87449 +
87450 + if (!DECL_P(var) || TREE_STATIC(var) || DECL_EXTERNAL(var))
87451 + continue;
87452 +
87453 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
87454 + continue;
87455 +
87456 + if (!TYPE_READONLY(type))
87457 + continue;
87458 +
87459 +// if (lookup_attribute("no_const", DECL_ATTRIBUTES(var)))
87460 +// continue;
87461 +
87462 +// if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type)))
87463 +// continue;
87464 +
87465 + if (walk_struct(type)) {
87466 + error("constified variable %qE cannot be local", var);
87467 + return 1;
87468 + }
87469 + }
87470 + return 0;
87471 +}
87472 +
87473 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
87474 +{
87475 + const char * const plugin_name = plugin_info->base_name;
87476 + const int argc = plugin_info->argc;
87477 + const struct plugin_argument * const argv = plugin_info->argv;
87478 + int i;
87479 + bool constify = true;
87480 +
87481 + struct register_pass_info local_variable_pass_info = {
87482 + .pass = &pass_local_variable.pass,
87483 + .reference_pass_name = "*referenced_vars",
87484 + .ref_pass_instance_number = 0,
87485 + .pos_op = PASS_POS_INSERT_AFTER
87486 + };
87487 +
87488 + if (!plugin_default_version_check(version, &gcc_version)) {
87489 + error(G_("incompatible gcc/plugin versions"));
87490 + return 1;
87491 + }
87492 +
87493 + for (i = 0; i < argc; ++i) {
87494 + if (!(strcmp(argv[i].key, "no-constify"))) {
87495 + constify = false;
87496 + continue;
87497 + }
87498 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
87499 + }
87500 +
87501 + register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info);
87502 + if (constify) {
87503 + register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
87504 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &local_variable_pass_info);
87505 + }
87506 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
87507 +
87508 + return 0;
87509 +}
87510 diff --git a/tools/gcc/kallocstat_plugin.c b/tools/gcc/kallocstat_plugin.c
87511 new file mode 100644
87512 index 0000000..a5eabce
87513 --- /dev/null
87514 +++ b/tools/gcc/kallocstat_plugin.c
87515 @@ -0,0 +1,167 @@
87516 +/*
87517 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
87518 + * Licensed under the GPL v2
87519 + *
87520 + * Note: the choice of the license means that the compilation process is
87521 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
87522 + * but for the kernel it doesn't matter since it doesn't link against
87523 + * any of the gcc libraries
87524 + *
87525 + * gcc plugin to find the distribution of k*alloc sizes
87526 + *
87527 + * TODO:
87528 + *
87529 + * BUGS:
87530 + * - none known
87531 + */
87532 +#include "gcc-plugin.h"
87533 +#include "config.h"
87534 +#include "system.h"
87535 +#include "coretypes.h"
87536 +#include "tree.h"
87537 +#include "tree-pass.h"
87538 +#include "flags.h"
87539 +#include "intl.h"
87540 +#include "toplev.h"
87541 +#include "plugin.h"
87542 +//#include "expr.h" where are you...
87543 +#include "diagnostic.h"
87544 +#include "plugin-version.h"
87545 +#include "tm.h"
87546 +#include "function.h"
87547 +#include "basic-block.h"
87548 +#include "gimple.h"
87549 +#include "rtl.h"
87550 +#include "emit-rtl.h"
87551 +
87552 +extern void print_gimple_stmt(FILE *, gimple, int, int);
87553 +
87554 +int plugin_is_GPL_compatible;
87555 +
87556 +static const char * const kalloc_functions[] = {
87557 + "__kmalloc",
87558 + "kmalloc",
87559 + "kmalloc_large",
87560 + "kmalloc_node",
87561 + "kmalloc_order",
87562 + "kmalloc_order_trace",
87563 + "kmalloc_slab",
87564 + "kzalloc",
87565 + "kzalloc_node",
87566 +};
87567 +
87568 +static struct plugin_info kallocstat_plugin_info = {
87569 + .version = "201111150100",
87570 +};
87571 +
87572 +static unsigned int execute_kallocstat(void);
87573 +
87574 +static struct gimple_opt_pass kallocstat_pass = {
87575 + .pass = {
87576 + .type = GIMPLE_PASS,
87577 + .name = "kallocstat",
87578 + .gate = NULL,
87579 + .execute = execute_kallocstat,
87580 + .sub = NULL,
87581 + .next = NULL,
87582 + .static_pass_number = 0,
87583 + .tv_id = TV_NONE,
87584 + .properties_required = 0,
87585 + .properties_provided = 0,
87586 + .properties_destroyed = 0,
87587 + .todo_flags_start = 0,
87588 + .todo_flags_finish = 0
87589 + }
87590 +};
87591 +
87592 +static bool is_kalloc(const char *fnname)
87593 +{
87594 + size_t i;
87595 +
87596 + for (i = 0; i < ARRAY_SIZE(kalloc_functions); i++)
87597 + if (!strcmp(fnname, kalloc_functions[i]))
87598 + return true;
87599 + return false;
87600 +}
87601 +
87602 +static unsigned int execute_kallocstat(void)
87603 +{
87604 + basic_block bb;
87605 +
87606 + // 1. loop through BBs and GIMPLE statements
87607 + FOR_EACH_BB(bb) {
87608 + gimple_stmt_iterator gsi;
87609 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
87610 + // gimple match:
87611 + tree fndecl, size;
87612 + gimple call_stmt;
87613 + const char *fnname;
87614 +
87615 + // is it a call
87616 + call_stmt = gsi_stmt(gsi);
87617 + if (!is_gimple_call(call_stmt))
87618 + continue;
87619 + fndecl = gimple_call_fndecl(call_stmt);
87620 + if (fndecl == NULL_TREE)
87621 + continue;
87622 + if (TREE_CODE(fndecl) != FUNCTION_DECL)
87623 + continue;
87624 +
87625 + // is it a call to k*alloc
87626 + fnname = IDENTIFIER_POINTER(DECL_NAME(fndecl));
87627 + if (!is_kalloc(fnname))
87628 + continue;
87629 +
87630 + // is the size arg the result of a simple const assignment
87631 + size = gimple_call_arg(call_stmt, 0);
87632 + while (true) {
87633 + gimple def_stmt;
87634 + expanded_location xloc;
87635 + size_t size_val;
87636 +
87637 + if (TREE_CODE(size) != SSA_NAME)
87638 + break;
87639 + def_stmt = SSA_NAME_DEF_STMT(size);
87640 + if (!def_stmt || !is_gimple_assign(def_stmt))
87641 + break;
87642 + if (gimple_num_ops(def_stmt) != 2)
87643 + break;
87644 + size = gimple_assign_rhs1(def_stmt);
87645 + if (!TREE_CONSTANT(size))
87646 + continue;
87647 + xloc = expand_location(gimple_location(def_stmt));
87648 + if (!xloc.file)
87649 + xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
87650 + size_val = TREE_INT_CST_LOW(size);
87651 + fprintf(stderr, "kallocsize: %8zu %8zx %s %s:%u\n", size_val, size_val, fnname, xloc.file, xloc.line);
87652 + break;
87653 + }
87654 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
87655 +//debug_tree(gimple_call_fn(call_stmt));
87656 +//print_node(stderr, "pax", fndecl, 4);
87657 + }
87658 + }
87659 +
87660 + return 0;
87661 +}
87662 +
87663 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
87664 +{
87665 + const char * const plugin_name = plugin_info->base_name;
87666 + struct register_pass_info kallocstat_pass_info = {
87667 + .pass = &kallocstat_pass.pass,
87668 + .reference_pass_name = "ssa",
87669 + .ref_pass_instance_number = 0,
87670 + .pos_op = PASS_POS_INSERT_AFTER
87671 + };
87672 +
87673 + if (!plugin_default_version_check(version, &gcc_version)) {
87674 + error(G_("incompatible gcc/plugin versions"));
87675 + return 1;
87676 + }
87677 +
87678 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kallocstat_plugin_info);
87679 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kallocstat_pass_info);
87680 +
87681 + return 0;
87682 +}
87683 diff --git a/tools/gcc/kernexec_plugin.c b/tools/gcc/kernexec_plugin.c
87684 new file mode 100644
87685 index 0000000..008f159
87686 --- /dev/null
87687 +++ b/tools/gcc/kernexec_plugin.c
87688 @@ -0,0 +1,427 @@
87689 +/*
87690 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
87691 + * Licensed under the GPL v2
87692 + *
87693 + * Note: the choice of the license means that the compilation process is
87694 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
87695 + * but for the kernel it doesn't matter since it doesn't link against
87696 + * any of the gcc libraries
87697 + *
87698 + * gcc plugin to make KERNEXEC/amd64 almost as good as it is on i386
87699 + *
87700 + * TODO:
87701 + *
87702 + * BUGS:
87703 + * - none known
87704 + */
87705 +#include "gcc-plugin.h"
87706 +#include "config.h"
87707 +#include "system.h"
87708 +#include "coretypes.h"
87709 +#include "tree.h"
87710 +#include "tree-pass.h"
87711 +#include "flags.h"
87712 +#include "intl.h"
87713 +#include "toplev.h"
87714 +#include "plugin.h"
87715 +//#include "expr.h" where are you...
87716 +#include "diagnostic.h"
87717 +#include "plugin-version.h"
87718 +#include "tm.h"
87719 +#include "function.h"
87720 +#include "basic-block.h"
87721 +#include "gimple.h"
87722 +#include "rtl.h"
87723 +#include "emit-rtl.h"
87724 +#include "tree-flow.h"
87725 +
87726 +extern void print_gimple_stmt(FILE *, gimple, int, int);
87727 +extern rtx emit_move_insn(rtx x, rtx y);
87728 +
87729 +int plugin_is_GPL_compatible;
87730 +
87731 +static struct plugin_info kernexec_plugin_info = {
87732 + .version = "201111291120",
87733 + .help = "method=[bts|or]\tinstrumentation method\n"
87734 +};
87735 +
87736 +static unsigned int execute_kernexec_reload(void);
87737 +static unsigned int execute_kernexec_fptr(void);
87738 +static unsigned int execute_kernexec_retaddr(void);
87739 +static bool kernexec_cmodel_check(void);
87740 +
87741 +static void (*kernexec_instrument_fptr)(gimple_stmt_iterator *);
87742 +static void (*kernexec_instrument_retaddr)(rtx);
87743 +
87744 +static struct gimple_opt_pass kernexec_reload_pass = {
87745 + .pass = {
87746 + .type = GIMPLE_PASS,
87747 + .name = "kernexec_reload",
87748 + .gate = kernexec_cmodel_check,
87749 + .execute = execute_kernexec_reload,
87750 + .sub = NULL,
87751 + .next = NULL,
87752 + .static_pass_number = 0,
87753 + .tv_id = TV_NONE,
87754 + .properties_required = 0,
87755 + .properties_provided = 0,
87756 + .properties_destroyed = 0,
87757 + .todo_flags_start = 0,
87758 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
87759 + }
87760 +};
87761 +
87762 +static struct gimple_opt_pass kernexec_fptr_pass = {
87763 + .pass = {
87764 + .type = GIMPLE_PASS,
87765 + .name = "kernexec_fptr",
87766 + .gate = kernexec_cmodel_check,
87767 + .execute = execute_kernexec_fptr,
87768 + .sub = NULL,
87769 + .next = NULL,
87770 + .static_pass_number = 0,
87771 + .tv_id = TV_NONE,
87772 + .properties_required = 0,
87773 + .properties_provided = 0,
87774 + .properties_destroyed = 0,
87775 + .todo_flags_start = 0,
87776 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
87777 + }
87778 +};
87779 +
87780 +static struct rtl_opt_pass kernexec_retaddr_pass = {
87781 + .pass = {
87782 + .type = RTL_PASS,
87783 + .name = "kernexec_retaddr",
87784 + .gate = kernexec_cmodel_check,
87785 + .execute = execute_kernexec_retaddr,
87786 + .sub = NULL,
87787 + .next = NULL,
87788 + .static_pass_number = 0,
87789 + .tv_id = TV_NONE,
87790 + .properties_required = 0,
87791 + .properties_provided = 0,
87792 + .properties_destroyed = 0,
87793 + .todo_flags_start = 0,
87794 + .todo_flags_finish = TODO_dump_func | TODO_ggc_collect
87795 + }
87796 +};
87797 +
87798 +static bool kernexec_cmodel_check(void)
87799 +{
87800 + tree section;
87801 +
87802 + if (ix86_cmodel != CM_KERNEL)
87803 + return false;
87804 +
87805 + section = lookup_attribute("section", DECL_ATTRIBUTES(current_function_decl));
87806 + if (!section || !TREE_VALUE(section))
87807 + return true;
87808 +
87809 + section = TREE_VALUE(TREE_VALUE(section));
87810 + if (strncmp(TREE_STRING_POINTER(section), ".vsyscall_", 10))
87811 + return true;
87812 +
87813 + return false;
87814 +}
87815 +
87816 +/*
87817 + * add special KERNEXEC instrumentation: reload %r10 after it has been clobbered
87818 + */
87819 +static void kernexec_reload_fptr_mask(gimple_stmt_iterator *gsi)
87820 +{
87821 + gimple asm_movabs_stmt;
87822 +
87823 + // build asm volatile("movabs $0x8000000000000000, %%r10\n\t" : : : );
87824 + asm_movabs_stmt = gimple_build_asm_vec("movabs $0x8000000000000000, %%r10\n\t", NULL, NULL, NULL, NULL);
87825 + gimple_asm_set_volatile(asm_movabs_stmt, true);
87826 + gsi_insert_after(gsi, asm_movabs_stmt, GSI_CONTINUE_LINKING);
87827 + update_stmt(asm_movabs_stmt);
87828 +}
87829 +
87830 +/*
87831 + * find all asm() stmts that clobber r10 and add a reload of r10
87832 + */
87833 +static unsigned int execute_kernexec_reload(void)
87834 +{
87835 + basic_block bb;
87836 +
87837 + // 1. loop through BBs and GIMPLE statements
87838 + FOR_EACH_BB(bb) {
87839 + gimple_stmt_iterator gsi;
87840 +
87841 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
87842 + // gimple match: __asm__ ("" : : : "r10");
87843 + gimple asm_stmt;
87844 + size_t nclobbers;
87845 +
87846 + // is it an asm ...
87847 + asm_stmt = gsi_stmt(gsi);
87848 + if (gimple_code(asm_stmt) != GIMPLE_ASM)
87849 + continue;
87850 +
87851 + // ... clobbering r10
87852 + nclobbers = gimple_asm_nclobbers(asm_stmt);
87853 + while (nclobbers--) {
87854 + tree op = gimple_asm_clobber_op(asm_stmt, nclobbers);
87855 + if (strcmp(TREE_STRING_POINTER(TREE_VALUE(op)), "r10"))
87856 + continue;
87857 + kernexec_reload_fptr_mask(&gsi);
87858 +//print_gimple_stmt(stderr, asm_stmt, 0, TDF_LINENO);
87859 + break;
87860 + }
87861 + }
87862 + }
87863 +
87864 + return 0;
87865 +}
87866 +
87867 +/*
87868 + * add special KERNEXEC instrumentation: force MSB of fptr to 1, which will produce
87869 + * a non-canonical address from a userland ptr and will just trigger a GPF on dereference
87870 + */
87871 +static void kernexec_instrument_fptr_bts(gimple_stmt_iterator *gsi)
87872 +{
87873 + gimple assign_intptr, assign_new_fptr, call_stmt;
87874 + tree intptr, old_fptr, new_fptr, kernexec_mask;
87875 +
87876 + call_stmt = gsi_stmt(*gsi);
87877 + old_fptr = gimple_call_fn(call_stmt);
87878 +
87879 + // create temporary unsigned long variable used for bitops and cast fptr to it
87880 + intptr = create_tmp_var(long_unsigned_type_node, "kernexec_bts");
87881 + add_referenced_var(intptr);
87882 + mark_sym_for_renaming(intptr);
87883 + assign_intptr = gimple_build_assign(intptr, fold_convert(long_unsigned_type_node, old_fptr));
87884 + gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
87885 + update_stmt(assign_intptr);
87886 +
87887 + // apply logical or to temporary unsigned long and bitmask
87888 + kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0x8000000000000000LL);
87889 +// kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0xffffffff80000000LL);
87890 + assign_intptr = gimple_build_assign(intptr, fold_build2(BIT_IOR_EXPR, long_long_unsigned_type_node, intptr, kernexec_mask));
87891 + gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
87892 + update_stmt(assign_intptr);
87893 +
87894 + // cast temporary unsigned long back to a temporary fptr variable
87895 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec");
87896 + add_referenced_var(new_fptr);
87897 + mark_sym_for_renaming(new_fptr);
87898 + assign_new_fptr = gimple_build_assign(new_fptr, fold_convert(TREE_TYPE(old_fptr), intptr));
87899 + gsi_insert_before(gsi, assign_new_fptr, GSI_SAME_STMT);
87900 + update_stmt(assign_new_fptr);
87901 +
87902 + // replace call stmt fn with the new fptr
87903 + gimple_call_set_fn(call_stmt, new_fptr);
87904 + update_stmt(call_stmt);
87905 +}
87906 +
87907 +static void kernexec_instrument_fptr_or(gimple_stmt_iterator *gsi)
87908 +{
87909 + gimple asm_or_stmt, call_stmt;
87910 + tree old_fptr, new_fptr, input, output;
87911 + VEC(tree, gc) *inputs = NULL;
87912 + VEC(tree, gc) *outputs = NULL;
87913 +
87914 + call_stmt = gsi_stmt(*gsi);
87915 + old_fptr = gimple_call_fn(call_stmt);
87916 +
87917 + // create temporary fptr variable
87918 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_or");
87919 + add_referenced_var(new_fptr);
87920 + mark_sym_for_renaming(new_fptr);
87921 +
87922 + // build asm volatile("orq %%r10, %0\n\t" : "=r"(new_fptr) : "0"(old_fptr));
87923 + input = build_tree_list(NULL_TREE, build_string(2, "0"));
87924 + input = chainon(NULL_TREE, build_tree_list(input, old_fptr));
87925 + output = build_tree_list(NULL_TREE, build_string(3, "=r"));
87926 + output = chainon(NULL_TREE, build_tree_list(output, new_fptr));
87927 + VEC_safe_push(tree, gc, inputs, input);
87928 + VEC_safe_push(tree, gc, outputs, output);
87929 + asm_or_stmt = gimple_build_asm_vec("orq %%r10, %0\n\t", inputs, outputs, NULL, NULL);
87930 + gimple_asm_set_volatile(asm_or_stmt, true);
87931 + gsi_insert_before(gsi, asm_or_stmt, GSI_SAME_STMT);
87932 + update_stmt(asm_or_stmt);
87933 +
87934 + // replace call stmt fn with the new fptr
87935 + gimple_call_set_fn(call_stmt, new_fptr);
87936 + update_stmt(call_stmt);
87937 +}
87938 +
87939 +/*
87940 + * find all C level function pointer dereferences and forcibly set the highest bit of the pointer
87941 + */
87942 +static unsigned int execute_kernexec_fptr(void)
87943 +{
87944 + basic_block bb;
87945 +
87946 + // 1. loop through BBs and GIMPLE statements
87947 + FOR_EACH_BB(bb) {
87948 + gimple_stmt_iterator gsi;
87949 +
87950 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
87951 + // gimple match: h_1 = get_fptr (); D.2709_3 = h_1 (x_2(D));
87952 + tree fn;
87953 + gimple call_stmt;
87954 +
87955 + // is it a call ...
87956 + call_stmt = gsi_stmt(gsi);
87957 + if (!is_gimple_call(call_stmt))
87958 + continue;
87959 + fn = gimple_call_fn(call_stmt);
87960 + if (TREE_CODE(fn) == ADDR_EXPR)
87961 + continue;
87962 + if (TREE_CODE(fn) != SSA_NAME)
87963 + gcc_unreachable();
87964 +
87965 + // ... through a function pointer
87966 + fn = SSA_NAME_VAR(fn);
87967 + if (TREE_CODE(fn) != VAR_DECL && TREE_CODE(fn) != PARM_DECL)
87968 + continue;
87969 + fn = TREE_TYPE(fn);
87970 + if (TREE_CODE(fn) != POINTER_TYPE)
87971 + continue;
87972 + fn = TREE_TYPE(fn);
87973 + if (TREE_CODE(fn) != FUNCTION_TYPE)
87974 + continue;
87975 +
87976 + kernexec_instrument_fptr(&gsi);
87977 +
87978 +//debug_tree(gimple_call_fn(call_stmt));
87979 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
87980 + }
87981 + }
87982 +
87983 + return 0;
87984 +}
87985 +
87986 +// add special KERNEXEC instrumentation: btsq $63,(%rsp) just before retn
87987 +static void kernexec_instrument_retaddr_bts(rtx insn)
87988 +{
87989 + rtx btsq;
87990 + rtvec argvec, constraintvec, labelvec;
87991 + int line;
87992 +
87993 + // create asm volatile("btsq $63,(%%rsp)":::)
87994 + argvec = rtvec_alloc(0);
87995 + constraintvec = rtvec_alloc(0);
87996 + labelvec = rtvec_alloc(0);
87997 + line = expand_location(RTL_LOCATION(insn)).line;
87998 + btsq = gen_rtx_ASM_OPERANDS(VOIDmode, "btsq $63,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
87999 + MEM_VOLATILE_P(btsq) = 1;
88000 +// RTX_FRAME_RELATED_P(btsq) = 1; // not for ASM_OPERANDS
88001 + emit_insn_before(btsq, insn);
88002 +}
88003 +
88004 +// add special KERNEXEC instrumentation: orq %r10,(%rsp) just before retn
88005 +static void kernexec_instrument_retaddr_or(rtx insn)
88006 +{
88007 + rtx orq;
88008 + rtvec argvec, constraintvec, labelvec;
88009 + int line;
88010 +
88011 + // create asm volatile("orq %%r10,(%%rsp)":::)
88012 + argvec = rtvec_alloc(0);
88013 + constraintvec = rtvec_alloc(0);
88014 + labelvec = rtvec_alloc(0);
88015 + line = expand_location(RTL_LOCATION(insn)).line;
88016 + orq = gen_rtx_ASM_OPERANDS(VOIDmode, "orq %%r10,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
88017 + MEM_VOLATILE_P(orq) = 1;
88018 +// RTX_FRAME_RELATED_P(orq) = 1; // not for ASM_OPERANDS
88019 + emit_insn_before(orq, insn);
88020 +}
88021 +
88022 +/*
88023 + * find all asm level function returns and forcibly set the highest bit of the return address
88024 + */
88025 +static unsigned int execute_kernexec_retaddr(void)
88026 +{
88027 + rtx insn;
88028 +
88029 + // 1. find function returns
88030 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
88031 + // rtl match: (jump_insn 41 40 42 2 (return) fptr.c:42 634 {return_internal} (nil))
88032 + // (jump_insn 12 9 11 2 (parallel [ (return) (unspec [ (0) ] UNSPEC_REP) ]) fptr.c:46 635 {return_internal_long} (nil))
88033 + rtx body;
88034 +
88035 + // is it a retn
88036 + if (!JUMP_P(insn))
88037 + continue;
88038 + body = PATTERN(insn);
88039 + if (GET_CODE(body) == PARALLEL)
88040 + body = XVECEXP(body, 0, 0);
88041 + if (GET_CODE(body) != RETURN)
88042 + continue;
88043 + kernexec_instrument_retaddr(insn);
88044 + }
88045 +
88046 +// print_simple_rtl(stderr, get_insns());
88047 +// print_rtl(stderr, get_insns());
88048 +
88049 + return 0;
88050 +}
88051 +
88052 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
88053 +{
88054 + const char * const plugin_name = plugin_info->base_name;
88055 + const int argc = plugin_info->argc;
88056 + const struct plugin_argument * const argv = plugin_info->argv;
88057 + int i;
88058 + struct register_pass_info kernexec_reload_pass_info = {
88059 + .pass = &kernexec_reload_pass.pass,
88060 + .reference_pass_name = "ssa",
88061 + .ref_pass_instance_number = 0,
88062 + .pos_op = PASS_POS_INSERT_AFTER
88063 + };
88064 + struct register_pass_info kernexec_fptr_pass_info = {
88065 + .pass = &kernexec_fptr_pass.pass,
88066 + .reference_pass_name = "ssa",
88067 + .ref_pass_instance_number = 0,
88068 + .pos_op = PASS_POS_INSERT_AFTER
88069 + };
88070 + struct register_pass_info kernexec_retaddr_pass_info = {
88071 + .pass = &kernexec_retaddr_pass.pass,
88072 + .reference_pass_name = "pro_and_epilogue",
88073 + .ref_pass_instance_number = 0,
88074 + .pos_op = PASS_POS_INSERT_AFTER
88075 + };
88076 +
88077 + if (!plugin_default_version_check(version, &gcc_version)) {
88078 + error(G_("incompatible gcc/plugin versions"));
88079 + return 1;
88080 + }
88081 +
88082 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kernexec_plugin_info);
88083 +
88084 + if (TARGET_64BIT == 0)
88085 + return 0;
88086 +
88087 + for (i = 0; i < argc; ++i) {
88088 + if (!strcmp(argv[i].key, "method")) {
88089 + if (!argv[i].value) {
88090 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
88091 + continue;
88092 + }
88093 + if (!strcmp(argv[i].value, "bts")) {
88094 + kernexec_instrument_fptr = kernexec_instrument_fptr_bts;
88095 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_bts;
88096 + } else if (!strcmp(argv[i].value, "or")) {
88097 + kernexec_instrument_fptr = kernexec_instrument_fptr_or;
88098 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_or;
88099 + fix_register("r10", 1, 1);
88100 + } else
88101 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
88102 + continue;
88103 + }
88104 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
88105 + }
88106 + if (!kernexec_instrument_fptr || !kernexec_instrument_retaddr)
88107 + error(G_("no instrumentation method was selected via '-fplugin-arg-%s-method'"), plugin_name);
88108 +
88109 + if (kernexec_instrument_fptr == kernexec_instrument_fptr_or)
88110 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_reload_pass_info);
88111 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_fptr_pass_info);
88112 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_retaddr_pass_info);
88113 +
88114 + return 0;
88115 +}
88116 diff --git a/tools/gcc/stackleak_plugin.c b/tools/gcc/stackleak_plugin.c
88117 new file mode 100644
88118 index 0000000..8b61031
88119 --- /dev/null
88120 +++ b/tools/gcc/stackleak_plugin.c
88121 @@ -0,0 +1,295 @@
88122 +/*
88123 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
88124 + * Licensed under the GPL v2
88125 + *
88126 + * Note: the choice of the license means that the compilation process is
88127 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
88128 + * but for the kernel it doesn't matter since it doesn't link against
88129 + * any of the gcc libraries
88130 + *
88131 + * gcc plugin to help implement various PaX features
88132 + *
88133 + * - track lowest stack pointer
88134 + *
88135 + * TODO:
88136 + * - initialize all local variables
88137 + *
88138 + * BUGS:
88139 + * - none known
88140 + */
88141 +#include "gcc-plugin.h"
88142 +#include "config.h"
88143 +#include "system.h"
88144 +#include "coretypes.h"
88145 +#include "tree.h"
88146 +#include "tree-pass.h"
88147 +#include "flags.h"
88148 +#include "intl.h"
88149 +#include "toplev.h"
88150 +#include "plugin.h"
88151 +//#include "expr.h" where are you...
88152 +#include "diagnostic.h"
88153 +#include "plugin-version.h"
88154 +#include "tm.h"
88155 +#include "function.h"
88156 +#include "basic-block.h"
88157 +#include "gimple.h"
88158 +#include "rtl.h"
88159 +#include "emit-rtl.h"
88160 +
88161 +extern void print_gimple_stmt(FILE *, gimple, int, int);
88162 +
88163 +int plugin_is_GPL_compatible;
88164 +
88165 +static int track_frame_size = -1;
88166 +static const char track_function[] = "pax_track_stack";
88167 +static const char check_function[] = "pax_check_alloca";
88168 +static bool init_locals;
88169 +
88170 +static struct plugin_info stackleak_plugin_info = {
88171 + .version = "201111150100",
88172 + .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
88173 +// "initialize-locals\t\tforcibly initialize all stack frames\n"
88174 +};
88175 +
88176 +static bool gate_stackleak_track_stack(void);
88177 +static unsigned int execute_stackleak_tree_instrument(void);
88178 +static unsigned int execute_stackleak_final(void);
88179 +
88180 +static struct gimple_opt_pass stackleak_tree_instrument_pass = {
88181 + .pass = {
88182 + .type = GIMPLE_PASS,
88183 + .name = "stackleak_tree_instrument",
88184 + .gate = gate_stackleak_track_stack,
88185 + .execute = execute_stackleak_tree_instrument,
88186 + .sub = NULL,
88187 + .next = NULL,
88188 + .static_pass_number = 0,
88189 + .tv_id = TV_NONE,
88190 + .properties_required = PROP_gimple_leh | PROP_cfg,
88191 + .properties_provided = 0,
88192 + .properties_destroyed = 0,
88193 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
88194 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa
88195 + }
88196 +};
88197 +
88198 +static struct rtl_opt_pass stackleak_final_rtl_opt_pass = {
88199 + .pass = {
88200 + .type = RTL_PASS,
88201 + .name = "stackleak_final",
88202 + .gate = gate_stackleak_track_stack,
88203 + .execute = execute_stackleak_final,
88204 + .sub = NULL,
88205 + .next = NULL,
88206 + .static_pass_number = 0,
88207 + .tv_id = TV_NONE,
88208 + .properties_required = 0,
88209 + .properties_provided = 0,
88210 + .properties_destroyed = 0,
88211 + .todo_flags_start = 0,
88212 + .todo_flags_finish = TODO_dump_func
88213 + }
88214 +};
88215 +
88216 +static bool gate_stackleak_track_stack(void)
88217 +{
88218 + return track_frame_size >= 0;
88219 +}
88220 +
88221 +static void stackleak_check_alloca(gimple_stmt_iterator *gsi)
88222 +{
88223 + gimple check_alloca;
88224 + tree fndecl, fntype, alloca_size;
88225 +
88226 + // insert call to void pax_check_alloca(unsigned long size)
88227 + fntype = build_function_type_list(void_type_node, long_unsigned_type_node, NULL_TREE);
88228 + fndecl = build_fn_decl(check_function, fntype);
88229 + DECL_ASSEMBLER_NAME(fndecl); // for LTO
88230 + alloca_size = gimple_call_arg(gsi_stmt(*gsi), 0);
88231 + check_alloca = gimple_build_call(fndecl, 1, alloca_size);
88232 + gsi_insert_before(gsi, check_alloca, GSI_SAME_STMT);
88233 +}
88234 +
88235 +static void stackleak_add_instrumentation(gimple_stmt_iterator *gsi)
88236 +{
88237 + gimple track_stack;
88238 + tree fndecl, fntype;
88239 +
88240 + // insert call to void pax_track_stack(void)
88241 + fntype = build_function_type_list(void_type_node, NULL_TREE);
88242 + fndecl = build_fn_decl(track_function, fntype);
88243 + DECL_ASSEMBLER_NAME(fndecl); // for LTO
88244 + track_stack = gimple_build_call(fndecl, 0);
88245 + gsi_insert_after(gsi, track_stack, GSI_CONTINUE_LINKING);
88246 +}
88247 +
88248 +#if BUILDING_GCC_VERSION == 4005
88249 +static bool gimple_call_builtin_p(gimple stmt, enum built_in_function code)
88250 +{
88251 + tree fndecl;
88252 +
88253 + if (!is_gimple_call(stmt))
88254 + return false;
88255 + fndecl = gimple_call_fndecl(stmt);
88256 + if (!fndecl)
88257 + return false;
88258 + if (DECL_BUILT_IN_CLASS(fndecl) != BUILT_IN_NORMAL)
88259 + return false;
88260 +// print_node(stderr, "pax", fndecl, 4);
88261 + return DECL_FUNCTION_CODE(fndecl) == code;
88262 +}
88263 +#endif
88264 +
88265 +static bool is_alloca(gimple stmt)
88266 +{
88267 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA))
88268 + return true;
88269 +
88270 +#if BUILDING_GCC_VERSION >= 4007
88271 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA_WITH_ALIGN))
88272 + return true;
88273 +#endif
88274 +
88275 + return false;
88276 +}
88277 +
88278 +static unsigned int execute_stackleak_tree_instrument(void)
88279 +{
88280 + basic_block bb, entry_bb;
88281 + bool prologue_instrumented = false;
88282 +
88283 + entry_bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
88284 +
88285 + // 1. loop through BBs and GIMPLE statements
88286 + FOR_EACH_BB(bb) {
88287 + gimple_stmt_iterator gsi;
88288 +
88289 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
88290 + // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
88291 + if (!is_alloca(gsi_stmt(gsi)))
88292 + continue;
88293 +
88294 + // 2. insert stack overflow check before each __builtin_alloca call
88295 + stackleak_check_alloca(&gsi);
88296 +
88297 + // 3. insert track call after each __builtin_alloca call
88298 + stackleak_add_instrumentation(&gsi);
88299 + if (bb == entry_bb)
88300 + prologue_instrumented = true;
88301 + }
88302 + }
88303 +
88304 + // 4. insert track call at the beginning
88305 + if (!prologue_instrumented) {
88306 + gimple_stmt_iterator gsi;
88307 +
88308 + bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
88309 + if (dom_info_available_p(CDI_DOMINATORS))
88310 + set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR);
88311 + gsi = gsi_start_bb(bb);
88312 + stackleak_add_instrumentation(&gsi);
88313 + }
88314 +
88315 + return 0;
88316 +}
88317 +
88318 +static unsigned int execute_stackleak_final(void)
88319 +{
88320 + rtx insn;
88321 +
88322 + if (cfun->calls_alloca)
88323 + return 0;
88324 +
88325 + // keep calls only if function frame is big enough
88326 + if (get_frame_size() >= track_frame_size)
88327 + return 0;
88328 +
88329 + // 1. find pax_track_stack calls
88330 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
88331 + // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
88332 + rtx body;
88333 +
88334 + if (!CALL_P(insn))
88335 + continue;
88336 + body = PATTERN(insn);
88337 + if (GET_CODE(body) != CALL)
88338 + continue;
88339 + body = XEXP(body, 0);
88340 + if (GET_CODE(body) != MEM)
88341 + continue;
88342 + body = XEXP(body, 0);
88343 + if (GET_CODE(body) != SYMBOL_REF)
88344 + continue;
88345 + if (strcmp(XSTR(body, 0), track_function))
88346 + continue;
88347 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
88348 + // 2. delete call
88349 + insn = delete_insn_and_edges(insn);
88350 +#if BUILDING_GCC_VERSION >= 4007
88351 + if (GET_CODE(insn) == NOTE && NOTE_KIND(insn) == NOTE_INSN_CALL_ARG_LOCATION)
88352 + insn = delete_insn_and_edges(insn);
88353 +#endif
88354 + }
88355 +
88356 +// print_simple_rtl(stderr, get_insns());
88357 +// print_rtl(stderr, get_insns());
88358 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
88359 +
88360 + return 0;
88361 +}
88362 +
88363 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
88364 +{
88365 + const char * const plugin_name = plugin_info->base_name;
88366 + const int argc = plugin_info->argc;
88367 + const struct plugin_argument * const argv = plugin_info->argv;
88368 + int i;
88369 + struct register_pass_info stackleak_tree_instrument_pass_info = {
88370 + .pass = &stackleak_tree_instrument_pass.pass,
88371 +// .reference_pass_name = "tree_profile",
88372 + .reference_pass_name = "optimized",
88373 + .ref_pass_instance_number = 0,
88374 + .pos_op = PASS_POS_INSERT_AFTER
88375 + };
88376 + struct register_pass_info stackleak_final_pass_info = {
88377 + .pass = &stackleak_final_rtl_opt_pass.pass,
88378 + .reference_pass_name = "final",
88379 + .ref_pass_instance_number = 0,
88380 + .pos_op = PASS_POS_INSERT_BEFORE
88381 + };
88382 +
88383 + if (!plugin_default_version_check(version, &gcc_version)) {
88384 + error(G_("incompatible gcc/plugin versions"));
88385 + return 1;
88386 + }
88387 +
88388 + register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info);
88389 +
88390 + for (i = 0; i < argc; ++i) {
88391 + if (!strcmp(argv[i].key, "track-lowest-sp")) {
88392 + if (!argv[i].value) {
88393 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
88394 + continue;
88395 + }
88396 + track_frame_size = atoi(argv[i].value);
88397 + if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
88398 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
88399 + continue;
88400 + }
88401 + if (!strcmp(argv[i].key, "initialize-locals")) {
88402 + if (argv[i].value) {
88403 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
88404 + continue;
88405 + }
88406 + init_locals = true;
88407 + continue;
88408 + }
88409 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
88410 + }
88411 +
88412 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
88413 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
88414 +
88415 + return 0;
88416 +}
88417 diff --git a/usr/gen_init_cpio.c b/usr/gen_init_cpio.c
88418 index 83b3dde..835bee7 100644
88419 --- a/usr/gen_init_cpio.c
88420 +++ b/usr/gen_init_cpio.c
88421 @@ -299,7 +299,7 @@ static int cpio_mkfile(const char *name, const char *location,
88422 int retval;
88423 int rc = -1;
88424 int namesize;
88425 - int i;
88426 + unsigned int i;
88427
88428 mode |= S_IFREG;
88429
88430 @@ -383,9 +383,10 @@ static char *cpio_replace_env(char *new_location)
88431 *env_var = *expanded = '\0';
88432 strncat(env_var, start + 2, end - start - 2);
88433 strncat(expanded, new_location, start - new_location);
88434 - strncat(expanded, getenv(env_var), PATH_MAX);
88435 - strncat(expanded, end + 1, PATH_MAX);
88436 + strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
88437 + strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
88438 strncpy(new_location, expanded, PATH_MAX);
88439 + new_location[PATH_MAX] = 0;
88440 } else
88441 break;
88442 }
88443 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
88444 index 4f3434f..159bc3e 100644
88445 --- a/virt/kvm/kvm_main.c
88446 +++ b/virt/kvm/kvm_main.c
88447 @@ -2494,7 +2494,7 @@ asmlinkage void kvm_handle_fault_on_reboot(void)
88448 if (kvm_rebooting)
88449 /* spin while reset goes on */
88450 while (true)
88451 - ;
88452 + cpu_relax();
88453 /* Fault while not rebooting. We want the trace. */
88454 BUG();
88455 }
88456 @@ -2714,7 +2714,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
88457 kvm_arch_vcpu_put(vcpu);
88458 }
88459
88460 -int kvm_init(void *opaque, unsigned int vcpu_size,
88461 +int kvm_init(const void *opaque, unsigned int vcpu_size,
88462 struct module *module)
88463 {
88464 int r;
88465 @@ -2767,15 +2767,17 @@ int kvm_init(void *opaque, unsigned int vcpu_size,
88466 /* A kmem cache lets us meet the alignment requirements of fx_save. */
88467 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
88468 __alignof__(struct kvm_vcpu),
88469 - 0, NULL);
88470 + SLAB_USERCOPY, NULL);
88471 if (!kvm_vcpu_cache) {
88472 r = -ENOMEM;
88473 goto out_free_5;
88474 }
88475
88476 - kvm_chardev_ops.owner = module;
88477 - kvm_vm_fops.owner = module;
88478 - kvm_vcpu_fops.owner = module;
88479 + pax_open_kernel();
88480 + *(void **)&kvm_chardev_ops.owner = module;
88481 + *(void **)&kvm_vm_fops.owner = module;
88482 + *(void **)&kvm_vcpu_fops.owner = module;
88483 + pax_close_kernel();
88484
88485 r = misc_register(&kvm_dev);
88486 if (r) {