]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blob - test/grsecurity-2.9-2.6.32.58-201203112135.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.9-2.6.32.58-201203112135.patch
1 diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2 index e1efc40..47f0daf 100644
3 --- a/Documentation/dontdiff
4 +++ b/Documentation/dontdiff
5 @@ -1,15 +1,19 @@
6 *.a
7 *.aux
8 *.bin
9 +*.cis
10 *.cpio
11 *.csp
12 +*.dbg
13 *.dsp
14 *.dvi
15 *.elf
16 *.eps
17 *.fw
18 +*.gcno
19 *.gen.S
20 *.gif
21 +*.gmo
22 *.grep
23 *.grp
24 *.gz
25 @@ -38,8 +42,10 @@
26 *.tab.h
27 *.tex
28 *.ver
29 +*.vim
30 *.xml
31 *_MODULES
32 +*_reg_safe.h
33 *_vga16.c
34 *~
35 *.9
36 @@ -49,11 +55,16 @@
37 53c700_d.h
38 CVS
39 ChangeSet
40 +GPATH
41 +GRTAGS
42 +GSYMS
43 +GTAGS
44 Image
45 Kerntypes
46 Module.markers
47 Module.symvers
48 PENDING
49 +PERF*
50 SCCS
51 System.map*
52 TAGS
53 @@ -76,7 +87,11 @@ btfixupprep
54 build
55 bvmlinux
56 bzImage*
57 +capability_names.h
58 +capflags.c
59 classlist.h*
60 +clut_vga16.c
61 +common-cmds.h
62 comp*.log
63 compile.h*
64 conf
65 @@ -84,6 +99,8 @@ config
66 config-*
67 config_data.h*
68 config_data.gz*
69 +config.c
70 +config.tmp
71 conmakehash
72 consolemap_deftbl.c*
73 cpustr.h
74 @@ -97,19 +114,23 @@ elfconfig.h*
75 fixdep
76 fore200e_mkfirm
77 fore200e_pca_fw.c*
78 +gate.lds
79 gconf
80 gen-devlist
81 gen_crc32table
82 gen_init_cpio
83 genksyms
84 *_gray256.c
85 +hash
86 +hid-example
87 ihex2fw
88 ikconfig.h*
89 initramfs_data.cpio
90 +initramfs_data.cpio.bz2
91 initramfs_data.cpio.gz
92 initramfs_list
93 kallsyms
94 -kconfig
95 +kern_constants.h
96 keywords.c
97 ksym.c*
98 ksym.h*
99 @@ -127,13 +148,16 @@ machtypes.h
100 map
101 maui_boot.h
102 mconf
103 +mdp
104 miboot*
105 mk_elfconfig
106 mkboot
107 mkbugboot
108 mkcpustr
109 mkdep
110 +mkpiggy
111 mkprep
112 +mkregtable
113 mktables
114 mktree
115 modpost
116 @@ -149,6 +173,7 @@ patches*
117 pca200e.bin
118 pca200e_ecd.bin2
119 piggy.gz
120 +piggy.S
121 piggyback
122 pnmtologo
123 ppc_defs.h*
124 @@ -157,12 +182,15 @@ qconf
125 raid6altivec*.c
126 raid6int*.c
127 raid6tables.c
128 +regdb.c
129 relocs
130 +rlim_names.h
131 series
132 setup
133 setup.bin
134 setup.elf
135 sImage
136 +slabinfo
137 sm_tbl*
138 split-include
139 syscalltab.h
140 @@ -171,6 +199,7 @@ tftpboot.img
141 timeconst.h
142 times.h*
143 trix_boot.h
144 +user_constants.h
145 utsrelease.h*
146 vdso-syms.lds
147 vdso.lds
148 @@ -186,14 +215,20 @@ version.h*
149 vmlinux
150 vmlinux-*
151 vmlinux.aout
152 +vmlinux.bin.all
153 +vmlinux.bin.bz2
154 vmlinux.lds
155 +vmlinux.relocs
156 +voffset.h
157 vsyscall.lds
158 vsyscall_32.lds
159 wanxlfw.inc
160 uImage
161 unifdef
162 +utsrelease.h
163 wakeup.bin
164 wakeup.elf
165 wakeup.lds
166 zImage*
167 zconf.hash.c
168 +zoffset.h
169 diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
170 index c840e7d..f4c451c 100644
171 --- a/Documentation/kernel-parameters.txt
172 +++ b/Documentation/kernel-parameters.txt
173 @@ -1837,6 +1837,13 @@ and is between 256 and 4096 characters. It is defined in the file
174 the specified number of seconds. This is to be used if
175 your oopses keep scrolling off the screen.
176
177 + pax_nouderef [X86] disables UDEREF. Most likely needed under certain
178 + virtualization environments that don't cope well with the
179 + expand down segment used by UDEREF on X86-32 or the frequent
180 + page table updates on X86-64.
181 +
182 + pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
183 +
184 pcbit= [HW,ISDN]
185
186 pcd. [PARIDE]
187 diff --git a/Makefile b/Makefile
188 index ed78982..bcc432e 100644
189 --- a/Makefile
190 +++ b/Makefile
191 @@ -221,8 +221,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
192
193 HOSTCC = gcc
194 HOSTCXX = g++
195 -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
196 -HOSTCXXFLAGS = -O2
197 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
198 +HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
199 +HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
200
201 # Decide whether to build built-in, modular, or both.
202 # Normally, just do built-in.
203 @@ -376,8 +377,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc
204 # Rules shared between *config targets and build targets
205
206 # Basic helpers built in scripts/
207 -PHONY += scripts_basic
208 -scripts_basic:
209 +PHONY += scripts_basic gcc-plugins
210 +scripts_basic: gcc-plugins
211 $(Q)$(MAKE) $(build)=scripts/basic
212
213 # To avoid any implicit rule to kick in, define an empty command.
214 @@ -403,7 +404,7 @@ endif
215 # of make so .config is not included in this case either (for *config).
216
217 no-dot-config-targets := clean mrproper distclean \
218 - cscope TAGS tags help %docs check% \
219 + cscope gtags TAGS tags help %docs check% \
220 include/linux/version.h headers_% \
221 kernelrelease kernelversion
222
223 @@ -526,6 +527,48 @@ else
224 KBUILD_CFLAGS += -O2
225 endif
226
227 +ifndef DISABLE_PAX_PLUGINS
228 +ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
229 +ifndef DISABLE_PAX_CONSTIFY_PLUGIN
230 +CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
231 +endif
232 +ifdef CONFIG_PAX_MEMORY_STACKLEAK
233 +STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
234 +STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
235 +endif
236 +ifdef CONFIG_KALLOCSTAT_PLUGIN
237 +KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
238 +endif
239 +ifdef CONFIG_PAX_KERNEXEC_PLUGIN
240 +KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
241 +KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
242 +KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
243 +endif
244 +ifdef CONFIG_CHECKER_PLUGIN
245 +ifeq ($(call cc-ifversion, -ge, 0406, y), y)
246 +CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
247 +endif
248 +endif
249 +GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS) $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS)
250 +GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
251 +export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN
252 +ifeq ($(KBUILD_EXTMOD),)
253 +gcc-plugins:
254 + $(Q)$(MAKE) $(build)=tools/gcc
255 +else
256 +gcc-plugins: ;
257 +endif
258 +else
259 +gcc-plugins:
260 +ifeq ($(call cc-ifversion, -ge, 0405, y), y)
261 + $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
262 +else
263 + $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
264 +endif
265 + $(Q)echo "PAX_MEMORY_STACKLEAK and constification will be less secure"
266 +endif
267 +endif
268 +
269 include $(srctree)/arch/$(SRCARCH)/Makefile
270
271 ifneq ($(CONFIG_FRAME_WARN),0)
272 @@ -647,7 +690,7 @@ export mod_strip_cmd
273
274
275 ifeq ($(KBUILD_EXTMOD),)
276 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
277 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
278
279 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
280 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
281 @@ -868,6 +911,8 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
282
283 # The actual objects are generated when descending,
284 # make sure no implicit rule kicks in
285 +$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
286 +$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
287 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
288
289 # Handle descending into subdirectories listed in $(vmlinux-dirs)
290 @@ -877,7 +922,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
291 # Error messages still appears in the original language
292
293 PHONY += $(vmlinux-dirs)
294 -$(vmlinux-dirs): prepare scripts
295 +$(vmlinux-dirs): gcc-plugins prepare scripts
296 $(Q)$(MAKE) $(build)=$@
297
298 # Build the kernel release string
299 @@ -986,6 +1031,7 @@ prepare0: archprepare FORCE
300 $(Q)$(MAKE) $(build)=. missing-syscalls
301
302 # All the preparing..
303 +prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
304 prepare: prepare0
305
306 # The asm symlink changes when $(ARCH) changes.
307 @@ -1127,6 +1173,8 @@ all: modules
308 # using awk while concatenating to the final file.
309
310 PHONY += modules
311 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
312 +modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
313 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux)
314 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
315 @$(kecho) ' Building modules, stage 2.';
316 @@ -1136,7 +1184,7 @@ modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux)
317
318 # Target to prepare building external modules
319 PHONY += modules_prepare
320 -modules_prepare: prepare scripts
321 +modules_prepare: gcc-plugins prepare scripts
322
323 # Target to install modules
324 PHONY += modules_install
325 @@ -1201,7 +1249,7 @@ MRPROPER_FILES += .config .config.old include/asm .version .old_version \
326 include/linux/autoconf.h include/linux/version.h \
327 include/linux/utsrelease.h \
328 include/linux/bounds.h include/asm*/asm-offsets.h \
329 - Module.symvers Module.markers tags TAGS cscope*
330 + Module.symvers Module.markers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS
331
332 # clean - Delete most, but leave enough to build external modules
333 #
334 @@ -1245,7 +1293,7 @@ distclean: mrproper
335 @find $(srctree) $(RCS_FIND_IGNORE) \
336 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
337 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
338 - -o -name '.*.rej' -o -size 0 \
339 + -o -name '.*.rej' -o -name '*.so' -o -size 0 \
340 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
341 -type f -print | xargs rm -f
342
343 @@ -1292,6 +1340,7 @@ help:
344 @echo ' modules_prepare - Set up for building external modules'
345 @echo ' tags/TAGS - Generate tags file for editors'
346 @echo ' cscope - Generate cscope index'
347 + @echo ' gtags - Generate GNU GLOBAL index'
348 @echo ' kernelrelease - Output the release version string'
349 @echo ' kernelversion - Output the version stored in Makefile'
350 @echo ' headers_install - Install sanitised kernel headers to INSTALL_HDR_PATH'; \
351 @@ -1393,6 +1442,8 @@ PHONY += $(module-dirs) modules
352 $(module-dirs): crmodverdir $(objtree)/Module.symvers
353 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
354
355 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
356 +modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
357 modules: $(module-dirs)
358 @$(kecho) ' Building modules, stage 2.';
359 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
360 @@ -1448,7 +1499,7 @@ endif # KBUILD_EXTMOD
361 quiet_cmd_tags = GEN $@
362 cmd_tags = $(CONFIG_SHELL) $(srctree)/scripts/tags.sh $@
363
364 -tags TAGS cscope: FORCE
365 +tags TAGS cscope gtags: FORCE
366 $(call cmd,tags)
367
368 # Scripts to check various things for consistency
369 @@ -1513,17 +1564,21 @@ else
370 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
371 endif
372
373 -%.s: %.c prepare scripts FORCE
374 +%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
375 +%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
376 +%.s: %.c gcc-plugins prepare scripts FORCE
377 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
378 %.i: %.c prepare scripts FORCE
379 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
380 -%.o: %.c prepare scripts FORCE
381 +%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
382 +%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
383 +%.o: %.c gcc-plugins prepare scripts FORCE
384 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
385 %.lst: %.c prepare scripts FORCE
386 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
387 -%.s: %.S prepare scripts FORCE
388 +%.s: %.S gcc-plugins prepare scripts FORCE
389 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
390 -%.o: %.S prepare scripts FORCE
391 +%.o: %.S gcc-plugins prepare scripts FORCE
392 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
393 %.symtypes: %.c prepare scripts FORCE
394 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
395 @@ -1533,11 +1588,15 @@ endif
396 $(cmd_crmodverdir)
397 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
398 $(build)=$(build-dir)
399 -%/: prepare scripts FORCE
400 +%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
401 +%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
402 +%/: gcc-plugins prepare scripts FORCE
403 $(cmd_crmodverdir)
404 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
405 $(build)=$(build-dir)
406 -%.ko: prepare scripts FORCE
407 +%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
408 +%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
409 +%.ko: gcc-plugins prepare scripts FORCE
410 $(cmd_crmodverdir)
411 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
412 $(build)=$(build-dir) $(@:.ko=.o)
413 diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
414 index 610dff4..f396854 100644
415 --- a/arch/alpha/include/asm/atomic.h
416 +++ b/arch/alpha/include/asm/atomic.h
417 @@ -251,6 +251,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
418 #define atomic_dec(v) atomic_sub(1,(v))
419 #define atomic64_dec(v) atomic64_sub(1,(v))
420
421 +#define atomic64_read_unchecked(v) atomic64_read(v)
422 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
423 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
424 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
425 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
426 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
427 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
428 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
429 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
430 +
431 #define smp_mb__before_atomic_dec() smp_mb()
432 #define smp_mb__after_atomic_dec() smp_mb()
433 #define smp_mb__before_atomic_inc() smp_mb()
434 diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
435 index f199e69..af005f5 100644
436 --- a/arch/alpha/include/asm/cache.h
437 +++ b/arch/alpha/include/asm/cache.h
438 @@ -4,19 +4,20 @@
439 #ifndef __ARCH_ALPHA_CACHE_H
440 #define __ARCH_ALPHA_CACHE_H
441
442 +#include <linux/const.h>
443
444 /* Bytes per L1 (data) cache line. */
445 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
446 -# define L1_CACHE_BYTES 64
447 # define L1_CACHE_SHIFT 6
448 #else
449 /* Both EV4 and EV5 are write-through, read-allocate,
450 direct-mapped, physical.
451 */
452 -# define L1_CACHE_BYTES 32
453 # define L1_CACHE_SHIFT 5
454 #endif
455
456 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
457 +
458 #define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
459 #define SMP_CACHE_BYTES L1_CACHE_BYTES
460
461 diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
462 index 5c75c1b..c82f878 100644
463 --- a/arch/alpha/include/asm/elf.h
464 +++ b/arch/alpha/include/asm/elf.h
465 @@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
466
467 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
468
469 +#ifdef CONFIG_PAX_ASLR
470 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
471 +
472 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
473 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
474 +#endif
475 +
476 /* $0 is set by ld.so to a pointer to a function which might be
477 registered using atexit. This provides a mean for the dynamic
478 linker to call DT_FINI functions for shared libraries that have
479 diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
480 index 3f0c59f..cf1e100 100644
481 --- a/arch/alpha/include/asm/pgtable.h
482 +++ b/arch/alpha/include/asm/pgtable.h
483 @@ -101,6 +101,17 @@ struct vm_area_struct;
484 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
485 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
486 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
487 +
488 +#ifdef CONFIG_PAX_PAGEEXEC
489 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
490 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
491 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
492 +#else
493 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
494 +# define PAGE_COPY_NOEXEC PAGE_COPY
495 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
496 +#endif
497 +
498 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
499
500 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
501 diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
502 index ebc3c89..20cfa63 100644
503 --- a/arch/alpha/kernel/module.c
504 +++ b/arch/alpha/kernel/module.c
505 @@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
506
507 /* The small sections were sorted to the end of the segment.
508 The following should definitely cover them. */
509 - gp = (u64)me->module_core + me->core_size - 0x8000;
510 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
511 got = sechdrs[me->arch.gotsecindex].sh_addr;
512
513 for (i = 0; i < n; i++) {
514 diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
515 index a94e49c..d71dd44 100644
516 --- a/arch/alpha/kernel/osf_sys.c
517 +++ b/arch/alpha/kernel/osf_sys.c
518 @@ -1172,7 +1172,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
519 /* At this point: (!vma || addr < vma->vm_end). */
520 if (limit - len < addr)
521 return -ENOMEM;
522 - if (!vma || addr + len <= vma->vm_start)
523 + if (check_heap_stack_gap(vma, addr, len))
524 return addr;
525 addr = vma->vm_end;
526 vma = vma->vm_next;
527 @@ -1208,6 +1208,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
528 merely specific addresses, but regions of memory -- perhaps
529 this feature should be incorporated into all ports? */
530
531 +#ifdef CONFIG_PAX_RANDMMAP
532 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
533 +#endif
534 +
535 if (addr) {
536 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
537 if (addr != (unsigned long) -ENOMEM)
538 @@ -1215,8 +1219,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
539 }
540
541 /* Next, try allocating at TASK_UNMAPPED_BASE. */
542 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
543 - len, limit);
544 + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
545 +
546 if (addr != (unsigned long) -ENOMEM)
547 return addr;
548
549 diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
550 index 00a31de..2ded0f2 100644
551 --- a/arch/alpha/mm/fault.c
552 +++ b/arch/alpha/mm/fault.c
553 @@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
554 __reload_thread(pcb);
555 }
556
557 +#ifdef CONFIG_PAX_PAGEEXEC
558 +/*
559 + * PaX: decide what to do with offenders (regs->pc = fault address)
560 + *
561 + * returns 1 when task should be killed
562 + * 2 when patched PLT trampoline was detected
563 + * 3 when unpatched PLT trampoline was detected
564 + */
565 +static int pax_handle_fetch_fault(struct pt_regs *regs)
566 +{
567 +
568 +#ifdef CONFIG_PAX_EMUPLT
569 + int err;
570 +
571 + do { /* PaX: patched PLT emulation #1 */
572 + unsigned int ldah, ldq, jmp;
573 +
574 + err = get_user(ldah, (unsigned int *)regs->pc);
575 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
576 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
577 +
578 + if (err)
579 + break;
580 +
581 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
582 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
583 + jmp == 0x6BFB0000U)
584 + {
585 + unsigned long r27, addr;
586 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
587 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
588 +
589 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
590 + err = get_user(r27, (unsigned long *)addr);
591 + if (err)
592 + break;
593 +
594 + regs->r27 = r27;
595 + regs->pc = r27;
596 + return 2;
597 + }
598 + } while (0);
599 +
600 + do { /* PaX: patched PLT emulation #2 */
601 + unsigned int ldah, lda, br;
602 +
603 + err = get_user(ldah, (unsigned int *)regs->pc);
604 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
605 + err |= get_user(br, (unsigned int *)(regs->pc+8));
606 +
607 + if (err)
608 + break;
609 +
610 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
611 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
612 + (br & 0xFFE00000U) == 0xC3E00000U)
613 + {
614 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
615 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
616 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
617 +
618 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
619 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
620 + return 2;
621 + }
622 + } while (0);
623 +
624 + do { /* PaX: unpatched PLT emulation */
625 + unsigned int br;
626 +
627 + err = get_user(br, (unsigned int *)regs->pc);
628 +
629 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
630 + unsigned int br2, ldq, nop, jmp;
631 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
632 +
633 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
634 + err = get_user(br2, (unsigned int *)addr);
635 + err |= get_user(ldq, (unsigned int *)(addr+4));
636 + err |= get_user(nop, (unsigned int *)(addr+8));
637 + err |= get_user(jmp, (unsigned int *)(addr+12));
638 + err |= get_user(resolver, (unsigned long *)(addr+16));
639 +
640 + if (err)
641 + break;
642 +
643 + if (br2 == 0xC3600000U &&
644 + ldq == 0xA77B000CU &&
645 + nop == 0x47FF041FU &&
646 + jmp == 0x6B7B0000U)
647 + {
648 + regs->r28 = regs->pc+4;
649 + regs->r27 = addr+16;
650 + regs->pc = resolver;
651 + return 3;
652 + }
653 + }
654 + } while (0);
655 +#endif
656 +
657 + return 1;
658 +}
659 +
660 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
661 +{
662 + unsigned long i;
663 +
664 + printk(KERN_ERR "PAX: bytes at PC: ");
665 + for (i = 0; i < 5; i++) {
666 + unsigned int c;
667 + if (get_user(c, (unsigned int *)pc+i))
668 + printk(KERN_CONT "???????? ");
669 + else
670 + printk(KERN_CONT "%08x ", c);
671 + }
672 + printk("\n");
673 +}
674 +#endif
675
676 /*
677 * This routine handles page faults. It determines the address,
678 @@ -131,8 +249,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
679 good_area:
680 si_code = SEGV_ACCERR;
681 if (cause < 0) {
682 - if (!(vma->vm_flags & VM_EXEC))
683 + if (!(vma->vm_flags & VM_EXEC)) {
684 +
685 +#ifdef CONFIG_PAX_PAGEEXEC
686 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
687 + goto bad_area;
688 +
689 + up_read(&mm->mmap_sem);
690 + switch (pax_handle_fetch_fault(regs)) {
691 +
692 +#ifdef CONFIG_PAX_EMUPLT
693 + case 2:
694 + case 3:
695 + return;
696 +#endif
697 +
698 + }
699 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
700 + do_group_exit(SIGKILL);
701 +#else
702 goto bad_area;
703 +#endif
704 +
705 + }
706 } else if (!cause) {
707 /* Allow reads even for write-only mappings */
708 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
709 diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
710 index b68faef..6dd1496 100644
711 --- a/arch/arm/Kconfig
712 +++ b/arch/arm/Kconfig
713 @@ -14,6 +14,7 @@ config ARM
714 select SYS_SUPPORTS_APM_EMULATION
715 select HAVE_OPROFILE
716 select HAVE_ARCH_KGDB
717 + select GENERIC_ATOMIC64
718 select HAVE_KPROBES if (!XIP_KERNEL)
719 select HAVE_KRETPROBES if (HAVE_KPROBES)
720 select HAVE_FUNCTION_TRACER if (!XIP_KERNEL)
721 diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
722 index d0daeab..ff286a8 100644
723 --- a/arch/arm/include/asm/atomic.h
724 +++ b/arch/arm/include/asm/atomic.h
725 @@ -15,6 +15,10 @@
726 #include <linux/types.h>
727 #include <asm/system.h>
728
729 +#ifdef CONFIG_GENERIC_ATOMIC64
730 +#include <asm-generic/atomic64.h>
731 +#endif
732 +
733 #define ATOMIC_INIT(i) { (i) }
734
735 #ifdef __KERNEL__
736 diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
737 index 66c160b..bca1449 100644
738 --- a/arch/arm/include/asm/cache.h
739 +++ b/arch/arm/include/asm/cache.h
740 @@ -4,8 +4,10 @@
741 #ifndef __ASMARM_CACHE_H
742 #define __ASMARM_CACHE_H
743
744 +#include <linux/const.h>
745 +
746 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
747 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
748 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
749
750 /*
751 * Memory returned by kmalloc() may be used for DMA, so we must make
752 diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
753 index 3d0cdd2..19957c5 100644
754 --- a/arch/arm/include/asm/cacheflush.h
755 +++ b/arch/arm/include/asm/cacheflush.h
756 @@ -216,13 +216,13 @@ struct cpu_cache_fns {
757 void (*dma_inv_range)(const void *, const void *);
758 void (*dma_clean_range)(const void *, const void *);
759 void (*dma_flush_range)(const void *, const void *);
760 -};
761 +} __no_const;
762
763 struct outer_cache_fns {
764 void (*inv_range)(unsigned long, unsigned long);
765 void (*clean_range)(unsigned long, unsigned long);
766 void (*flush_range)(unsigned long, unsigned long);
767 -};
768 +} __no_const;
769
770 /*
771 * Select the calling method
772 diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
773 index 6aac3f5..265536b 100644
774 --- a/arch/arm/include/asm/elf.h
775 +++ b/arch/arm/include/asm/elf.h
776 @@ -109,7 +109,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
777 the loader. We need to make sure that it is out of the way of the program
778 that it will "exec", and that there is sufficient room for the brk. */
779
780 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
781 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
782 +
783 +#ifdef CONFIG_PAX_ASLR
784 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
785 +
786 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
787 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
788 +#endif
789
790 /* When the program starts, a1 contains a pointer to a function to be
791 registered with atexit, as per the SVR4 ABI. A value of 0 means we
792 diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
793 index c019949..388fdd1 100644
794 --- a/arch/arm/include/asm/kmap_types.h
795 +++ b/arch/arm/include/asm/kmap_types.h
796 @@ -19,6 +19,7 @@ enum km_type {
797 KM_SOFTIRQ0,
798 KM_SOFTIRQ1,
799 KM_L2_CACHE,
800 + KM_CLEARPAGE,
801 KM_TYPE_NR
802 };
803
804 diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
805 index 3a32af4..c8def8a 100644
806 --- a/arch/arm/include/asm/page.h
807 +++ b/arch/arm/include/asm/page.h
808 @@ -122,7 +122,7 @@ struct cpu_user_fns {
809 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
810 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
811 unsigned long vaddr);
812 -};
813 +} __no_const;
814
815 #ifdef MULTI_USER
816 extern struct cpu_user_fns cpu_user;
817 diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
818 index 1d6bd40..fba0cb9 100644
819 --- a/arch/arm/include/asm/uaccess.h
820 +++ b/arch/arm/include/asm/uaccess.h
821 @@ -22,6 +22,8 @@
822 #define VERIFY_READ 0
823 #define VERIFY_WRITE 1
824
825 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
826 +
827 /*
828 * The exception table consists of pairs of addresses: the first is the
829 * address of an instruction that is allowed to fault, and the second is
830 @@ -387,8 +389,23 @@ do { \
831
832
833 #ifdef CONFIG_MMU
834 -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
835 -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
836 +extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
837 +extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
838 +
839 +static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
840 +{
841 + if (!__builtin_constant_p(n))
842 + check_object_size(to, n, false);
843 + return ___copy_from_user(to, from, n);
844 +}
845 +
846 +static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
847 +{
848 + if (!__builtin_constant_p(n))
849 + check_object_size(from, n, true);
850 + return ___copy_to_user(to, from, n);
851 +}
852 +
853 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
854 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
855 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
856 @@ -403,6 +420,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
857
858 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
859 {
860 + if ((long)n < 0)
861 + return n;
862 +
863 if (access_ok(VERIFY_READ, from, n))
864 n = __copy_from_user(to, from, n);
865 else /* security hole - plug it */
866 @@ -412,6 +432,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
867
868 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
869 {
870 + if ((long)n < 0)
871 + return n;
872 +
873 if (access_ok(VERIFY_WRITE, to, n))
874 n = __copy_to_user(to, from, n);
875 return n;
876 diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
877 index 0e62770..e2c2cd6 100644
878 --- a/arch/arm/kernel/armksyms.c
879 +++ b/arch/arm/kernel/armksyms.c
880 @@ -118,8 +118,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
881 #ifdef CONFIG_MMU
882 EXPORT_SYMBOL(copy_page);
883
884 -EXPORT_SYMBOL(__copy_from_user);
885 -EXPORT_SYMBOL(__copy_to_user);
886 +EXPORT_SYMBOL(___copy_from_user);
887 +EXPORT_SYMBOL(___copy_to_user);
888 EXPORT_SYMBOL(__clear_user);
889
890 EXPORT_SYMBOL(__get_user_1);
891 diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
892 index ba8ccfe..2dc34dc 100644
893 --- a/arch/arm/kernel/kgdb.c
894 +++ b/arch/arm/kernel/kgdb.c
895 @@ -190,7 +190,7 @@ void kgdb_arch_exit(void)
896 * and we handle the normal undef case within the do_undefinstr
897 * handler.
898 */
899 -struct kgdb_arch arch_kgdb_ops = {
900 +const struct kgdb_arch arch_kgdb_ops = {
901 #ifndef __ARMEB__
902 .gdb_bpt_instr = {0xfe, 0xde, 0xff, 0xe7}
903 #else /* ! __ARMEB__ */
904 diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
905 index c6c57b6..bedcd3c 100644
906 --- a/arch/arm/kernel/setup.c
907 +++ b/arch/arm/kernel/setup.c
908 @@ -95,13 +95,13 @@ struct processor processor;
909 struct cpu_tlb_fns cpu_tlb;
910 #endif
911 #ifdef MULTI_USER
912 -struct cpu_user_fns cpu_user;
913 +struct cpu_user_fns cpu_user __read_only;
914 #endif
915 #ifdef MULTI_CACHE
916 -struct cpu_cache_fns cpu_cache;
917 +struct cpu_cache_fns cpu_cache __read_only;
918 #endif
919 #ifdef CONFIG_OUTER_CACHE
920 -struct outer_cache_fns outer_cache;
921 +struct outer_cache_fns outer_cache __read_only;
922 #endif
923
924 struct stack {
925 diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
926 index 3f361a7..6e806e1 100644
927 --- a/arch/arm/kernel/traps.c
928 +++ b/arch/arm/kernel/traps.c
929 @@ -247,6 +247,8 @@ static void __die(const char *str, int err, struct thread_info *thread, struct p
930
931 DEFINE_SPINLOCK(die_lock);
932
933 +extern void gr_handle_kernel_exploit(void);
934 +
935 /*
936 * This function is protected against re-entrancy.
937 */
938 @@ -271,6 +273,8 @@ NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
939 if (panic_on_oops)
940 panic("Fatal exception");
941
942 + gr_handle_kernel_exploit();
943 +
944 do_exit(SIGSEGV);
945 }
946
947 diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
948 index e4fe124..0fc246b 100644
949 --- a/arch/arm/lib/copy_from_user.S
950 +++ b/arch/arm/lib/copy_from_user.S
951 @@ -16,7 +16,7 @@
952 /*
953 * Prototype:
954 *
955 - * size_t __copy_from_user(void *to, const void *from, size_t n)
956 + * size_t ___copy_from_user(void *to, const void *from, size_t n)
957 *
958 * Purpose:
959 *
960 @@ -84,11 +84,11 @@
961
962 .text
963
964 -ENTRY(__copy_from_user)
965 +ENTRY(___copy_from_user)
966
967 #include "copy_template.S"
968
969 -ENDPROC(__copy_from_user)
970 +ENDPROC(___copy_from_user)
971
972 .section .fixup,"ax"
973 .align 0
974 diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
975 index 1a71e15..ac7b258 100644
976 --- a/arch/arm/lib/copy_to_user.S
977 +++ b/arch/arm/lib/copy_to_user.S
978 @@ -16,7 +16,7 @@
979 /*
980 * Prototype:
981 *
982 - * size_t __copy_to_user(void *to, const void *from, size_t n)
983 + * size_t ___copy_to_user(void *to, const void *from, size_t n)
984 *
985 * Purpose:
986 *
987 @@ -88,11 +88,11 @@
988 .text
989
990 ENTRY(__copy_to_user_std)
991 -WEAK(__copy_to_user)
992 +WEAK(___copy_to_user)
993
994 #include "copy_template.S"
995
996 -ENDPROC(__copy_to_user)
997 +ENDPROC(___copy_to_user)
998
999 .section .fixup,"ax"
1000 .align 0
1001 diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
1002 index ffdd274..91017b6 100644
1003 --- a/arch/arm/lib/uaccess.S
1004 +++ b/arch/arm/lib/uaccess.S
1005 @@ -19,7 +19,7 @@
1006
1007 #define PAGE_SHIFT 12
1008
1009 -/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
1010 +/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
1011 * Purpose : copy a block to user memory from kernel memory
1012 * Params : to - user memory
1013 * : from - kernel memory
1014 @@ -39,7 +39,7 @@ USER( strgtbt r3, [r0], #1) @ May fault
1015 sub r2, r2, ip
1016 b .Lc2u_dest_aligned
1017
1018 -ENTRY(__copy_to_user)
1019 +ENTRY(___copy_to_user)
1020 stmfd sp!, {r2, r4 - r7, lr}
1021 cmp r2, #4
1022 blt .Lc2u_not_enough
1023 @@ -277,14 +277,14 @@ USER( strgebt r3, [r0], #1) @ May fault
1024 ldrgtb r3, [r1], #0
1025 USER( strgtbt r3, [r0], #1) @ May fault
1026 b .Lc2u_finished
1027 -ENDPROC(__copy_to_user)
1028 +ENDPROC(___copy_to_user)
1029
1030 .section .fixup,"ax"
1031 .align 0
1032 9001: ldmfd sp!, {r0, r4 - r7, pc}
1033 .previous
1034
1035 -/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
1036 +/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
1037 * Purpose : copy a block from user memory to kernel memory
1038 * Params : to - kernel memory
1039 * : from - user memory
1040 @@ -303,7 +303,7 @@ USER( ldrgtbt r3, [r1], #1) @ May fault
1041 sub r2, r2, ip
1042 b .Lcfu_dest_aligned
1043
1044 -ENTRY(__copy_from_user)
1045 +ENTRY(___copy_from_user)
1046 stmfd sp!, {r0, r2, r4 - r7, lr}
1047 cmp r2, #4
1048 blt .Lcfu_not_enough
1049 @@ -543,7 +543,7 @@ USER( ldrgebt r3, [r1], #1) @ May fault
1050 USER( ldrgtbt r3, [r1], #1) @ May fault
1051 strgtb r3, [r0], #1
1052 b .Lcfu_finished
1053 -ENDPROC(__copy_from_user)
1054 +ENDPROC(___copy_from_user)
1055
1056 .section .fixup,"ax"
1057 .align 0
1058 diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
1059 index 6b967ff..67d5b2b 100644
1060 --- a/arch/arm/lib/uaccess_with_memcpy.c
1061 +++ b/arch/arm/lib/uaccess_with_memcpy.c
1062 @@ -97,7 +97,7 @@ out:
1063 }
1064
1065 unsigned long
1066 -__copy_to_user(void __user *to, const void *from, unsigned long n)
1067 +___copy_to_user(void __user *to, const void *from, unsigned long n)
1068 {
1069 /*
1070 * This test is stubbed out of the main function above to keep
1071 diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
1072 index 4028724..beec230 100644
1073 --- a/arch/arm/mach-at91/pm.c
1074 +++ b/arch/arm/mach-at91/pm.c
1075 @@ -348,7 +348,7 @@ static void at91_pm_end(void)
1076 }
1077
1078
1079 -static struct platform_suspend_ops at91_pm_ops ={
1080 +static const struct platform_suspend_ops at91_pm_ops ={
1081 .valid = at91_pm_valid_state,
1082 .begin = at91_pm_begin,
1083 .enter = at91_pm_enter,
1084 diff --git a/arch/arm/mach-omap1/pm.c b/arch/arm/mach-omap1/pm.c
1085 index 5218943..0a34552 100644
1086 --- a/arch/arm/mach-omap1/pm.c
1087 +++ b/arch/arm/mach-omap1/pm.c
1088 @@ -647,7 +647,7 @@ static struct irqaction omap_wakeup_irq = {
1089
1090
1091
1092 -static struct platform_suspend_ops omap_pm_ops ={
1093 +static const struct platform_suspend_ops omap_pm_ops ={
1094 .prepare = omap_pm_prepare,
1095 .enter = omap_pm_enter,
1096 .finish = omap_pm_finish,
1097 diff --git a/arch/arm/mach-omap2/pm24xx.c b/arch/arm/mach-omap2/pm24xx.c
1098 index bff5c4e..d4c649b 100644
1099 --- a/arch/arm/mach-omap2/pm24xx.c
1100 +++ b/arch/arm/mach-omap2/pm24xx.c
1101 @@ -326,7 +326,7 @@ static void omap2_pm_finish(void)
1102 enable_hlt();
1103 }
1104
1105 -static struct platform_suspend_ops omap_pm_ops = {
1106 +static const struct platform_suspend_ops omap_pm_ops = {
1107 .prepare = omap2_pm_prepare,
1108 .enter = omap2_pm_enter,
1109 .finish = omap2_pm_finish,
1110 diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
1111 index 8946319..7d3e661 100644
1112 --- a/arch/arm/mach-omap2/pm34xx.c
1113 +++ b/arch/arm/mach-omap2/pm34xx.c
1114 @@ -401,7 +401,7 @@ static void omap3_pm_end(void)
1115 return;
1116 }
1117
1118 -static struct platform_suspend_ops omap_pm_ops = {
1119 +static const struct platform_suspend_ops omap_pm_ops = {
1120 .begin = omap3_pm_begin,
1121 .end = omap3_pm_end,
1122 .prepare = omap3_pm_prepare,
1123 diff --git a/arch/arm/mach-pnx4008/pm.c b/arch/arm/mach-pnx4008/pm.c
1124 index b3d8d53..6e68ebc 100644
1125 --- a/arch/arm/mach-pnx4008/pm.c
1126 +++ b/arch/arm/mach-pnx4008/pm.c
1127 @@ -116,7 +116,7 @@ static int pnx4008_pm_valid(suspend_state_t state)
1128 (state == PM_SUSPEND_MEM);
1129 }
1130
1131 -static struct platform_suspend_ops pnx4008_pm_ops = {
1132 +static const struct platform_suspend_ops pnx4008_pm_ops = {
1133 .enter = pnx4008_pm_enter,
1134 .valid = pnx4008_pm_valid,
1135 };
1136 diff --git a/arch/arm/mach-pxa/pm.c b/arch/arm/mach-pxa/pm.c
1137 index 7693355..9beb00a 100644
1138 --- a/arch/arm/mach-pxa/pm.c
1139 +++ b/arch/arm/mach-pxa/pm.c
1140 @@ -95,7 +95,7 @@ void pxa_pm_finish(void)
1141 pxa_cpu_pm_fns->finish();
1142 }
1143
1144 -static struct platform_suspend_ops pxa_pm_ops = {
1145 +static const struct platform_suspend_ops pxa_pm_ops = {
1146 .valid = pxa_pm_valid,
1147 .enter = pxa_pm_enter,
1148 .prepare = pxa_pm_prepare,
1149 diff --git a/arch/arm/mach-pxa/sharpsl_pm.c b/arch/arm/mach-pxa/sharpsl_pm.c
1150 index 629e05d..06be589 100644
1151 --- a/arch/arm/mach-pxa/sharpsl_pm.c
1152 +++ b/arch/arm/mach-pxa/sharpsl_pm.c
1153 @@ -891,7 +891,7 @@ static void sharpsl_apm_get_power_status(struct apm_power_info *info)
1154 }
1155
1156 #ifdef CONFIG_PM
1157 -static struct platform_suspend_ops sharpsl_pm_ops = {
1158 +static const struct platform_suspend_ops sharpsl_pm_ops = {
1159 .prepare = pxa_pm_prepare,
1160 .finish = pxa_pm_finish,
1161 .enter = corgi_pxa_pm_enter,
1162 diff --git a/arch/arm/mach-sa1100/pm.c b/arch/arm/mach-sa1100/pm.c
1163 index c83fdc8..ab9fc44 100644
1164 --- a/arch/arm/mach-sa1100/pm.c
1165 +++ b/arch/arm/mach-sa1100/pm.c
1166 @@ -120,7 +120,7 @@ unsigned long sleep_phys_sp(void *sp)
1167 return virt_to_phys(sp);
1168 }
1169
1170 -static struct platform_suspend_ops sa11x0_pm_ops = {
1171 +static const struct platform_suspend_ops sa11x0_pm_ops = {
1172 .enter = sa11x0_pm_enter,
1173 .valid = suspend_valid_only_mem,
1174 };
1175 diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
1176 index 3191cd6..c0739db 100644
1177 --- a/arch/arm/mm/fault.c
1178 +++ b/arch/arm/mm/fault.c
1179 @@ -166,6 +166,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
1180 }
1181 #endif
1182
1183 +#ifdef CONFIG_PAX_PAGEEXEC
1184 + if (fsr & FSR_LNX_PF) {
1185 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
1186 + do_group_exit(SIGKILL);
1187 + }
1188 +#endif
1189 +
1190 tsk->thread.address = addr;
1191 tsk->thread.error_code = fsr;
1192 tsk->thread.trap_no = 14;
1193 @@ -357,6 +364,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
1194 }
1195 #endif /* CONFIG_MMU */
1196
1197 +#ifdef CONFIG_PAX_PAGEEXEC
1198 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1199 +{
1200 + long i;
1201 +
1202 + printk(KERN_ERR "PAX: bytes at PC: ");
1203 + for (i = 0; i < 20; i++) {
1204 + unsigned char c;
1205 + if (get_user(c, (__force unsigned char __user *)pc+i))
1206 + printk(KERN_CONT "?? ");
1207 + else
1208 + printk(KERN_CONT "%02x ", c);
1209 + }
1210 + printk("\n");
1211 +
1212 + printk(KERN_ERR "PAX: bytes at SP-4: ");
1213 + for (i = -1; i < 20; i++) {
1214 + unsigned long c;
1215 + if (get_user(c, (__force unsigned long __user *)sp+i))
1216 + printk(KERN_CONT "???????? ");
1217 + else
1218 + printk(KERN_CONT "%08lx ", c);
1219 + }
1220 + printk("\n");
1221 +}
1222 +#endif
1223 +
1224 /*
1225 * First Level Translation Fault Handler
1226 *
1227 diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
1228 index f5abc51..7ec524c 100644
1229 --- a/arch/arm/mm/mmap.c
1230 +++ b/arch/arm/mm/mmap.c
1231 @@ -63,6 +63,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1232 if (len > TASK_SIZE)
1233 return -ENOMEM;
1234
1235 +#ifdef CONFIG_PAX_RANDMMAP
1236 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
1237 +#endif
1238 +
1239 if (addr) {
1240 if (do_align)
1241 addr = COLOUR_ALIGN(addr, pgoff);
1242 @@ -70,15 +74,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1243 addr = PAGE_ALIGN(addr);
1244
1245 vma = find_vma(mm, addr);
1246 - if (TASK_SIZE - len >= addr &&
1247 - (!vma || addr + len <= vma->vm_start))
1248 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1249 return addr;
1250 }
1251 if (len > mm->cached_hole_size) {
1252 - start_addr = addr = mm->free_area_cache;
1253 + start_addr = addr = mm->free_area_cache;
1254 } else {
1255 - start_addr = addr = TASK_UNMAPPED_BASE;
1256 - mm->cached_hole_size = 0;
1257 + start_addr = addr = mm->mmap_base;
1258 + mm->cached_hole_size = 0;
1259 }
1260
1261 full_search:
1262 @@ -94,14 +97,14 @@ full_search:
1263 * Start a new search - just in case we missed
1264 * some holes.
1265 */
1266 - if (start_addr != TASK_UNMAPPED_BASE) {
1267 - start_addr = addr = TASK_UNMAPPED_BASE;
1268 + if (start_addr != mm->mmap_base) {
1269 + start_addr = addr = mm->mmap_base;
1270 mm->cached_hole_size = 0;
1271 goto full_search;
1272 }
1273 return -ENOMEM;
1274 }
1275 - if (!vma || addr + len <= vma->vm_start) {
1276 + if (check_heap_stack_gap(vma, addr, len)) {
1277 /*
1278 * Remember the place where we stopped the search:
1279 */
1280 diff --git a/arch/arm/plat-s3c/pm.c b/arch/arm/plat-s3c/pm.c
1281 index 8d97db2..b66cfa5 100644
1282 --- a/arch/arm/plat-s3c/pm.c
1283 +++ b/arch/arm/plat-s3c/pm.c
1284 @@ -355,7 +355,7 @@ static void s3c_pm_finish(void)
1285 s3c_pm_check_cleanup();
1286 }
1287
1288 -static struct platform_suspend_ops s3c_pm_ops = {
1289 +static const struct platform_suspend_ops s3c_pm_ops = {
1290 .enter = s3c_pm_enter,
1291 .prepare = s3c_pm_prepare,
1292 .finish = s3c_pm_finish,
1293 diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
1294 index d3cf35a..0ba6053 100644
1295 --- a/arch/avr32/include/asm/cache.h
1296 +++ b/arch/avr32/include/asm/cache.h
1297 @@ -1,8 +1,10 @@
1298 #ifndef __ASM_AVR32_CACHE_H
1299 #define __ASM_AVR32_CACHE_H
1300
1301 +#include <linux/const.h>
1302 +
1303 #define L1_CACHE_SHIFT 5
1304 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1305 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1306
1307 /*
1308 * Memory returned by kmalloc() may be used for DMA, so we must make
1309 diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
1310 index d5d1d41..856e2ed 100644
1311 --- a/arch/avr32/include/asm/elf.h
1312 +++ b/arch/avr32/include/asm/elf.h
1313 @@ -85,8 +85,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
1314 the loader. We need to make sure that it is out of the way of the program
1315 that it will "exec", and that there is sufficient room for the brk. */
1316
1317 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1318 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1319
1320 +#ifdef CONFIG_PAX_ASLR
1321 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
1322 +
1323 +#define PAX_DELTA_MMAP_LEN 15
1324 +#define PAX_DELTA_STACK_LEN 15
1325 +#endif
1326
1327 /* This yields a mask that user programs can use to figure out what
1328 instruction set this CPU supports. This could be done in user space,
1329 diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
1330 index b7f5c68..556135c 100644
1331 --- a/arch/avr32/include/asm/kmap_types.h
1332 +++ b/arch/avr32/include/asm/kmap_types.h
1333 @@ -22,7 +22,8 @@ D(10) KM_IRQ0,
1334 D(11) KM_IRQ1,
1335 D(12) KM_SOFTIRQ0,
1336 D(13) KM_SOFTIRQ1,
1337 -D(14) KM_TYPE_NR
1338 +D(14) KM_CLEARPAGE,
1339 +D(15) KM_TYPE_NR
1340 };
1341
1342 #undef D
1343 diff --git a/arch/avr32/mach-at32ap/pm.c b/arch/avr32/mach-at32ap/pm.c
1344 index f021edf..32d680e 100644
1345 --- a/arch/avr32/mach-at32ap/pm.c
1346 +++ b/arch/avr32/mach-at32ap/pm.c
1347 @@ -176,7 +176,7 @@ out:
1348 return 0;
1349 }
1350
1351 -static struct platform_suspend_ops avr32_pm_ops = {
1352 +static const struct platform_suspend_ops avr32_pm_ops = {
1353 .valid = avr32_pm_valid_state,
1354 .enter = avr32_pm_enter,
1355 };
1356 diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
1357 index b61d86d..e292c7f 100644
1358 --- a/arch/avr32/mm/fault.c
1359 +++ b/arch/avr32/mm/fault.c
1360 @@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
1361
1362 int exception_trace = 1;
1363
1364 +#ifdef CONFIG_PAX_PAGEEXEC
1365 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1366 +{
1367 + unsigned long i;
1368 +
1369 + printk(KERN_ERR "PAX: bytes at PC: ");
1370 + for (i = 0; i < 20; i++) {
1371 + unsigned char c;
1372 + if (get_user(c, (unsigned char *)pc+i))
1373 + printk(KERN_CONT "???????? ");
1374 + else
1375 + printk(KERN_CONT "%02x ", c);
1376 + }
1377 + printk("\n");
1378 +}
1379 +#endif
1380 +
1381 /*
1382 * This routine handles page faults. It determines the address and the
1383 * problem, and then passes it off to one of the appropriate routines.
1384 @@ -157,6 +174,16 @@ bad_area:
1385 up_read(&mm->mmap_sem);
1386
1387 if (user_mode(regs)) {
1388 +
1389 +#ifdef CONFIG_PAX_PAGEEXEC
1390 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
1391 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
1392 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
1393 + do_group_exit(SIGKILL);
1394 + }
1395 + }
1396 +#endif
1397 +
1398 if (exception_trace && printk_ratelimit())
1399 printk("%s%s[%d]: segfault at %08lx pc %08lx "
1400 "sp %08lx ecr %lu\n",
1401 diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
1402 index 93f6c63..d144953 100644
1403 --- a/arch/blackfin/include/asm/cache.h
1404 +++ b/arch/blackfin/include/asm/cache.h
1405 @@ -7,12 +7,14 @@
1406 #ifndef __ARCH_BLACKFIN_CACHE_H
1407 #define __ARCH_BLACKFIN_CACHE_H
1408
1409 +#include <linux/const.h>
1410 +
1411 /*
1412 * Bytes per L1 cache line
1413 * Blackfin loads 32 bytes for cache
1414 */
1415 #define L1_CACHE_SHIFT 5
1416 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1417 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1418 #define SMP_CACHE_BYTES L1_CACHE_BYTES
1419
1420 #define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
1421 diff --git a/arch/blackfin/kernel/kgdb.c b/arch/blackfin/kernel/kgdb.c
1422 index cce79d0..c406c85 100644
1423 --- a/arch/blackfin/kernel/kgdb.c
1424 +++ b/arch/blackfin/kernel/kgdb.c
1425 @@ -428,7 +428,7 @@ int kgdb_arch_handle_exception(int vector, int signo,
1426 return -1; /* this means that we do not want to exit from the handler */
1427 }
1428
1429 -struct kgdb_arch arch_kgdb_ops = {
1430 +const struct kgdb_arch arch_kgdb_ops = {
1431 .gdb_bpt_instr = {0xa1},
1432 #ifdef CONFIG_SMP
1433 .flags = KGDB_HW_BREAKPOINT|KGDB_THR_PROC_SWAP,
1434 diff --git a/arch/blackfin/mach-common/pm.c b/arch/blackfin/mach-common/pm.c
1435 index 8837be4..b2fb413 100644
1436 --- a/arch/blackfin/mach-common/pm.c
1437 +++ b/arch/blackfin/mach-common/pm.c
1438 @@ -255,7 +255,7 @@ static int bfin_pm_enter(suspend_state_t state)
1439 return 0;
1440 }
1441
1442 -struct platform_suspend_ops bfin_pm_ops = {
1443 +const struct platform_suspend_ops bfin_pm_ops = {
1444 .enter = bfin_pm_enter,
1445 .valid = bfin_pm_valid,
1446 };
1447 diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
1448 index aea2718..3639a60 100644
1449 --- a/arch/cris/include/arch-v10/arch/cache.h
1450 +++ b/arch/cris/include/arch-v10/arch/cache.h
1451 @@ -1,8 +1,9 @@
1452 #ifndef _ASM_ARCH_CACHE_H
1453 #define _ASM_ARCH_CACHE_H
1454
1455 +#include <linux/const.h>
1456 /* Etrax 100LX have 32-byte cache-lines. */
1457 -#define L1_CACHE_BYTES 32
1458 #define L1_CACHE_SHIFT 5
1459 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1460
1461 #endif /* _ASM_ARCH_CACHE_H */
1462 diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
1463 index dfc7305..417f5b3 100644
1464 --- a/arch/cris/include/arch-v32/arch/cache.h
1465 +++ b/arch/cris/include/arch-v32/arch/cache.h
1466 @@ -1,11 +1,12 @@
1467 #ifndef _ASM_CRIS_ARCH_CACHE_H
1468 #define _ASM_CRIS_ARCH_CACHE_H
1469
1470 +#include <linux/const.h>
1471 #include <arch/hwregs/dma.h>
1472
1473 /* A cache-line is 32 bytes. */
1474 -#define L1_CACHE_BYTES 32
1475 #define L1_CACHE_SHIFT 5
1476 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1477
1478 void flush_dma_list(dma_descr_data *descr);
1479 void flush_dma_descr(dma_descr_data *descr, int flush_buf);
1480 diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
1481 index 00a57af..c3ef0cd 100644
1482 --- a/arch/frv/include/asm/atomic.h
1483 +++ b/arch/frv/include/asm/atomic.h
1484 @@ -241,6 +241,16 @@ extern uint32_t __xchg_32(uint32_t i, volatile void *v);
1485 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
1486 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
1487
1488 +#define atomic64_read_unchecked(v) atomic64_read(v)
1489 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
1490 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
1491 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
1492 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
1493 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
1494 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
1495 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
1496 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
1497 +
1498 static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
1499 {
1500 int c, old;
1501 diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
1502 index 7dc0f0f..1e6a620 100644
1503 --- a/arch/frv/include/asm/cache.h
1504 +++ b/arch/frv/include/asm/cache.h
1505 @@ -12,10 +12,11 @@
1506 #ifndef __ASM_CACHE_H
1507 #define __ASM_CACHE_H
1508
1509 +#include <linux/const.h>
1510
1511 /* bytes per L1 cache line */
1512 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
1513 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1514 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1515
1516 #define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
1517
1518 diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
1519 index f8e16b2..c73ff79 100644
1520 --- a/arch/frv/include/asm/kmap_types.h
1521 +++ b/arch/frv/include/asm/kmap_types.h
1522 @@ -23,6 +23,7 @@ enum km_type {
1523 KM_IRQ1,
1524 KM_SOFTIRQ0,
1525 KM_SOFTIRQ1,
1526 + KM_CLEARPAGE,
1527 KM_TYPE_NR
1528 };
1529
1530 diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
1531 index 385fd30..6c3d97e 100644
1532 --- a/arch/frv/mm/elf-fdpic.c
1533 +++ b/arch/frv/mm/elf-fdpic.c
1534 @@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1535 if (addr) {
1536 addr = PAGE_ALIGN(addr);
1537 vma = find_vma(current->mm, addr);
1538 - if (TASK_SIZE - len >= addr &&
1539 - (!vma || addr + len <= vma->vm_start))
1540 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1541 goto success;
1542 }
1543
1544 @@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1545 for (; vma; vma = vma->vm_next) {
1546 if (addr > limit)
1547 break;
1548 - if (addr + len <= vma->vm_start)
1549 + if (check_heap_stack_gap(vma, addr, len))
1550 goto success;
1551 addr = vma->vm_end;
1552 }
1553 @@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1554 for (; vma; vma = vma->vm_next) {
1555 if (addr > limit)
1556 break;
1557 - if (addr + len <= vma->vm_start)
1558 + if (check_heap_stack_gap(vma, addr, len))
1559 goto success;
1560 addr = vma->vm_end;
1561 }
1562 diff --git a/arch/h8300/include/asm/cache.h b/arch/h8300/include/asm/cache.h
1563 index c635028..6d9445a 100644
1564 --- a/arch/h8300/include/asm/cache.h
1565 +++ b/arch/h8300/include/asm/cache.h
1566 @@ -1,8 +1,10 @@
1567 #ifndef __ARCH_H8300_CACHE_H
1568 #define __ARCH_H8300_CACHE_H
1569
1570 +#include <linux/const.h>
1571 +
1572 /* bytes per L1 cache line */
1573 -#define L1_CACHE_BYTES 4
1574 +#define L1_CACHE_BYTES _AC(4,UL)
1575
1576 /* m68k-elf-gcc 2.95.2 doesn't like these */
1577
1578 diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c
1579 index e4a80d8..11a7ea1 100644
1580 --- a/arch/ia64/hp/common/hwsw_iommu.c
1581 +++ b/arch/ia64/hp/common/hwsw_iommu.c
1582 @@ -17,7 +17,7 @@
1583 #include <linux/swiotlb.h>
1584 #include <asm/machvec.h>
1585
1586 -extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
1587 +extern const struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
1588
1589 /* swiotlb declarations & definitions: */
1590 extern int swiotlb_late_init_with_default_size (size_t size);
1591 @@ -33,7 +33,7 @@ static inline int use_swiotlb(struct device *dev)
1592 !sba_dma_ops.dma_supported(dev, *dev->dma_mask);
1593 }
1594
1595 -struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
1596 +const struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
1597 {
1598 if (use_swiotlb(dev))
1599 return &swiotlb_dma_ops;
1600 diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
1601 index 01ae69b..35752fd 100644
1602 --- a/arch/ia64/hp/common/sba_iommu.c
1603 +++ b/arch/ia64/hp/common/sba_iommu.c
1604 @@ -2097,7 +2097,7 @@ static struct acpi_driver acpi_sba_ioc_driver = {
1605 },
1606 };
1607
1608 -extern struct dma_map_ops swiotlb_dma_ops;
1609 +extern const struct dma_map_ops swiotlb_dma_ops;
1610
1611 static int __init
1612 sba_init(void)
1613 @@ -2211,7 +2211,7 @@ sba_page_override(char *str)
1614
1615 __setup("sbapagesize=",sba_page_override);
1616
1617 -struct dma_map_ops sba_dma_ops = {
1618 +const struct dma_map_ops sba_dma_ops = {
1619 .alloc_coherent = sba_alloc_coherent,
1620 .free_coherent = sba_free_coherent,
1621 .map_page = sba_map_page,
1622 diff --git a/arch/ia64/ia32/binfmt_elf32.c b/arch/ia64/ia32/binfmt_elf32.c
1623 index c69552b..c7122f4 100644
1624 --- a/arch/ia64/ia32/binfmt_elf32.c
1625 +++ b/arch/ia64/ia32/binfmt_elf32.c
1626 @@ -45,6 +45,13 @@ randomize_stack_top(unsigned long stack_top);
1627
1628 #define elf_read_implies_exec(ex, have_pt_gnu_stack) (!(have_pt_gnu_stack))
1629
1630 +#ifdef CONFIG_PAX_ASLR
1631 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
1632 +
1633 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1634 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1635 +#endif
1636 +
1637 /* Ugly but avoids duplication */
1638 #include "../../../fs/binfmt_elf.c"
1639
1640 diff --git a/arch/ia64/ia32/ia32priv.h b/arch/ia64/ia32/ia32priv.h
1641 index 0f15349..26b3429 100644
1642 --- a/arch/ia64/ia32/ia32priv.h
1643 +++ b/arch/ia64/ia32/ia32priv.h
1644 @@ -296,7 +296,14 @@ typedef struct compat_siginfo {
1645 #define ELF_DATA ELFDATA2LSB
1646 #define ELF_ARCH EM_386
1647
1648 -#define IA32_STACK_TOP IA32_PAGE_OFFSET
1649 +#ifdef CONFIG_PAX_RANDUSTACK
1650 +#define __IA32_DELTA_STACK (current->mm->delta_stack)
1651 +#else
1652 +#define __IA32_DELTA_STACK 0UL
1653 +#endif
1654 +
1655 +#define IA32_STACK_TOP (IA32_PAGE_OFFSET - __IA32_DELTA_STACK)
1656 +
1657 #define IA32_GATE_OFFSET IA32_PAGE_OFFSET
1658 #define IA32_GATE_END IA32_PAGE_OFFSET + PAGE_SIZE
1659
1660 diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
1661 index 88405cb..de5ca5d 100644
1662 --- a/arch/ia64/include/asm/atomic.h
1663 +++ b/arch/ia64/include/asm/atomic.h
1664 @@ -210,6 +210,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
1665 #define atomic64_inc(v) atomic64_add(1, (v))
1666 #define atomic64_dec(v) atomic64_sub(1, (v))
1667
1668 +#define atomic64_read_unchecked(v) atomic64_read(v)
1669 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
1670 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
1671 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
1672 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
1673 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
1674 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
1675 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
1676 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
1677 +
1678 /* Atomic operations are already serializing */
1679 #define smp_mb__before_atomic_dec() barrier()
1680 #define smp_mb__after_atomic_dec() barrier()
1681 diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
1682 index e7482bd..d1c9b8e 100644
1683 --- a/arch/ia64/include/asm/cache.h
1684 +++ b/arch/ia64/include/asm/cache.h
1685 @@ -1,6 +1,7 @@
1686 #ifndef _ASM_IA64_CACHE_H
1687 #define _ASM_IA64_CACHE_H
1688
1689 +#include <linux/const.h>
1690
1691 /*
1692 * Copyright (C) 1998-2000 Hewlett-Packard Co
1693 @@ -9,7 +10,7 @@
1694
1695 /* Bytes per L1 (data) cache line. */
1696 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
1697 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1698 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1699
1700 #ifdef CONFIG_SMP
1701 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
1702 diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
1703 index 8d3c79c..71b3af6 100644
1704 --- a/arch/ia64/include/asm/dma-mapping.h
1705 +++ b/arch/ia64/include/asm/dma-mapping.h
1706 @@ -12,7 +12,7 @@
1707
1708 #define ARCH_HAS_DMA_GET_REQUIRED_MASK
1709
1710 -extern struct dma_map_ops *dma_ops;
1711 +extern const struct dma_map_ops *dma_ops;
1712 extern struct ia64_machine_vector ia64_mv;
1713 extern void set_iommu_machvec(void);
1714
1715 @@ -24,7 +24,7 @@ extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
1716 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
1717 dma_addr_t *daddr, gfp_t gfp)
1718 {
1719 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
1720 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1721 void *caddr;
1722
1723 caddr = ops->alloc_coherent(dev, size, daddr, gfp);
1724 @@ -35,7 +35,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
1725 static inline void dma_free_coherent(struct device *dev, size_t size,
1726 void *caddr, dma_addr_t daddr)
1727 {
1728 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
1729 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1730 debug_dma_free_coherent(dev, size, caddr, daddr);
1731 ops->free_coherent(dev, size, caddr, daddr);
1732 }
1733 @@ -49,13 +49,13 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
1734
1735 static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
1736 {
1737 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
1738 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1739 return ops->mapping_error(dev, daddr);
1740 }
1741
1742 static inline int dma_supported(struct device *dev, u64 mask)
1743 {
1744 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
1745 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1746 return ops->dma_supported(dev, mask);
1747 }
1748
1749 diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
1750 index 86eddee..b116bb4 100644
1751 --- a/arch/ia64/include/asm/elf.h
1752 +++ b/arch/ia64/include/asm/elf.h
1753 @@ -43,6 +43,13 @@
1754 */
1755 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
1756
1757 +#ifdef CONFIG_PAX_ASLR
1758 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
1759 +
1760 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1761 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1762 +#endif
1763 +
1764 #define PT_IA_64_UNWIND 0x70000001
1765
1766 /* IA-64 relocations: */
1767 diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h
1768 index 367d299..9ad4279 100644
1769 --- a/arch/ia64/include/asm/machvec.h
1770 +++ b/arch/ia64/include/asm/machvec.h
1771 @@ -45,7 +45,7 @@ typedef void ia64_mv_kernel_launch_event_t(void);
1772 /* DMA-mapping interface: */
1773 typedef void ia64_mv_dma_init (void);
1774 typedef u64 ia64_mv_dma_get_required_mask (struct device *);
1775 -typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
1776 +typedef const struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
1777
1778 /*
1779 * WARNING: The legacy I/O space is _architected_. Platforms are
1780 @@ -251,7 +251,7 @@ extern void machvec_init_from_cmdline(const char *cmdline);
1781 # endif /* CONFIG_IA64_GENERIC */
1782
1783 extern void swiotlb_dma_init(void);
1784 -extern struct dma_map_ops *dma_get_ops(struct device *);
1785 +extern const struct dma_map_ops *dma_get_ops(struct device *);
1786
1787 /*
1788 * Define default versions so we can extend machvec for new platforms without having
1789 diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
1790 index 8840a69..cdb63d9 100644
1791 --- a/arch/ia64/include/asm/pgtable.h
1792 +++ b/arch/ia64/include/asm/pgtable.h
1793 @@ -12,7 +12,7 @@
1794 * David Mosberger-Tang <davidm@hpl.hp.com>
1795 */
1796
1797 -
1798 +#include <linux/const.h>
1799 #include <asm/mman.h>
1800 #include <asm/page.h>
1801 #include <asm/processor.h>
1802 @@ -143,6 +143,17 @@
1803 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1804 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1805 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
1806 +
1807 +#ifdef CONFIG_PAX_PAGEEXEC
1808 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
1809 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1810 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1811 +#else
1812 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
1813 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
1814 +# define PAGE_COPY_NOEXEC PAGE_COPY
1815 +#endif
1816 +
1817 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
1818 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
1819 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
1820 diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
1821 index 239ecdc..f94170e 100644
1822 --- a/arch/ia64/include/asm/spinlock.h
1823 +++ b/arch/ia64/include/asm/spinlock.h
1824 @@ -72,7 +72,7 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
1825 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
1826
1827 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
1828 - ACCESS_ONCE(*p) = (tmp + 2) & ~1;
1829 + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
1830 }
1831
1832 static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
1833 diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
1834 index 449c8c0..432a3d2 100644
1835 --- a/arch/ia64/include/asm/uaccess.h
1836 +++ b/arch/ia64/include/asm/uaccess.h
1837 @@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
1838 const void *__cu_from = (from); \
1839 long __cu_len = (n); \
1840 \
1841 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
1842 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
1843 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
1844 __cu_len; \
1845 })
1846 @@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
1847 long __cu_len = (n); \
1848 \
1849 __chk_user_ptr(__cu_from); \
1850 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
1851 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
1852 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
1853 __cu_len; \
1854 })
1855 diff --git a/arch/ia64/kernel/dma-mapping.c b/arch/ia64/kernel/dma-mapping.c
1856 index f2c1600..969398a 100644
1857 --- a/arch/ia64/kernel/dma-mapping.c
1858 +++ b/arch/ia64/kernel/dma-mapping.c
1859 @@ -3,7 +3,7 @@
1860 /* Set this to 1 if there is a HW IOMMU in the system */
1861 int iommu_detected __read_mostly;
1862
1863 -struct dma_map_ops *dma_ops;
1864 +const struct dma_map_ops *dma_ops;
1865 EXPORT_SYMBOL(dma_ops);
1866
1867 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
1868 @@ -16,7 +16,7 @@ static int __init dma_init(void)
1869 }
1870 fs_initcall(dma_init);
1871
1872 -struct dma_map_ops *dma_get_ops(struct device *dev)
1873 +const struct dma_map_ops *dma_get_ops(struct device *dev)
1874 {
1875 return dma_ops;
1876 }
1877 diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
1878 index 1481b0a..e7d38ff 100644
1879 --- a/arch/ia64/kernel/module.c
1880 +++ b/arch/ia64/kernel/module.c
1881 @@ -315,8 +315,7 @@ module_alloc (unsigned long size)
1882 void
1883 module_free (struct module *mod, void *module_region)
1884 {
1885 - if (mod && mod->arch.init_unw_table &&
1886 - module_region == mod->module_init) {
1887 + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
1888 unw_remove_unwind_table(mod->arch.init_unw_table);
1889 mod->arch.init_unw_table = NULL;
1890 }
1891 @@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
1892 }
1893
1894 static inline int
1895 +in_init_rx (const struct module *mod, uint64_t addr)
1896 +{
1897 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
1898 +}
1899 +
1900 +static inline int
1901 +in_init_rw (const struct module *mod, uint64_t addr)
1902 +{
1903 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
1904 +}
1905 +
1906 +static inline int
1907 in_init (const struct module *mod, uint64_t addr)
1908 {
1909 - return addr - (uint64_t) mod->module_init < mod->init_size;
1910 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
1911 +}
1912 +
1913 +static inline int
1914 +in_core_rx (const struct module *mod, uint64_t addr)
1915 +{
1916 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
1917 +}
1918 +
1919 +static inline int
1920 +in_core_rw (const struct module *mod, uint64_t addr)
1921 +{
1922 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
1923 }
1924
1925 static inline int
1926 in_core (const struct module *mod, uint64_t addr)
1927 {
1928 - return addr - (uint64_t) mod->module_core < mod->core_size;
1929 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
1930 }
1931
1932 static inline int
1933 @@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
1934 break;
1935
1936 case RV_BDREL:
1937 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
1938 + if (in_init_rx(mod, val))
1939 + val -= (uint64_t) mod->module_init_rx;
1940 + else if (in_init_rw(mod, val))
1941 + val -= (uint64_t) mod->module_init_rw;
1942 + else if (in_core_rx(mod, val))
1943 + val -= (uint64_t) mod->module_core_rx;
1944 + else if (in_core_rw(mod, val))
1945 + val -= (uint64_t) mod->module_core_rw;
1946 break;
1947
1948 case RV_LTV:
1949 @@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
1950 * addresses have been selected...
1951 */
1952 uint64_t gp;
1953 - if (mod->core_size > MAX_LTOFF)
1954 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
1955 /*
1956 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
1957 * at the end of the module.
1958 */
1959 - gp = mod->core_size - MAX_LTOFF / 2;
1960 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
1961 else
1962 - gp = mod->core_size / 2;
1963 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
1964 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
1965 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
1966 mod->arch.gp = gp;
1967 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
1968 }
1969 diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c
1970 index f6b1ff0..de773fb 100644
1971 --- a/arch/ia64/kernel/pci-dma.c
1972 +++ b/arch/ia64/kernel/pci-dma.c
1973 @@ -43,7 +43,7 @@ struct device fallback_dev = {
1974 .dma_mask = &fallback_dev.coherent_dma_mask,
1975 };
1976
1977 -extern struct dma_map_ops intel_dma_ops;
1978 +extern const struct dma_map_ops intel_dma_ops;
1979
1980 static int __init pci_iommu_init(void)
1981 {
1982 @@ -96,15 +96,34 @@ int iommu_dma_supported(struct device *dev, u64 mask)
1983 }
1984 EXPORT_SYMBOL(iommu_dma_supported);
1985
1986 +extern void *intel_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags);
1987 +extern void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);
1988 +extern int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1989 +extern void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1990 +extern dma_addr_t intel_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1991 +extern void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1992 +extern int intel_mapping_error(struct device *dev, dma_addr_t dma_addr);
1993 +
1994 +static const struct dma_map_ops intel_iommu_dma_ops = {
1995 + /* from drivers/pci/intel-iommu.c:intel_dma_ops */
1996 + .alloc_coherent = intel_alloc_coherent,
1997 + .free_coherent = intel_free_coherent,
1998 + .map_sg = intel_map_sg,
1999 + .unmap_sg = intel_unmap_sg,
2000 + .map_page = intel_map_page,
2001 + .unmap_page = intel_unmap_page,
2002 + .mapping_error = intel_mapping_error,
2003 +
2004 + .sync_single_for_cpu = machvec_dma_sync_single,
2005 + .sync_sg_for_cpu = machvec_dma_sync_sg,
2006 + .sync_single_for_device = machvec_dma_sync_single,
2007 + .sync_sg_for_device = machvec_dma_sync_sg,
2008 + .dma_supported = iommu_dma_supported,
2009 +};
2010 +
2011 void __init pci_iommu_alloc(void)
2012 {
2013 - dma_ops = &intel_dma_ops;
2014 -
2015 - dma_ops->sync_single_for_cpu = machvec_dma_sync_single;
2016 - dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg;
2017 - dma_ops->sync_single_for_device = machvec_dma_sync_single;
2018 - dma_ops->sync_sg_for_device = machvec_dma_sync_sg;
2019 - dma_ops->dma_supported = iommu_dma_supported;
2020 + dma_ops = &intel_iommu_dma_ops;
2021
2022 /*
2023 * The order of these functions is important for
2024 diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c
2025 index 285aae8..61dbab6 100644
2026 --- a/arch/ia64/kernel/pci-swiotlb.c
2027 +++ b/arch/ia64/kernel/pci-swiotlb.c
2028 @@ -21,7 +21,7 @@ static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size,
2029 return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
2030 }
2031
2032 -struct dma_map_ops swiotlb_dma_ops = {
2033 +const struct dma_map_ops swiotlb_dma_ops = {
2034 .alloc_coherent = ia64_swiotlb_alloc_coherent,
2035 .free_coherent = swiotlb_free_coherent,
2036 .map_page = swiotlb_map_page,
2037 diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
2038 index 609d500..7dde2a8 100644
2039 --- a/arch/ia64/kernel/sys_ia64.c
2040 +++ b/arch/ia64/kernel/sys_ia64.c
2041 @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
2042 if (REGION_NUMBER(addr) == RGN_HPAGE)
2043 addr = 0;
2044 #endif
2045 +
2046 +#ifdef CONFIG_PAX_RANDMMAP
2047 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2048 + addr = mm->free_area_cache;
2049 + else
2050 +#endif
2051 +
2052 if (!addr)
2053 addr = mm->free_area_cache;
2054
2055 @@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
2056 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
2057 /* At this point: (!vma || addr < vma->vm_end). */
2058 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
2059 - if (start_addr != TASK_UNMAPPED_BASE) {
2060 + if (start_addr != mm->mmap_base) {
2061 /* Start a new search --- just in case we missed some holes. */
2062 - addr = TASK_UNMAPPED_BASE;
2063 + addr = mm->mmap_base;
2064 goto full_search;
2065 }
2066 return -ENOMEM;
2067 }
2068 - if (!vma || addr + len <= vma->vm_start) {
2069 + if (check_heap_stack_gap(vma, addr, len)) {
2070 /* Remember the address where we stopped this search: */
2071 mm->free_area_cache = addr + len;
2072 return addr;
2073 diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
2074 index 8f06035..b3a5818 100644
2075 --- a/arch/ia64/kernel/topology.c
2076 +++ b/arch/ia64/kernel/topology.c
2077 @@ -282,7 +282,7 @@ static ssize_t cache_show(struct kobject * kobj, struct attribute * attr, char *
2078 return ret;
2079 }
2080
2081 -static struct sysfs_ops cache_sysfs_ops = {
2082 +static const struct sysfs_ops cache_sysfs_ops = {
2083 .show = cache_show
2084 };
2085
2086 diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
2087 index 0a0c77b..8e55a81 100644
2088 --- a/arch/ia64/kernel/vmlinux.lds.S
2089 +++ b/arch/ia64/kernel/vmlinux.lds.S
2090 @@ -190,7 +190,7 @@ SECTIONS
2091 /* Per-cpu data: */
2092 . = ALIGN(PERCPU_PAGE_SIZE);
2093 PERCPU_VADDR(PERCPU_ADDR, :percpu)
2094 - __phys_per_cpu_start = __per_cpu_load;
2095 + __phys_per_cpu_start = per_cpu_load;
2096 . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits
2097 * into percpu page size
2098 */
2099 diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
2100 index 19261a9..1611b7a 100644
2101 --- a/arch/ia64/mm/fault.c
2102 +++ b/arch/ia64/mm/fault.c
2103 @@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
2104 return pte_present(pte);
2105 }
2106
2107 +#ifdef CONFIG_PAX_PAGEEXEC
2108 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2109 +{
2110 + unsigned long i;
2111 +
2112 + printk(KERN_ERR "PAX: bytes at PC: ");
2113 + for (i = 0; i < 8; i++) {
2114 + unsigned int c;
2115 + if (get_user(c, (unsigned int *)pc+i))
2116 + printk(KERN_CONT "???????? ");
2117 + else
2118 + printk(KERN_CONT "%08x ", c);
2119 + }
2120 + printk("\n");
2121 +}
2122 +#endif
2123 +
2124 void __kprobes
2125 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
2126 {
2127 @@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
2128 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
2129 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
2130
2131 - if ((vma->vm_flags & mask) != mask)
2132 + if ((vma->vm_flags & mask) != mask) {
2133 +
2134 +#ifdef CONFIG_PAX_PAGEEXEC
2135 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
2136 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
2137 + goto bad_area;
2138 +
2139 + up_read(&mm->mmap_sem);
2140 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
2141 + do_group_exit(SIGKILL);
2142 + }
2143 +#endif
2144 +
2145 goto bad_area;
2146
2147 + }
2148 +
2149 survive:
2150 /*
2151 * If for any reason at all we couldn't handle the fault, make
2152 diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
2153 index b0f6157..a082bbc 100644
2154 --- a/arch/ia64/mm/hugetlbpage.c
2155 +++ b/arch/ia64/mm/hugetlbpage.c
2156 @@ -172,7 +172,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
2157 /* At this point: (!vmm || addr < vmm->vm_end). */
2158 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
2159 return -ENOMEM;
2160 - if (!vmm || (addr + len) <= vmm->vm_start)
2161 + if (check_heap_stack_gap(vmm, addr, len))
2162 return addr;
2163 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
2164 }
2165 diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
2166 index 1857766..05cc6a3 100644
2167 --- a/arch/ia64/mm/init.c
2168 +++ b/arch/ia64/mm/init.c
2169 @@ -122,6 +122,19 @@ ia64_init_addr_space (void)
2170 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
2171 vma->vm_end = vma->vm_start + PAGE_SIZE;
2172 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
2173 +
2174 +#ifdef CONFIG_PAX_PAGEEXEC
2175 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
2176 + vma->vm_flags &= ~VM_EXEC;
2177 +
2178 +#ifdef CONFIG_PAX_MPROTECT
2179 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
2180 + vma->vm_flags &= ~VM_MAYEXEC;
2181 +#endif
2182 +
2183 + }
2184 +#endif
2185 +
2186 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
2187 down_write(&current->mm->mmap_sem);
2188 if (insert_vm_struct(current->mm, vma)) {
2189 diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
2190 index 98b6849..8046766 100644
2191 --- a/arch/ia64/sn/pci/pci_dma.c
2192 +++ b/arch/ia64/sn/pci/pci_dma.c
2193 @@ -464,7 +464,7 @@ int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
2194 return ret;
2195 }
2196
2197 -static struct dma_map_ops sn_dma_ops = {
2198 +static const struct dma_map_ops sn_dma_ops = {
2199 .alloc_coherent = sn_dma_alloc_coherent,
2200 .free_coherent = sn_dma_free_coherent,
2201 .map_page = sn_dma_map_page,
2202 diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
2203 index 40b3ee9..8c2c112 100644
2204 --- a/arch/m32r/include/asm/cache.h
2205 +++ b/arch/m32r/include/asm/cache.h
2206 @@ -1,8 +1,10 @@
2207 #ifndef _ASM_M32R_CACHE_H
2208 #define _ASM_M32R_CACHE_H
2209
2210 +#include <linux/const.h>
2211 +
2212 /* L1 cache line size */
2213 #define L1_CACHE_SHIFT 4
2214 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2215 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2216
2217 #endif /* _ASM_M32R_CACHE_H */
2218 diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
2219 index 82abd15..d95ae5d 100644
2220 --- a/arch/m32r/lib/usercopy.c
2221 +++ b/arch/m32r/lib/usercopy.c
2222 @@ -14,6 +14,9 @@
2223 unsigned long
2224 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
2225 {
2226 + if ((long)n < 0)
2227 + return n;
2228 +
2229 prefetch(from);
2230 if (access_ok(VERIFY_WRITE, to, n))
2231 __copy_user(to,from,n);
2232 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
2233 unsigned long
2234 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
2235 {
2236 + if ((long)n < 0)
2237 + return n;
2238 +
2239 prefetchw(to);
2240 if (access_ok(VERIFY_READ, from, n))
2241 __copy_user_zeroing(to,from,n);
2242 diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
2243 index ecafbe1..432c3e4 100644
2244 --- a/arch/m68k/include/asm/cache.h
2245 +++ b/arch/m68k/include/asm/cache.h
2246 @@ -4,9 +4,11 @@
2247 #ifndef __ARCH_M68K_CACHE_H
2248 #define __ARCH_M68K_CACHE_H
2249
2250 +#include <linux/const.h>
2251 +
2252 /* bytes per L1 cache line */
2253 #define L1_CACHE_SHIFT 4
2254 -#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
2255 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2256
2257 #define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
2258
2259 diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
2260 index c209c47..2ba96e2 100644
2261 --- a/arch/microblaze/include/asm/cache.h
2262 +++ b/arch/microblaze/include/asm/cache.h
2263 @@ -13,11 +13,12 @@
2264 #ifndef _ASM_MICROBLAZE_CACHE_H
2265 #define _ASM_MICROBLAZE_CACHE_H
2266
2267 +#include <linux/const.h>
2268 #include <asm/registers.h>
2269
2270 #define L1_CACHE_SHIFT 2
2271 /* word-granular cache in microblaze */
2272 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2273 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2274
2275 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2276
2277 diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
2278 index fd7620f..63d73a6 100644
2279 --- a/arch/mips/Kconfig
2280 +++ b/arch/mips/Kconfig
2281 @@ -5,6 +5,7 @@ config MIPS
2282 select HAVE_IDE
2283 select HAVE_OPROFILE
2284 select HAVE_ARCH_KGDB
2285 + select GENERIC_ATOMIC64 if !64BIT
2286 # Horrible source of confusion. Die, die, die ...
2287 select EMBEDDED
2288 select RTC_LIB if !LEMOTE_FULOONG2E
2289 diff --git a/arch/mips/Makefile b/arch/mips/Makefile
2290 index 77f5021..2b1db8a 100644
2291 --- a/arch/mips/Makefile
2292 +++ b/arch/mips/Makefile
2293 @@ -51,6 +51,8 @@ endif
2294 cflags-y := -ffunction-sections
2295 cflags-y += $(call cc-option, -mno-check-zero-division)
2296
2297 +cflags-y += -Wno-sign-compare -Wno-extra
2298 +
2299 ifdef CONFIG_32BIT
2300 ld-emul = $(32bit-emul)
2301 vmlinux-32 = vmlinux
2302 diff --git a/arch/mips/alchemy/devboards/pm.c b/arch/mips/alchemy/devboards/pm.c
2303 index 632f986..fd0378d 100644
2304 --- a/arch/mips/alchemy/devboards/pm.c
2305 +++ b/arch/mips/alchemy/devboards/pm.c
2306 @@ -78,7 +78,7 @@ static void db1x_pm_end(void)
2307
2308 }
2309
2310 -static struct platform_suspend_ops db1x_pm_ops = {
2311 +static const struct platform_suspend_ops db1x_pm_ops = {
2312 .valid = suspend_valid_only_mem,
2313 .begin = db1x_pm_begin,
2314 .enter = db1x_pm_enter,
2315 diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
2316 index 09e7128..111035b 100644
2317 --- a/arch/mips/include/asm/atomic.h
2318 +++ b/arch/mips/include/asm/atomic.h
2319 @@ -21,6 +21,10 @@
2320 #include <asm/war.h>
2321 #include <asm/system.h>
2322
2323 +#ifdef CONFIG_GENERIC_ATOMIC64
2324 +#include <asm-generic/atomic64.h>
2325 +#endif
2326 +
2327 #define ATOMIC_INIT(i) { (i) }
2328
2329 /*
2330 @@ -782,6 +786,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
2331 */
2332 #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
2333
2334 +#define atomic64_read_unchecked(v) atomic64_read(v)
2335 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2336 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2337 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2338 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2339 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
2340 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2341 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
2342 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2343 +
2344 #endif /* CONFIG_64BIT */
2345
2346 /*
2347 diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
2348 index 37f175c..c7a3065 100644
2349 --- a/arch/mips/include/asm/cache.h
2350 +++ b/arch/mips/include/asm/cache.h
2351 @@ -9,10 +9,11 @@
2352 #ifndef _ASM_CACHE_H
2353 #define _ASM_CACHE_H
2354
2355 +#include <linux/const.h>
2356 #include <kmalloc.h>
2357
2358 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
2359 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2360 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2361
2362 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
2363 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2364 diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
2365 index 7990694..4e93acf 100644
2366 --- a/arch/mips/include/asm/elf.h
2367 +++ b/arch/mips/include/asm/elf.h
2368 @@ -368,4 +368,11 @@ extern int dump_task_fpu(struct task_struct *, elf_fpregset_t *);
2369 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
2370 #endif
2371
2372 +#ifdef CONFIG_PAX_ASLR
2373 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
2374 +
2375 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2376 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2377 +#endif
2378 +
2379 #endif /* _ASM_ELF_H */
2380 diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
2381 index f266295..627cfff 100644
2382 --- a/arch/mips/include/asm/page.h
2383 +++ b/arch/mips/include/asm/page.h
2384 @@ -93,7 +93,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
2385 #ifdef CONFIG_CPU_MIPS32
2386 typedef struct { unsigned long pte_low, pte_high; } pte_t;
2387 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
2388 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
2389 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
2390 #else
2391 typedef struct { unsigned long long pte; } pte_t;
2392 #define pte_val(x) ((x).pte)
2393 diff --git a/arch/mips/include/asm/reboot.h b/arch/mips/include/asm/reboot.h
2394 index e48c0bf..f3acf65 100644
2395 --- a/arch/mips/include/asm/reboot.h
2396 +++ b/arch/mips/include/asm/reboot.h
2397 @@ -9,7 +9,7 @@
2398 #ifndef _ASM_REBOOT_H
2399 #define _ASM_REBOOT_H
2400
2401 -extern void (*_machine_restart)(char *command);
2402 -extern void (*_machine_halt)(void);
2403 +extern void (*__noreturn _machine_restart)(char *command);
2404 +extern void (*__noreturn _machine_halt)(void);
2405
2406 #endif /* _ASM_REBOOT_H */
2407 diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
2408 index 83b5509..9fa24a23 100644
2409 --- a/arch/mips/include/asm/system.h
2410 +++ b/arch/mips/include/asm/system.h
2411 @@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
2412 */
2413 #define __ARCH_WANT_UNLOCKED_CTXSW
2414
2415 -extern unsigned long arch_align_stack(unsigned long sp);
2416 +#define arch_align_stack(x) ((x) & ~0xfUL)
2417
2418 #endif /* _ASM_SYSTEM_H */
2419 diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
2420 index 9fdd8bc..fcf9d68 100644
2421 --- a/arch/mips/kernel/binfmt_elfn32.c
2422 +++ b/arch/mips/kernel/binfmt_elfn32.c
2423 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2424 #undef ELF_ET_DYN_BASE
2425 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2426
2427 +#ifdef CONFIG_PAX_ASLR
2428 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
2429 +
2430 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2431 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2432 +#endif
2433 +
2434 #include <asm/processor.h>
2435 #include <linux/module.h>
2436 #include <linux/elfcore.h>
2437 diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
2438 index ff44823..cf0b48a 100644
2439 --- a/arch/mips/kernel/binfmt_elfo32.c
2440 +++ b/arch/mips/kernel/binfmt_elfo32.c
2441 @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2442 #undef ELF_ET_DYN_BASE
2443 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2444
2445 +#ifdef CONFIG_PAX_ASLR
2446 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
2447 +
2448 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2449 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2450 +#endif
2451 +
2452 #include <asm/processor.h>
2453
2454 /*
2455 diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
2456 index 50c9bb8..efdd5f8 100644
2457 --- a/arch/mips/kernel/kgdb.c
2458 +++ b/arch/mips/kernel/kgdb.c
2459 @@ -245,6 +245,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
2460 return -1;
2461 }
2462
2463 +/* cannot be const */
2464 struct kgdb_arch arch_kgdb_ops;
2465
2466 /*
2467 diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
2468 index f3d73e1..bb3f57a 100644
2469 --- a/arch/mips/kernel/process.c
2470 +++ b/arch/mips/kernel/process.c
2471 @@ -470,15 +470,3 @@ unsigned long get_wchan(struct task_struct *task)
2472 out:
2473 return pc;
2474 }
2475 -
2476 -/*
2477 - * Don't forget that the stack pointer must be aligned on a 8 bytes
2478 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
2479 - */
2480 -unsigned long arch_align_stack(unsigned long sp)
2481 -{
2482 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2483 - sp -= get_random_int() & ~PAGE_MASK;
2484 -
2485 - return sp & ALMASK;
2486 -}
2487 diff --git a/arch/mips/kernel/reset.c b/arch/mips/kernel/reset.c
2488 index 060563a..7fbf310 100644
2489 --- a/arch/mips/kernel/reset.c
2490 +++ b/arch/mips/kernel/reset.c
2491 @@ -19,8 +19,8 @@
2492 * So handle all using function pointers to machine specific
2493 * functions.
2494 */
2495 -void (*_machine_restart)(char *command);
2496 -void (*_machine_halt)(void);
2497 +void (*__noreturn _machine_restart)(char *command);
2498 +void (*__noreturn _machine_halt)(void);
2499 void (*pm_power_off)(void);
2500
2501 EXPORT_SYMBOL(pm_power_off);
2502 @@ -29,16 +29,19 @@ void machine_restart(char *command)
2503 {
2504 if (_machine_restart)
2505 _machine_restart(command);
2506 + BUG();
2507 }
2508
2509 void machine_halt(void)
2510 {
2511 if (_machine_halt)
2512 _machine_halt();
2513 + BUG();
2514 }
2515
2516 void machine_power_off(void)
2517 {
2518 if (pm_power_off)
2519 pm_power_off();
2520 + BUG();
2521 }
2522 diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
2523 index 3f7f466..3abe0b5 100644
2524 --- a/arch/mips/kernel/syscall.c
2525 +++ b/arch/mips/kernel/syscall.c
2526 @@ -102,17 +102,21 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
2527 do_color_align = 0;
2528 if (filp || (flags & MAP_SHARED))
2529 do_color_align = 1;
2530 +
2531 +#ifdef CONFIG_PAX_RANDMMAP
2532 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
2533 +#endif
2534 +
2535 if (addr) {
2536 if (do_color_align)
2537 addr = COLOUR_ALIGN(addr, pgoff);
2538 else
2539 addr = PAGE_ALIGN(addr);
2540 vmm = find_vma(current->mm, addr);
2541 - if (task_size - len >= addr &&
2542 - (!vmm || addr + len <= vmm->vm_start))
2543 + if (task_size - len >= addr && check_heap_stack_gap(vmm, addr, len))
2544 return addr;
2545 }
2546 - addr = TASK_UNMAPPED_BASE;
2547 + addr = current->mm->mmap_base;
2548 if (do_color_align)
2549 addr = COLOUR_ALIGN(addr, pgoff);
2550 else
2551 @@ -122,7 +126,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
2552 /* At this point: (!vmm || addr < vmm->vm_end). */
2553 if (task_size - len < addr)
2554 return -ENOMEM;
2555 - if (!vmm || addr + len <= vmm->vm_start)
2556 + if (check_heap_stack_gap(vmm, addr, len))
2557 return addr;
2558 addr = vmm->vm_end;
2559 if (do_color_align)
2560 diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
2561 index e97a7a2..f18f5b0 100644
2562 --- a/arch/mips/mm/fault.c
2563 +++ b/arch/mips/mm/fault.c
2564 @@ -26,6 +26,23 @@
2565 #include <asm/ptrace.h>
2566 #include <asm/highmem.h> /* For VMALLOC_END */
2567
2568 +#ifdef CONFIG_PAX_PAGEEXEC
2569 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2570 +{
2571 + unsigned long i;
2572 +
2573 + printk(KERN_ERR "PAX: bytes at PC: ");
2574 + for (i = 0; i < 5; i++) {
2575 + unsigned int c;
2576 + if (get_user(c, (unsigned int *)pc+i))
2577 + printk(KERN_CONT "???????? ");
2578 + else
2579 + printk(KERN_CONT "%08x ", c);
2580 + }
2581 + printk("\n");
2582 +}
2583 +#endif
2584 +
2585 /*
2586 * This routine handles page faults. It determines the address,
2587 * and the problem, and then passes it off to one of the appropriate
2588 diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
2589 index bdc1f9a..e8de5c5 100644
2590 --- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
2591 +++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
2592 @@ -11,12 +11,14 @@
2593 #ifndef _ASM_PROC_CACHE_H
2594 #define _ASM_PROC_CACHE_H
2595
2596 +#include <linux/const.h>
2597 +
2598 /* L1 cache */
2599
2600 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
2601 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
2602 -#define L1_CACHE_BYTES 16 /* bytes per entry */
2603 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
2604 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
2605 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
2606
2607 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
2608 diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
2609 index 8bc9e96..26554f8 100644
2610 --- a/arch/parisc/include/asm/atomic.h
2611 +++ b/arch/parisc/include/asm/atomic.h
2612 @@ -336,6 +336,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
2613
2614 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
2615
2616 +#define atomic64_read_unchecked(v) atomic64_read(v)
2617 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2618 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2619 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2620 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2621 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
2622 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2623 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
2624 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2625 +
2626 #else /* CONFIG_64BIT */
2627
2628 #include <asm-generic/atomic64.h>
2629 diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
2630 index 32c2cca..a7b3a64 100644
2631 --- a/arch/parisc/include/asm/cache.h
2632 +++ b/arch/parisc/include/asm/cache.h
2633 @@ -5,6 +5,7 @@
2634 #ifndef __ARCH_PARISC_CACHE_H
2635 #define __ARCH_PARISC_CACHE_H
2636
2637 +#include <linux/const.h>
2638
2639 /*
2640 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
2641 @@ -15,13 +16,13 @@
2642 * just ruin performance.
2643 */
2644 #ifdef CONFIG_PA20
2645 -#define L1_CACHE_BYTES 64
2646 #define L1_CACHE_SHIFT 6
2647 #else
2648 -#define L1_CACHE_BYTES 32
2649 #define L1_CACHE_SHIFT 5
2650 #endif
2651
2652 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2653 +
2654 #ifndef __ASSEMBLY__
2655
2656 #define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
2657 diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
2658 index 9c802eb..0592e41 100644
2659 --- a/arch/parisc/include/asm/elf.h
2660 +++ b/arch/parisc/include/asm/elf.h
2661 @@ -343,6 +343,13 @@ struct pt_regs; /* forward declaration... */
2662
2663 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
2664
2665 +#ifdef CONFIG_PAX_ASLR
2666 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
2667 +
2668 +#define PAX_DELTA_MMAP_LEN 16
2669 +#define PAX_DELTA_STACK_LEN 16
2670 +#endif
2671 +
2672 /* This yields a mask that user programs can use to figure out what
2673 instruction set this CPU supports. This could be done in user space,
2674 but it's not easy, and we've already done it here. */
2675 diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
2676 index a27d2e2..18fd845 100644
2677 --- a/arch/parisc/include/asm/pgtable.h
2678 +++ b/arch/parisc/include/asm/pgtable.h
2679 @@ -207,6 +207,17 @@
2680 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
2681 #define PAGE_COPY PAGE_EXECREAD
2682 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
2683 +
2684 +#ifdef CONFIG_PAX_PAGEEXEC
2685 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
2686 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
2687 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
2688 +#else
2689 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
2690 +# define PAGE_COPY_NOEXEC PAGE_COPY
2691 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
2692 +#endif
2693 +
2694 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
2695 #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
2696 #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
2697 diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
2698 index 2120746..8d70a5e 100644
2699 --- a/arch/parisc/kernel/module.c
2700 +++ b/arch/parisc/kernel/module.c
2701 @@ -95,16 +95,38 @@
2702
2703 /* three functions to determine where in the module core
2704 * or init pieces the location is */
2705 +static inline int in_init_rx(struct module *me, void *loc)
2706 +{
2707 + return (loc >= me->module_init_rx &&
2708 + loc < (me->module_init_rx + me->init_size_rx));
2709 +}
2710 +
2711 +static inline int in_init_rw(struct module *me, void *loc)
2712 +{
2713 + return (loc >= me->module_init_rw &&
2714 + loc < (me->module_init_rw + me->init_size_rw));
2715 +}
2716 +
2717 static inline int in_init(struct module *me, void *loc)
2718 {
2719 - return (loc >= me->module_init &&
2720 - loc <= (me->module_init + me->init_size));
2721 + return in_init_rx(me, loc) || in_init_rw(me, loc);
2722 +}
2723 +
2724 +static inline int in_core_rx(struct module *me, void *loc)
2725 +{
2726 + return (loc >= me->module_core_rx &&
2727 + loc < (me->module_core_rx + me->core_size_rx));
2728 +}
2729 +
2730 +static inline int in_core_rw(struct module *me, void *loc)
2731 +{
2732 + return (loc >= me->module_core_rw &&
2733 + loc < (me->module_core_rw + me->core_size_rw));
2734 }
2735
2736 static inline int in_core(struct module *me, void *loc)
2737 {
2738 - return (loc >= me->module_core &&
2739 - loc <= (me->module_core + me->core_size));
2740 + return in_core_rx(me, loc) || in_core_rw(me, loc);
2741 }
2742
2743 static inline int in_local(struct module *me, void *loc)
2744 @@ -364,13 +386,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
2745 }
2746
2747 /* align things a bit */
2748 - me->core_size = ALIGN(me->core_size, 16);
2749 - me->arch.got_offset = me->core_size;
2750 - me->core_size += gots * sizeof(struct got_entry);
2751 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
2752 + me->arch.got_offset = me->core_size_rw;
2753 + me->core_size_rw += gots * sizeof(struct got_entry);
2754
2755 - me->core_size = ALIGN(me->core_size, 16);
2756 - me->arch.fdesc_offset = me->core_size;
2757 - me->core_size += fdescs * sizeof(Elf_Fdesc);
2758 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
2759 + me->arch.fdesc_offset = me->core_size_rw;
2760 + me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
2761
2762 me->arch.got_max = gots;
2763 me->arch.fdesc_max = fdescs;
2764 @@ -388,7 +410,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
2765
2766 BUG_ON(value == 0);
2767
2768 - got = me->module_core + me->arch.got_offset;
2769 + got = me->module_core_rw + me->arch.got_offset;
2770 for (i = 0; got[i].addr; i++)
2771 if (got[i].addr == value)
2772 goto out;
2773 @@ -406,7 +428,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
2774 #ifdef CONFIG_64BIT
2775 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
2776 {
2777 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
2778 + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
2779
2780 if (!value) {
2781 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
2782 @@ -424,7 +446,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
2783
2784 /* Create new one */
2785 fdesc->addr = value;
2786 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
2787 + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
2788 return (Elf_Addr)fdesc;
2789 }
2790 #endif /* CONFIG_64BIT */
2791 @@ -848,7 +870,7 @@ register_unwind_table(struct module *me,
2792
2793 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
2794 end = table + sechdrs[me->arch.unwind_section].sh_size;
2795 - gp = (Elf_Addr)me->module_core + me->arch.got_offset;
2796 + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
2797
2798 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
2799 me->arch.unwind_section, table, end, gp);
2800 diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
2801 index 9147391..f3d949a 100644
2802 --- a/arch/parisc/kernel/sys_parisc.c
2803 +++ b/arch/parisc/kernel/sys_parisc.c
2804 @@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
2805 /* At this point: (!vma || addr < vma->vm_end). */
2806 if (TASK_SIZE - len < addr)
2807 return -ENOMEM;
2808 - if (!vma || addr + len <= vma->vm_start)
2809 + if (check_heap_stack_gap(vma, addr, len))
2810 return addr;
2811 addr = vma->vm_end;
2812 }
2813 @@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
2814 /* At this point: (!vma || addr < vma->vm_end). */
2815 if (TASK_SIZE - len < addr)
2816 return -ENOMEM;
2817 - if (!vma || addr + len <= vma->vm_start)
2818 + if (check_heap_stack_gap(vma, addr, len))
2819 return addr;
2820 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
2821 if (addr < vma->vm_end) /* handle wraparound */
2822 @@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
2823 if (flags & MAP_FIXED)
2824 return addr;
2825 if (!addr)
2826 - addr = TASK_UNMAPPED_BASE;
2827 + addr = current->mm->mmap_base;
2828
2829 if (filp) {
2830 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
2831 diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
2832 index 8b58bf0..7afff03 100644
2833 --- a/arch/parisc/kernel/traps.c
2834 +++ b/arch/parisc/kernel/traps.c
2835 @@ -733,9 +733,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
2836
2837 down_read(&current->mm->mmap_sem);
2838 vma = find_vma(current->mm,regs->iaoq[0]);
2839 - if (vma && (regs->iaoq[0] >= vma->vm_start)
2840 - && (vma->vm_flags & VM_EXEC)) {
2841 -
2842 + if (vma && (regs->iaoq[0] >= vma->vm_start)) {
2843 fault_address = regs->iaoq[0];
2844 fault_space = regs->iasq[0];
2845
2846 diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
2847 index c6afbfc..c5839f6 100644
2848 --- a/arch/parisc/mm/fault.c
2849 +++ b/arch/parisc/mm/fault.c
2850 @@ -15,6 +15,7 @@
2851 #include <linux/sched.h>
2852 #include <linux/interrupt.h>
2853 #include <linux/module.h>
2854 +#include <linux/unistd.h>
2855
2856 #include <asm/uaccess.h>
2857 #include <asm/traps.h>
2858 @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
2859 static unsigned long
2860 parisc_acctyp(unsigned long code, unsigned int inst)
2861 {
2862 - if (code == 6 || code == 16)
2863 + if (code == 6 || code == 7 || code == 16)
2864 return VM_EXEC;
2865
2866 switch (inst & 0xf0000000) {
2867 @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
2868 }
2869 #endif
2870
2871 +#ifdef CONFIG_PAX_PAGEEXEC
2872 +/*
2873 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
2874 + *
2875 + * returns 1 when task should be killed
2876 + * 2 when rt_sigreturn trampoline was detected
2877 + * 3 when unpatched PLT trampoline was detected
2878 + */
2879 +static int pax_handle_fetch_fault(struct pt_regs *regs)
2880 +{
2881 +
2882 +#ifdef CONFIG_PAX_EMUPLT
2883 + int err;
2884 +
2885 + do { /* PaX: unpatched PLT emulation */
2886 + unsigned int bl, depwi;
2887 +
2888 + err = get_user(bl, (unsigned int *)instruction_pointer(regs));
2889 + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
2890 +
2891 + if (err)
2892 + break;
2893 +
2894 + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
2895 + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
2896 +
2897 + err = get_user(ldw, (unsigned int *)addr);
2898 + err |= get_user(bv, (unsigned int *)(addr+4));
2899 + err |= get_user(ldw2, (unsigned int *)(addr+8));
2900 +
2901 + if (err)
2902 + break;
2903 +
2904 + if (ldw == 0x0E801096U &&
2905 + bv == 0xEAC0C000U &&
2906 + ldw2 == 0x0E881095U)
2907 + {
2908 + unsigned int resolver, map;
2909 +
2910 + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
2911 + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
2912 + if (err)
2913 + break;
2914 +
2915 + regs->gr[20] = instruction_pointer(regs)+8;
2916 + regs->gr[21] = map;
2917 + regs->gr[22] = resolver;
2918 + regs->iaoq[0] = resolver | 3UL;
2919 + regs->iaoq[1] = regs->iaoq[0] + 4;
2920 + return 3;
2921 + }
2922 + }
2923 + } while (0);
2924 +#endif
2925 +
2926 +#ifdef CONFIG_PAX_EMUTRAMP
2927 +
2928 +#ifndef CONFIG_PAX_EMUSIGRT
2929 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
2930 + return 1;
2931 +#endif
2932 +
2933 + do { /* PaX: rt_sigreturn emulation */
2934 + unsigned int ldi1, ldi2, bel, nop;
2935 +
2936 + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
2937 + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
2938 + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
2939 + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
2940 +
2941 + if (err)
2942 + break;
2943 +
2944 + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
2945 + ldi2 == 0x3414015AU &&
2946 + bel == 0xE4008200U &&
2947 + nop == 0x08000240U)
2948 + {
2949 + regs->gr[25] = (ldi1 & 2) >> 1;
2950 + regs->gr[20] = __NR_rt_sigreturn;
2951 + regs->gr[31] = regs->iaoq[1] + 16;
2952 + regs->sr[0] = regs->iasq[1];
2953 + regs->iaoq[0] = 0x100UL;
2954 + regs->iaoq[1] = regs->iaoq[0] + 4;
2955 + regs->iasq[0] = regs->sr[2];
2956 + regs->iasq[1] = regs->sr[2];
2957 + return 2;
2958 + }
2959 + } while (0);
2960 +#endif
2961 +
2962 + return 1;
2963 +}
2964 +
2965 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2966 +{
2967 + unsigned long i;
2968 +
2969 + printk(KERN_ERR "PAX: bytes at PC: ");
2970 + for (i = 0; i < 5; i++) {
2971 + unsigned int c;
2972 + if (get_user(c, (unsigned int *)pc+i))
2973 + printk(KERN_CONT "???????? ");
2974 + else
2975 + printk(KERN_CONT "%08x ", c);
2976 + }
2977 + printk("\n");
2978 +}
2979 +#endif
2980 +
2981 int fixup_exception(struct pt_regs *regs)
2982 {
2983 const struct exception_table_entry *fix;
2984 @@ -192,8 +303,33 @@ good_area:
2985
2986 acc_type = parisc_acctyp(code,regs->iir);
2987
2988 - if ((vma->vm_flags & acc_type) != acc_type)
2989 + if ((vma->vm_flags & acc_type) != acc_type) {
2990 +
2991 +#ifdef CONFIG_PAX_PAGEEXEC
2992 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
2993 + (address & ~3UL) == instruction_pointer(regs))
2994 + {
2995 + up_read(&mm->mmap_sem);
2996 + switch (pax_handle_fetch_fault(regs)) {
2997 +
2998 +#ifdef CONFIG_PAX_EMUPLT
2999 + case 3:
3000 + return;
3001 +#endif
3002 +
3003 +#ifdef CONFIG_PAX_EMUTRAMP
3004 + case 2:
3005 + return;
3006 +#endif
3007 +
3008 + }
3009 + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
3010 + do_group_exit(SIGKILL);
3011 + }
3012 +#endif
3013 +
3014 goto bad_area;
3015 + }
3016
3017 /*
3018 * If for any reason at all we couldn't handle the fault, make
3019 diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
3020 index c107b74..409dc0f 100644
3021 --- a/arch/powerpc/Makefile
3022 +++ b/arch/powerpc/Makefile
3023 @@ -74,6 +74,8 @@ KBUILD_AFLAGS += -Iarch/$(ARCH)
3024 KBUILD_CFLAGS += -msoft-float -pipe -Iarch/$(ARCH) $(CFLAGS-y)
3025 CPP = $(CC) -E $(KBUILD_CFLAGS)
3026
3027 +cflags-y += -Wno-sign-compare -Wno-extra
3028 +
3029 CHECKFLAGS += -m$(CONFIG_WORD_SIZE) -D__powerpc__ -D__powerpc$(CONFIG_WORD_SIZE)__
3030
3031 ifeq ($(CONFIG_PPC64),y)
3032 diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
3033 index 81de6eb..d5d0e24 100644
3034 --- a/arch/powerpc/include/asm/cache.h
3035 +++ b/arch/powerpc/include/asm/cache.h
3036 @@ -3,6 +3,7 @@
3037
3038 #ifdef __KERNEL__
3039
3040 +#include <linux/const.h>
3041
3042 /* bytes per L1 cache line */
3043 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
3044 @@ -18,7 +19,7 @@
3045 #define L1_CACHE_SHIFT 7
3046 #endif
3047
3048 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
3049 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3050
3051 #define SMP_CACHE_BYTES L1_CACHE_BYTES
3052
3053 diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h
3054 index 6d94d27..50d4cad 100644
3055 --- a/arch/powerpc/include/asm/device.h
3056 +++ b/arch/powerpc/include/asm/device.h
3057 @@ -14,7 +14,7 @@ struct dev_archdata {
3058 struct device_node *of_node;
3059
3060 /* DMA operations on that device */
3061 - struct dma_map_ops *dma_ops;
3062 + const struct dma_map_ops *dma_ops;
3063
3064 /*
3065 * When an iommu is in use, dma_data is used as a ptr to the base of the
3066 diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
3067 index e281dae..2b8a784 100644
3068 --- a/arch/powerpc/include/asm/dma-mapping.h
3069 +++ b/arch/powerpc/include/asm/dma-mapping.h
3070 @@ -69,9 +69,9 @@ static inline unsigned long device_to_mask(struct device *dev)
3071 #ifdef CONFIG_PPC64
3072 extern struct dma_map_ops dma_iommu_ops;
3073 #endif
3074 -extern struct dma_map_ops dma_direct_ops;
3075 +extern const struct dma_map_ops dma_direct_ops;
3076
3077 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
3078 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
3079 {
3080 /* We don't handle the NULL dev case for ISA for now. We could
3081 * do it via an out of line call but it is not needed for now. The
3082 @@ -84,7 +84,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
3083 return dev->archdata.dma_ops;
3084 }
3085
3086 -static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
3087 +static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
3088 {
3089 dev->archdata.dma_ops = ops;
3090 }
3091 @@ -118,7 +118,7 @@ static inline void set_dma_offset(struct device *dev, dma_addr_t off)
3092
3093 static inline int dma_supported(struct device *dev, u64 mask)
3094 {
3095 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
3096 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
3097
3098 if (unlikely(dma_ops == NULL))
3099 return 0;
3100 @@ -132,7 +132,7 @@ static inline int dma_supported(struct device *dev, u64 mask)
3101
3102 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
3103 {
3104 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
3105 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
3106
3107 if (unlikely(dma_ops == NULL))
3108 return -EIO;
3109 @@ -147,7 +147,7 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask)
3110 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
3111 dma_addr_t *dma_handle, gfp_t flag)
3112 {
3113 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
3114 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
3115 void *cpu_addr;
3116
3117 BUG_ON(!dma_ops);
3118 @@ -162,7 +162,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
3119 static inline void dma_free_coherent(struct device *dev, size_t size,
3120 void *cpu_addr, dma_addr_t dma_handle)
3121 {
3122 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
3123 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
3124
3125 BUG_ON(!dma_ops);
3126
3127 @@ -173,7 +173,7 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
3128
3129 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
3130 {
3131 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
3132 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
3133
3134 if (dma_ops->mapping_error)
3135 return dma_ops->mapping_error(dev, dma_addr);
3136 diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
3137 index 5698502..5db093c 100644
3138 --- a/arch/powerpc/include/asm/elf.h
3139 +++ b/arch/powerpc/include/asm/elf.h
3140 @@ -179,8 +179,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
3141 the loader. We need to make sure that it is out of the way of the program
3142 that it will "exec", and that there is sufficient room for the brk. */
3143
3144 -extern unsigned long randomize_et_dyn(unsigned long base);
3145 -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
3146 +#define ELF_ET_DYN_BASE (0x20000000)
3147 +
3148 +#ifdef CONFIG_PAX_ASLR
3149 +#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
3150 +
3151 +#ifdef __powerpc64__
3152 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
3153 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
3154 +#else
3155 +#define PAX_DELTA_MMAP_LEN 15
3156 +#define PAX_DELTA_STACK_LEN 15
3157 +#endif
3158 +#endif
3159
3160 /*
3161 * Our registers are always unsigned longs, whether we're a 32 bit
3162 @@ -275,9 +286,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
3163 (0x7ff >> (PAGE_SHIFT - 12)) : \
3164 (0x3ffff >> (PAGE_SHIFT - 12)))
3165
3166 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
3167 -#define arch_randomize_brk arch_randomize_brk
3168 -
3169 #endif /* __KERNEL__ */
3170
3171 /*
3172 diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
3173 index edfc980..1766f59 100644
3174 --- a/arch/powerpc/include/asm/iommu.h
3175 +++ b/arch/powerpc/include/asm/iommu.h
3176 @@ -116,6 +116,9 @@ extern void iommu_init_early_iSeries(void);
3177 extern void iommu_init_early_dart(void);
3178 extern void iommu_init_early_pasemi(void);
3179
3180 +/* dma-iommu.c */
3181 +extern int dma_iommu_dma_supported(struct device *dev, u64 mask);
3182 +
3183 #ifdef CONFIG_PCI
3184 extern void pci_iommu_init(void);
3185 extern void pci_direct_iommu_init(void);
3186 diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
3187 index 9163695..5a00112 100644
3188 --- a/arch/powerpc/include/asm/kmap_types.h
3189 +++ b/arch/powerpc/include/asm/kmap_types.h
3190 @@ -26,6 +26,7 @@ enum km_type {
3191 KM_SOFTIRQ1,
3192 KM_PPC_SYNC_PAGE,
3193 KM_PPC_SYNC_ICACHE,
3194 + KM_CLEARPAGE,
3195 KM_TYPE_NR
3196 };
3197
3198 diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
3199 index ff24254..fe45b21 100644
3200 --- a/arch/powerpc/include/asm/page.h
3201 +++ b/arch/powerpc/include/asm/page.h
3202 @@ -116,8 +116,9 @@ extern phys_addr_t kernstart_addr;
3203 * and needs to be executable. This means the whole heap ends
3204 * up being executable.
3205 */
3206 -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
3207 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3208 +#define VM_DATA_DEFAULT_FLAGS32 \
3209 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
3210 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3211
3212 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
3213 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3214 @@ -145,6 +146,9 @@ extern phys_addr_t kernstart_addr;
3215 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
3216 #endif
3217
3218 +#define ktla_ktva(addr) (addr)
3219 +#define ktva_ktla(addr) (addr)
3220 +
3221 #ifndef __ASSEMBLY__
3222
3223 #undef STRICT_MM_TYPECHECKS
3224 diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
3225 index 3f17b83..1f9e766 100644
3226 --- a/arch/powerpc/include/asm/page_64.h
3227 +++ b/arch/powerpc/include/asm/page_64.h
3228 @@ -180,15 +180,18 @@ do { \
3229 * stack by default, so in the absense of a PT_GNU_STACK program header
3230 * we turn execute permission off.
3231 */
3232 -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
3233 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3234 +#define VM_STACK_DEFAULT_FLAGS32 \
3235 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
3236 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3237
3238 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
3239 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3240
3241 +#ifndef CONFIG_PAX_PAGEEXEC
3242 #define VM_STACK_DEFAULT_FLAGS \
3243 (test_thread_flag(TIF_32BIT) ? \
3244 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
3245 +#endif
3246
3247 #include <asm-generic/getorder.h>
3248
3249 diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
3250 index b5ea626..40308222 100644
3251 --- a/arch/powerpc/include/asm/pci.h
3252 +++ b/arch/powerpc/include/asm/pci.h
3253 @@ -65,8 +65,8 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
3254 }
3255
3256 #ifdef CONFIG_PCI
3257 -extern void set_pci_dma_ops(struct dma_map_ops *dma_ops);
3258 -extern struct dma_map_ops *get_pci_dma_ops(void);
3259 +extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops);
3260 +extern const struct dma_map_ops *get_pci_dma_ops(void);
3261 #else /* CONFIG_PCI */
3262 #define set_pci_dma_ops(d)
3263 #define get_pci_dma_ops() NULL
3264 diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
3265 index 2a5da06..d65bea2 100644
3266 --- a/arch/powerpc/include/asm/pgtable.h
3267 +++ b/arch/powerpc/include/asm/pgtable.h
3268 @@ -2,6 +2,7 @@
3269 #define _ASM_POWERPC_PGTABLE_H
3270 #ifdef __KERNEL__
3271
3272 +#include <linux/const.h>
3273 #ifndef __ASSEMBLY__
3274 #include <asm/processor.h> /* For TASK_SIZE */
3275 #include <asm/mmu.h>
3276 diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
3277 index 4aad413..85d86bf 100644
3278 --- a/arch/powerpc/include/asm/pte-hash32.h
3279 +++ b/arch/powerpc/include/asm/pte-hash32.h
3280 @@ -21,6 +21,7 @@
3281 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
3282 #define _PAGE_USER 0x004 /* usermode access allowed */
3283 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
3284 +#define _PAGE_EXEC _PAGE_GUARDED
3285 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
3286 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
3287 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
3288 diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
3289 index 8c34149..78f425a 100644
3290 --- a/arch/powerpc/include/asm/ptrace.h
3291 +++ b/arch/powerpc/include/asm/ptrace.h
3292 @@ -103,7 +103,7 @@ extern unsigned long profile_pc(struct pt_regs *regs);
3293 } while(0)
3294
3295 struct task_struct;
3296 -extern unsigned long ptrace_get_reg(struct task_struct *task, int regno);
3297 +extern unsigned long ptrace_get_reg(struct task_struct *task, unsigned int regno);
3298 extern int ptrace_put_reg(struct task_struct *task, int regno,
3299 unsigned long data);
3300
3301 diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
3302 index 32a7c30..be3a8bb 100644
3303 --- a/arch/powerpc/include/asm/reg.h
3304 +++ b/arch/powerpc/include/asm/reg.h
3305 @@ -191,6 +191,7 @@
3306 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
3307 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
3308 #define DSISR_NOHPTE 0x40000000 /* no translation found */
3309 +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
3310 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
3311 #define DSISR_ISSTORE 0x02000000 /* access was a store */
3312 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
3313 diff --git a/arch/powerpc/include/asm/swiotlb.h b/arch/powerpc/include/asm/swiotlb.h
3314 index 8979d4c..d2fd0d3 100644
3315 --- a/arch/powerpc/include/asm/swiotlb.h
3316 +++ b/arch/powerpc/include/asm/swiotlb.h
3317 @@ -13,7 +13,7 @@
3318
3319 #include <linux/swiotlb.h>
3320
3321 -extern struct dma_map_ops swiotlb_dma_ops;
3322 +extern const struct dma_map_ops swiotlb_dma_ops;
3323
3324 static inline void dma_mark_clean(void *addr, size_t size) {}
3325
3326 diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
3327 index 094a12a..877a60a 100644
3328 --- a/arch/powerpc/include/asm/system.h
3329 +++ b/arch/powerpc/include/asm/system.h
3330 @@ -531,7 +531,7 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
3331 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
3332 #endif
3333
3334 -extern unsigned long arch_align_stack(unsigned long sp);
3335 +#define arch_align_stack(x) ((x) & ~0xfUL)
3336
3337 /* Used in very early kernel initialization. */
3338 extern unsigned long reloc_offset(void);
3339 diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
3340 index bd0fb84..a42a14b 100644
3341 --- a/arch/powerpc/include/asm/uaccess.h
3342 +++ b/arch/powerpc/include/asm/uaccess.h
3343 @@ -13,6 +13,8 @@
3344 #define VERIFY_READ 0
3345 #define VERIFY_WRITE 1
3346
3347 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
3348 +
3349 /*
3350 * The fs value determines whether argument validity checking should be
3351 * performed or not. If get_fs() == USER_DS, checking is performed, with
3352 @@ -327,52 +329,6 @@ do { \
3353 extern unsigned long __copy_tofrom_user(void __user *to,
3354 const void __user *from, unsigned long size);
3355
3356 -#ifndef __powerpc64__
3357 -
3358 -static inline unsigned long copy_from_user(void *to,
3359 - const void __user *from, unsigned long n)
3360 -{
3361 - unsigned long over;
3362 -
3363 - if (access_ok(VERIFY_READ, from, n))
3364 - return __copy_tofrom_user((__force void __user *)to, from, n);
3365 - if ((unsigned long)from < TASK_SIZE) {
3366 - over = (unsigned long)from + n - TASK_SIZE;
3367 - return __copy_tofrom_user((__force void __user *)to, from,
3368 - n - over) + over;
3369 - }
3370 - return n;
3371 -}
3372 -
3373 -static inline unsigned long copy_to_user(void __user *to,
3374 - const void *from, unsigned long n)
3375 -{
3376 - unsigned long over;
3377 -
3378 - if (access_ok(VERIFY_WRITE, to, n))
3379 - return __copy_tofrom_user(to, (__force void __user *)from, n);
3380 - if ((unsigned long)to < TASK_SIZE) {
3381 - over = (unsigned long)to + n - TASK_SIZE;
3382 - return __copy_tofrom_user(to, (__force void __user *)from,
3383 - n - over) + over;
3384 - }
3385 - return n;
3386 -}
3387 -
3388 -#else /* __powerpc64__ */
3389 -
3390 -#define __copy_in_user(to, from, size) \
3391 - __copy_tofrom_user((to), (from), (size))
3392 -
3393 -extern unsigned long copy_from_user(void *to, const void __user *from,
3394 - unsigned long n);
3395 -extern unsigned long copy_to_user(void __user *to, const void *from,
3396 - unsigned long n);
3397 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
3398 - unsigned long n);
3399 -
3400 -#endif /* __powerpc64__ */
3401 -
3402 static inline unsigned long __copy_from_user_inatomic(void *to,
3403 const void __user *from, unsigned long n)
3404 {
3405 @@ -396,6 +352,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
3406 if (ret == 0)
3407 return 0;
3408 }
3409 +
3410 + if (!__builtin_constant_p(n))
3411 + check_object_size(to, n, false);
3412 +
3413 return __copy_tofrom_user((__force void __user *)to, from, n);
3414 }
3415
3416 @@ -422,6 +382,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
3417 if (ret == 0)
3418 return 0;
3419 }
3420 +
3421 + if (!__builtin_constant_p(n))
3422 + check_object_size(from, n, true);
3423 +
3424 return __copy_tofrom_user(to, (__force const void __user *)from, n);
3425 }
3426
3427 @@ -439,6 +403,92 @@ static inline unsigned long __copy_to_user(void __user *to,
3428 return __copy_to_user_inatomic(to, from, size);
3429 }
3430
3431 +#ifndef __powerpc64__
3432 +
3433 +static inline unsigned long __must_check copy_from_user(void *to,
3434 + const void __user *from, unsigned long n)
3435 +{
3436 + unsigned long over;
3437 +
3438 + if ((long)n < 0)
3439 + return n;
3440 +
3441 + if (access_ok(VERIFY_READ, from, n)) {
3442 + if (!__builtin_constant_p(n))
3443 + check_object_size(to, n, false);
3444 + return __copy_tofrom_user((__force void __user *)to, from, n);
3445 + }
3446 + if ((unsigned long)from < TASK_SIZE) {
3447 + over = (unsigned long)from + n - TASK_SIZE;
3448 + if (!__builtin_constant_p(n - over))
3449 + check_object_size(to, n - over, false);
3450 + return __copy_tofrom_user((__force void __user *)to, from,
3451 + n - over) + over;
3452 + }
3453 + return n;
3454 +}
3455 +
3456 +static inline unsigned long __must_check copy_to_user(void __user *to,
3457 + const void *from, unsigned long n)
3458 +{
3459 + unsigned long over;
3460 +
3461 + if ((long)n < 0)
3462 + return n;
3463 +
3464 + if (access_ok(VERIFY_WRITE, to, n)) {
3465 + if (!__builtin_constant_p(n))
3466 + check_object_size(from, n, true);
3467 + return __copy_tofrom_user(to, (__force void __user *)from, n);
3468 + }
3469 + if ((unsigned long)to < TASK_SIZE) {
3470 + over = (unsigned long)to + n - TASK_SIZE;
3471 + if (!__builtin_constant_p(n))
3472 + check_object_size(from, n - over, true);
3473 + return __copy_tofrom_user(to, (__force void __user *)from,
3474 + n - over) + over;
3475 + }
3476 + return n;
3477 +}
3478 +
3479 +#else /* __powerpc64__ */
3480 +
3481 +#define __copy_in_user(to, from, size) \
3482 + __copy_tofrom_user((to), (from), (size))
3483 +
3484 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
3485 +{
3486 + if ((long)n < 0 || n > INT_MAX)
3487 + return n;
3488 +
3489 + if (!__builtin_constant_p(n))
3490 + check_object_size(to, n, false);
3491 +
3492 + if (likely(access_ok(VERIFY_READ, from, n)))
3493 + n = __copy_from_user(to, from, n);
3494 + else
3495 + memset(to, 0, n);
3496 + return n;
3497 +}
3498 +
3499 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
3500 +{
3501 + if ((long)n < 0 || n > INT_MAX)
3502 + return n;
3503 +
3504 + if (likely(access_ok(VERIFY_WRITE, to, n))) {
3505 + if (!__builtin_constant_p(n))
3506 + check_object_size(from, n, true);
3507 + n = __copy_to_user(to, from, n);
3508 + }
3509 + return n;
3510 +}
3511 +
3512 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
3513 + unsigned long n);
3514 +
3515 +#endif /* __powerpc64__ */
3516 +
3517 extern unsigned long __clear_user(void __user *addr, unsigned long size);
3518
3519 static inline unsigned long clear_user(void __user *addr, unsigned long size)
3520 diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
3521 index bb37b1d..01fe9ce 100644
3522 --- a/arch/powerpc/kernel/cacheinfo.c
3523 +++ b/arch/powerpc/kernel/cacheinfo.c
3524 @@ -642,7 +642,7 @@ static struct kobj_attribute *cache_index_opt_attrs[] = {
3525 &cache_assoc_attr,
3526 };
3527
3528 -static struct sysfs_ops cache_index_ops = {
3529 +static const struct sysfs_ops cache_index_ops = {
3530 .show = cache_index_show,
3531 };
3532
3533 diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
3534 index 37771a5..648530c 100644
3535 --- a/arch/powerpc/kernel/dma-iommu.c
3536 +++ b/arch/powerpc/kernel/dma-iommu.c
3537 @@ -70,7 +70,7 @@ static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
3538 }
3539
3540 /* We support DMA to/from any memory page via the iommu */
3541 -static int dma_iommu_dma_supported(struct device *dev, u64 mask)
3542 +int dma_iommu_dma_supported(struct device *dev, u64 mask)
3543 {
3544 struct iommu_table *tbl = get_iommu_table_base(dev);
3545
3546 diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
3547 index e96cbbd..bdd6d41 100644
3548 --- a/arch/powerpc/kernel/dma-swiotlb.c
3549 +++ b/arch/powerpc/kernel/dma-swiotlb.c
3550 @@ -31,7 +31,7 @@ unsigned int ppc_swiotlb_enable;
3551 * map_page, and unmap_page on highmem, use normal dma_ops
3552 * for everything else.
3553 */
3554 -struct dma_map_ops swiotlb_dma_ops = {
3555 +const struct dma_map_ops swiotlb_dma_ops = {
3556 .alloc_coherent = dma_direct_alloc_coherent,
3557 .free_coherent = dma_direct_free_coherent,
3558 .map_sg = swiotlb_map_sg_attrs,
3559 diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
3560 index 6215062..ebea59c 100644
3561 --- a/arch/powerpc/kernel/dma.c
3562 +++ b/arch/powerpc/kernel/dma.c
3563 @@ -134,7 +134,7 @@ static inline void dma_direct_sync_single_range(struct device *dev,
3564 }
3565 #endif
3566
3567 -struct dma_map_ops dma_direct_ops = {
3568 +const struct dma_map_ops dma_direct_ops = {
3569 .alloc_coherent = dma_direct_alloc_coherent,
3570 .free_coherent = dma_direct_free_coherent,
3571 .map_sg = dma_direct_map_sg,
3572 diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
3573 index 24dcc0e..a300455 100644
3574 --- a/arch/powerpc/kernel/exceptions-64e.S
3575 +++ b/arch/powerpc/kernel/exceptions-64e.S
3576 @@ -455,6 +455,7 @@ storage_fault_common:
3577 std r14,_DAR(r1)
3578 std r15,_DSISR(r1)
3579 addi r3,r1,STACK_FRAME_OVERHEAD
3580 + bl .save_nvgprs
3581 mr r4,r14
3582 mr r5,r15
3583 ld r14,PACA_EXGEN+EX_R14(r13)
3584 @@ -464,8 +465,7 @@ storage_fault_common:
3585 cmpdi r3,0
3586 bne- 1f
3587 b .ret_from_except_lite
3588 -1: bl .save_nvgprs
3589 - mr r5,r3
3590 +1: mr r5,r3
3591 addi r3,r1,STACK_FRAME_OVERHEAD
3592 ld r4,_DAR(r1)
3593 bl .bad_page_fault
3594 diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
3595 index 1808876..9fd206a 100644
3596 --- a/arch/powerpc/kernel/exceptions-64s.S
3597 +++ b/arch/powerpc/kernel/exceptions-64s.S
3598 @@ -818,10 +818,10 @@ handle_page_fault:
3599 11: ld r4,_DAR(r1)
3600 ld r5,_DSISR(r1)
3601 addi r3,r1,STACK_FRAME_OVERHEAD
3602 + bl .save_nvgprs
3603 bl .do_page_fault
3604 cmpdi r3,0
3605 beq+ 13f
3606 - bl .save_nvgprs
3607 mr r5,r3
3608 addi r3,r1,STACK_FRAME_OVERHEAD
3609 lwz r4,_DAR(r1)
3610 diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
3611 index a4c8b38..1b09ad9 100644
3612 --- a/arch/powerpc/kernel/ibmebus.c
3613 +++ b/arch/powerpc/kernel/ibmebus.c
3614 @@ -127,7 +127,7 @@ static int ibmebus_dma_supported(struct device *dev, u64 mask)
3615 return 1;
3616 }
3617
3618 -static struct dma_map_ops ibmebus_dma_ops = {
3619 +static const struct dma_map_ops ibmebus_dma_ops = {
3620 .alloc_coherent = ibmebus_alloc_coherent,
3621 .free_coherent = ibmebus_free_coherent,
3622 .map_sg = ibmebus_map_sg,
3623 diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
3624 index 641c74b..8339ad7 100644
3625 --- a/arch/powerpc/kernel/kgdb.c
3626 +++ b/arch/powerpc/kernel/kgdb.c
3627 @@ -126,7 +126,7 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs)
3628 if (kgdb_handle_exception(0, SIGTRAP, 0, regs) != 0)
3629 return 0;
3630
3631 - if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
3632 + if (*(u32 *) (regs->nip) == *(const u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
3633 regs->nip += 4;
3634
3635 return 1;
3636 @@ -353,7 +353,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
3637 /*
3638 * Global data
3639 */
3640 -struct kgdb_arch arch_kgdb_ops = {
3641 +const struct kgdb_arch arch_kgdb_ops = {
3642 .gdb_bpt_instr = {0x7d, 0x82, 0x10, 0x08},
3643 };
3644
3645 diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c
3646 index 477c663..4f50234 100644
3647 --- a/arch/powerpc/kernel/module.c
3648 +++ b/arch/powerpc/kernel/module.c
3649 @@ -31,11 +31,24 @@
3650
3651 LIST_HEAD(module_bug_list);
3652
3653 +#ifdef CONFIG_PAX_KERNEXEC
3654 void *module_alloc(unsigned long size)
3655 {
3656 if (size == 0)
3657 return NULL;
3658
3659 + return vmalloc(size);
3660 +}
3661 +
3662 +void *module_alloc_exec(unsigned long size)
3663 +#else
3664 +void *module_alloc(unsigned long size)
3665 +#endif
3666 +
3667 +{
3668 + if (size == 0)
3669 + return NULL;
3670 +
3671 return vmalloc_exec(size);
3672 }
3673
3674 @@ -45,6 +58,13 @@ void module_free(struct module *mod, void *module_region)
3675 vfree(module_region);
3676 }
3677
3678 +#ifdef CONFIG_PAX_KERNEXEC
3679 +void module_free_exec(struct module *mod, void *module_region)
3680 +{
3681 + module_free(mod, module_region);
3682 +}
3683 +#endif
3684 +
3685 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
3686 const Elf_Shdr *sechdrs,
3687 const char *name)
3688 diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
3689 index f832773..0507238 100644
3690 --- a/arch/powerpc/kernel/module_32.c
3691 +++ b/arch/powerpc/kernel/module_32.c
3692 @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
3693 me->arch.core_plt_section = i;
3694 }
3695 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
3696 - printk("Module doesn't contain .plt or .init.plt sections.\n");
3697 + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
3698 return -ENOEXEC;
3699 }
3700
3701 @@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *location,
3702
3703 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
3704 /* Init, or core PLT? */
3705 - if (location >= mod->module_core
3706 - && location < mod->module_core + mod->core_size)
3707 + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
3708 + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
3709 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
3710 - else
3711 + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
3712 + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
3713 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
3714 + else {
3715 + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
3716 + return ~0UL;
3717 + }
3718
3719 /* Find this entry, or if that fails, the next avail. entry */
3720 while (entry->jump[0]) {
3721 diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
3722 index cadbed6..b9bbb00 100644
3723 --- a/arch/powerpc/kernel/pci-common.c
3724 +++ b/arch/powerpc/kernel/pci-common.c
3725 @@ -50,14 +50,14 @@ resource_size_t isa_mem_base;
3726 unsigned int ppc_pci_flags = 0;
3727
3728
3729 -static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
3730 +static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
3731
3732 -void set_pci_dma_ops(struct dma_map_ops *dma_ops)
3733 +void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
3734 {
3735 pci_dma_ops = dma_ops;
3736 }
3737
3738 -struct dma_map_ops *get_pci_dma_ops(void)
3739 +const struct dma_map_ops *get_pci_dma_ops(void)
3740 {
3741 return pci_dma_ops;
3742 }
3743 diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
3744 index 7b816da..8d5c277 100644
3745 --- a/arch/powerpc/kernel/process.c
3746 +++ b/arch/powerpc/kernel/process.c
3747 @@ -539,8 +539,8 @@ void show_regs(struct pt_regs * regs)
3748 * Lookup NIP late so we have the best change of getting the
3749 * above info out without failing
3750 */
3751 - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
3752 - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
3753 + printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
3754 + printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
3755 #endif
3756 show_stack(current, (unsigned long *) regs->gpr[1]);
3757 if (!user_mode(regs))
3758 @@ -1034,10 +1034,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
3759 newsp = stack[0];
3760 ip = stack[STACK_FRAME_LR_SAVE];
3761 if (!firstframe || ip != lr) {
3762 - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
3763 + printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
3764 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3765 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
3766 - printk(" (%pS)",
3767 + printk(" (%pA)",
3768 (void *)current->ret_stack[curr_frame].ret);
3769 curr_frame--;
3770 }
3771 @@ -1057,7 +1057,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
3772 struct pt_regs *regs = (struct pt_regs *)
3773 (sp + STACK_FRAME_OVERHEAD);
3774 lr = regs->link;
3775 - printk("--- Exception: %lx at %pS\n LR = %pS\n",
3776 + printk("--- Exception: %lx at %pA\n LR = %pA\n",
3777 regs->trap, (void *)regs->nip, (void *)lr);
3778 firstframe = 1;
3779 }
3780 @@ -1134,58 +1134,3 @@ void thread_info_cache_init(void)
3781 }
3782
3783 #endif /* THREAD_SHIFT < PAGE_SHIFT */
3784 -
3785 -unsigned long arch_align_stack(unsigned long sp)
3786 -{
3787 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
3788 - sp -= get_random_int() & ~PAGE_MASK;
3789 - return sp & ~0xf;
3790 -}
3791 -
3792 -static inline unsigned long brk_rnd(void)
3793 -{
3794 - unsigned long rnd = 0;
3795 -
3796 - /* 8MB for 32bit, 1GB for 64bit */
3797 - if (is_32bit_task())
3798 - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
3799 - else
3800 - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
3801 -
3802 - return rnd << PAGE_SHIFT;
3803 -}
3804 -
3805 -unsigned long arch_randomize_brk(struct mm_struct *mm)
3806 -{
3807 - unsigned long base = mm->brk;
3808 - unsigned long ret;
3809 -
3810 -#ifdef CONFIG_PPC_STD_MMU_64
3811 - /*
3812 - * If we are using 1TB segments and we are allowed to randomise
3813 - * the heap, we can put it above 1TB so it is backed by a 1TB
3814 - * segment. Otherwise the heap will be in the bottom 1TB
3815 - * which always uses 256MB segments and this may result in a
3816 - * performance penalty.
3817 - */
3818 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
3819 - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
3820 -#endif
3821 -
3822 - ret = PAGE_ALIGN(base + brk_rnd());
3823 -
3824 - if (ret < mm->brk)
3825 - return mm->brk;
3826 -
3827 - return ret;
3828 -}
3829 -
3830 -unsigned long randomize_et_dyn(unsigned long base)
3831 -{
3832 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
3833 -
3834 - if (ret < base)
3835 - return base;
3836 -
3837 - return ret;
3838 -}
3839 diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
3840 index ef14988..856c4bc 100644
3841 --- a/arch/powerpc/kernel/ptrace.c
3842 +++ b/arch/powerpc/kernel/ptrace.c
3843 @@ -86,7 +86,7 @@ static int set_user_trap(struct task_struct *task, unsigned long trap)
3844 /*
3845 * Get contents of register REGNO in task TASK.
3846 */
3847 -unsigned long ptrace_get_reg(struct task_struct *task, int regno)
3848 +unsigned long ptrace_get_reg(struct task_struct *task, unsigned int regno)
3849 {
3850 if (task->thread.regs == NULL)
3851 return -EIO;
3852 @@ -894,7 +894,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
3853
3854 CHECK_FULL_REGS(child->thread.regs);
3855 if (index < PT_FPR0) {
3856 - tmp = ptrace_get_reg(child, (int) index);
3857 + tmp = ptrace_get_reg(child, index);
3858 } else {
3859 flush_fp_to_thread(child);
3860 tmp = ((unsigned long *)child->thread.fpr)
3861 diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
3862 index d670429..2bc59b2 100644
3863 --- a/arch/powerpc/kernel/signal_32.c
3864 +++ b/arch/powerpc/kernel/signal_32.c
3865 @@ -857,7 +857,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
3866 /* Save user registers on the stack */
3867 frame = &rt_sf->uc.uc_mcontext;
3868 addr = frame;
3869 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
3870 + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
3871 if (save_user_regs(regs, frame, 0, 1))
3872 goto badframe;
3873 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
3874 diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
3875 index 2fe6fc6..ada0d96 100644
3876 --- a/arch/powerpc/kernel/signal_64.c
3877 +++ b/arch/powerpc/kernel/signal_64.c
3878 @@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
3879 current->thread.fpscr.val = 0;
3880
3881 /* Set up to return from userspace. */
3882 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
3883 + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
3884 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
3885 } else {
3886 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
3887 diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c
3888 index b97c2d6..dd01a6a 100644
3889 --- a/arch/powerpc/kernel/sys_ppc32.c
3890 +++ b/arch/powerpc/kernel/sys_ppc32.c
3891 @@ -563,10 +563,10 @@ asmlinkage long compat_sys_sysctl(struct __sysctl_args32 __user *args)
3892 if (oldlenp) {
3893 if (!error) {
3894 if (get_user(oldlen, oldlenp) ||
3895 - put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)))
3896 + put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)) ||
3897 + copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused)))
3898 error = -EFAULT;
3899 }
3900 - copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused));
3901 }
3902 return error;
3903 }
3904 diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
3905 index 6f0ae1a..e4b6a56 100644
3906 --- a/arch/powerpc/kernel/traps.c
3907 +++ b/arch/powerpc/kernel/traps.c
3908 @@ -99,6 +99,8 @@ static void pmac_backlight_unblank(void)
3909 static inline void pmac_backlight_unblank(void) { }
3910 #endif
3911
3912 +extern void gr_handle_kernel_exploit(void);
3913 +
3914 int die(const char *str, struct pt_regs *regs, long err)
3915 {
3916 static struct {
3917 @@ -168,6 +170,8 @@ int die(const char *str, struct pt_regs *regs, long err)
3918 if (panic_on_oops)
3919 panic("Fatal exception");
3920
3921 + gr_handle_kernel_exploit();
3922 +
3923 oops_exit();
3924 do_exit(err);
3925
3926 diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
3927 index 137dc22..fe57a79 100644
3928 --- a/arch/powerpc/kernel/vdso.c
3929 +++ b/arch/powerpc/kernel/vdso.c
3930 @@ -36,6 +36,7 @@
3931 #include <asm/firmware.h>
3932 #include <asm/vdso.h>
3933 #include <asm/vdso_datapage.h>
3934 +#include <asm/mman.h>
3935
3936 #include "setup.h"
3937
3938 @@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
3939 vdso_base = VDSO32_MBASE;
3940 #endif
3941
3942 - current->mm->context.vdso_base = 0;
3943 + current->mm->context.vdso_base = ~0UL;
3944
3945 /* vDSO has a problem and was disabled, just don't "enable" it for the
3946 * process
3947 @@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
3948 vdso_base = get_unmapped_area(NULL, vdso_base,
3949 (vdso_pages << PAGE_SHIFT) +
3950 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
3951 - 0, 0);
3952 + 0, MAP_PRIVATE | MAP_EXECUTABLE);
3953 if (IS_ERR_VALUE(vdso_base)) {
3954 rc = vdso_base;
3955 goto fail_mmapsem;
3956 diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
3957 index 77f6421..829564a 100644
3958 --- a/arch/powerpc/kernel/vio.c
3959 +++ b/arch/powerpc/kernel/vio.c
3960 @@ -601,11 +601,12 @@ static void vio_dma_iommu_unmap_sg(struct device *dev,
3961 vio_cmo_dealloc(viodev, alloc_size);
3962 }
3963
3964 -struct dma_map_ops vio_dma_mapping_ops = {
3965 +static const struct dma_map_ops vio_dma_mapping_ops = {
3966 .alloc_coherent = vio_dma_iommu_alloc_coherent,
3967 .free_coherent = vio_dma_iommu_free_coherent,
3968 .map_sg = vio_dma_iommu_map_sg,
3969 .unmap_sg = vio_dma_iommu_unmap_sg,
3970 + .dma_supported = dma_iommu_dma_supported,
3971 .map_page = vio_dma_iommu_map_page,
3972 .unmap_page = vio_dma_iommu_unmap_page,
3973
3974 @@ -857,7 +858,6 @@ static void vio_cmo_bus_remove(struct vio_dev *viodev)
3975
3976 static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
3977 {
3978 - vio_dma_mapping_ops.dma_supported = dma_iommu_ops.dma_supported;
3979 viodev->dev.archdata.dma_ops = &vio_dma_mapping_ops;
3980 }
3981
3982 diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
3983 index 5eea6f3..5d10396 100644
3984 --- a/arch/powerpc/lib/usercopy_64.c
3985 +++ b/arch/powerpc/lib/usercopy_64.c
3986 @@ -9,22 +9,6 @@
3987 #include <linux/module.h>
3988 #include <asm/uaccess.h>
3989
3990 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3991 -{
3992 - if (likely(access_ok(VERIFY_READ, from, n)))
3993 - n = __copy_from_user(to, from, n);
3994 - else
3995 - memset(to, 0, n);
3996 - return n;
3997 -}
3998 -
3999 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
4000 -{
4001 - if (likely(access_ok(VERIFY_WRITE, to, n)))
4002 - n = __copy_to_user(to, from, n);
4003 - return n;
4004 -}
4005 -
4006 unsigned long copy_in_user(void __user *to, const void __user *from,
4007 unsigned long n)
4008 {
4009 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
4010 return n;
4011 }
4012
4013 -EXPORT_SYMBOL(copy_from_user);
4014 -EXPORT_SYMBOL(copy_to_user);
4015 EXPORT_SYMBOL(copy_in_user);
4016
4017 diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
4018 index e7dae82..877ce0d 100644
4019 --- a/arch/powerpc/mm/fault.c
4020 +++ b/arch/powerpc/mm/fault.c
4021 @@ -30,6 +30,10 @@
4022 #include <linux/kprobes.h>
4023 #include <linux/kdebug.h>
4024 #include <linux/perf_event.h>
4025 +#include <linux/slab.h>
4026 +#include <linux/pagemap.h>
4027 +#include <linux/compiler.h>
4028 +#include <linux/unistd.h>
4029
4030 #include <asm/firmware.h>
4031 #include <asm/page.h>
4032 @@ -40,6 +44,7 @@
4033 #include <asm/uaccess.h>
4034 #include <asm/tlbflush.h>
4035 #include <asm/siginfo.h>
4036 +#include <asm/ptrace.h>
4037
4038
4039 #ifdef CONFIG_KPROBES
4040 @@ -64,6 +69,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
4041 }
4042 #endif
4043
4044 +#ifdef CONFIG_PAX_PAGEEXEC
4045 +/*
4046 + * PaX: decide what to do with offenders (regs->nip = fault address)
4047 + *
4048 + * returns 1 when task should be killed
4049 + */
4050 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4051 +{
4052 + return 1;
4053 +}
4054 +
4055 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4056 +{
4057 + unsigned long i;
4058 +
4059 + printk(KERN_ERR "PAX: bytes at PC: ");
4060 + for (i = 0; i < 5; i++) {
4061 + unsigned int c;
4062 + if (get_user(c, (unsigned int __user *)pc+i))
4063 + printk(KERN_CONT "???????? ");
4064 + else
4065 + printk(KERN_CONT "%08x ", c);
4066 + }
4067 + printk("\n");
4068 +}
4069 +#endif
4070 +
4071 /*
4072 * Check whether the instruction at regs->nip is a store using
4073 * an update addressing form which will update r1.
4074 @@ -134,7 +166,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
4075 * indicate errors in DSISR but can validly be set in SRR1.
4076 */
4077 if (trap == 0x400)
4078 - error_code &= 0x48200000;
4079 + error_code &= 0x58200000;
4080 else
4081 is_write = error_code & DSISR_ISSTORE;
4082 #else
4083 @@ -250,7 +282,7 @@ good_area:
4084 * "undefined". Of those that can be set, this is the only
4085 * one which seems bad.
4086 */
4087 - if (error_code & 0x10000000)
4088 + if (error_code & DSISR_GUARDED)
4089 /* Guarded storage error. */
4090 goto bad_area;
4091 #endif /* CONFIG_8xx */
4092 @@ -265,7 +297,7 @@ good_area:
4093 * processors use the same I/D cache coherency mechanism
4094 * as embedded.
4095 */
4096 - if (error_code & DSISR_PROTFAULT)
4097 + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
4098 goto bad_area;
4099 #endif /* CONFIG_PPC_STD_MMU */
4100
4101 @@ -335,6 +367,23 @@ bad_area:
4102 bad_area_nosemaphore:
4103 /* User mode accesses cause a SIGSEGV */
4104 if (user_mode(regs)) {
4105 +
4106 +#ifdef CONFIG_PAX_PAGEEXEC
4107 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
4108 +#ifdef CONFIG_PPC_STD_MMU
4109 + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
4110 +#else
4111 + if (is_exec && regs->nip == address) {
4112 +#endif
4113 + switch (pax_handle_fetch_fault(regs)) {
4114 + }
4115 +
4116 + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
4117 + do_group_exit(SIGKILL);
4118 + }
4119 + }
4120 +#endif
4121 +
4122 _exception(SIGSEGV, regs, code, address);
4123 return 0;
4124 }
4125 diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
4126 index 5973631..ad617af 100644
4127 --- a/arch/powerpc/mm/mem.c
4128 +++ b/arch/powerpc/mm/mem.c
4129 @@ -250,7 +250,7 @@ static int __init mark_nonram_nosave(void)
4130 {
4131 unsigned long lmb_next_region_start_pfn,
4132 lmb_region_max_pfn;
4133 - int i;
4134 + unsigned int i;
4135
4136 for (i = 0; i < lmb.memory.cnt - 1; i++) {
4137 lmb_region_max_pfn =
4138 diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
4139 index 0d957a4..26d968f 100644
4140 --- a/arch/powerpc/mm/mmap_64.c
4141 +++ b/arch/powerpc/mm/mmap_64.c
4142 @@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4143 */
4144 if (mmap_is_legacy()) {
4145 mm->mmap_base = TASK_UNMAPPED_BASE;
4146 +
4147 +#ifdef CONFIG_PAX_RANDMMAP
4148 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4149 + mm->mmap_base += mm->delta_mmap;
4150 +#endif
4151 +
4152 mm->get_unmapped_area = arch_get_unmapped_area;
4153 mm->unmap_area = arch_unmap_area;
4154 } else {
4155 mm->mmap_base = mmap_base();
4156 +
4157 +#ifdef CONFIG_PAX_RANDMMAP
4158 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4159 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4160 +#endif
4161 +
4162 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4163 mm->unmap_area = arch_unmap_area_topdown;
4164 }
4165 diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
4166 index ba51948..23009d9 100644
4167 --- a/arch/powerpc/mm/slice.c
4168 +++ b/arch/powerpc/mm/slice.c
4169 @@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
4170 if ((mm->task_size - len) < addr)
4171 return 0;
4172 vma = find_vma(mm, addr);
4173 - return (!vma || (addr + len) <= vma->vm_start);
4174 + return check_heap_stack_gap(vma, addr, len);
4175 }
4176
4177 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
4178 @@ -256,7 +256,7 @@ full_search:
4179 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
4180 continue;
4181 }
4182 - if (!vma || addr + len <= vma->vm_start) {
4183 + if (check_heap_stack_gap(vma, addr, len)) {
4184 /*
4185 * Remember the place where we stopped the search:
4186 */
4187 @@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4188 }
4189 }
4190
4191 - addr = mm->mmap_base;
4192 - while (addr > len) {
4193 + if (mm->mmap_base < len)
4194 + addr = -ENOMEM;
4195 + else
4196 + addr = mm->mmap_base - len;
4197 +
4198 + while (!IS_ERR_VALUE(addr)) {
4199 /* Go down by chunk size */
4200 - addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
4201 + addr = _ALIGN_DOWN(addr, 1ul << pshift);
4202
4203 /* Check for hit with different page size */
4204 mask = slice_range_to_mask(addr, len);
4205 @@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4206 * return with success:
4207 */
4208 vma = find_vma(mm, addr);
4209 - if (!vma || (addr + len) <= vma->vm_start) {
4210 + if (check_heap_stack_gap(vma, addr, len)) {
4211 /* remember the address as a hint for next time */
4212 if (use_cache)
4213 mm->free_area_cache = addr;
4214 @@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4215 mm->cached_hole_size = vma->vm_start - addr;
4216
4217 /* try just below the current vma->vm_start */
4218 - addr = vma->vm_start;
4219 + addr = skip_heap_stack_gap(vma, len);
4220 }
4221
4222 /*
4223 @@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
4224 if (fixed && addr > (mm->task_size - len))
4225 return -EINVAL;
4226
4227 +#ifdef CONFIG_PAX_RANDMMAP
4228 + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
4229 + addr = 0;
4230 +#endif
4231 +
4232 /* If hint, make sure it matches our alignment restrictions */
4233 if (!fixed && addr) {
4234 addr = _ALIGN_UP(addr, 1ul << pshift);
4235 diff --git a/arch/powerpc/platforms/52xx/lite5200_pm.c b/arch/powerpc/platforms/52xx/lite5200_pm.c
4236 index b5c753d..8f01abe 100644
4237 --- a/arch/powerpc/platforms/52xx/lite5200_pm.c
4238 +++ b/arch/powerpc/platforms/52xx/lite5200_pm.c
4239 @@ -235,7 +235,7 @@ static void lite5200_pm_end(void)
4240 lite5200_pm_target_state = PM_SUSPEND_ON;
4241 }
4242
4243 -static struct platform_suspend_ops lite5200_pm_ops = {
4244 +static const struct platform_suspend_ops lite5200_pm_ops = {
4245 .valid = lite5200_pm_valid,
4246 .begin = lite5200_pm_begin,
4247 .prepare = lite5200_pm_prepare,
4248 diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pm.c b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
4249 index a55b0b6..478c18e 100644
4250 --- a/arch/powerpc/platforms/52xx/mpc52xx_pm.c
4251 +++ b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
4252 @@ -180,7 +180,7 @@ void mpc52xx_pm_finish(void)
4253 iounmap(mbar);
4254 }
4255
4256 -static struct platform_suspend_ops mpc52xx_pm_ops = {
4257 +static const struct platform_suspend_ops mpc52xx_pm_ops = {
4258 .valid = mpc52xx_pm_valid,
4259 .prepare = mpc52xx_pm_prepare,
4260 .enter = mpc52xx_pm_enter,
4261 diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c
4262 index 08e65fc..643d3ac 100644
4263 --- a/arch/powerpc/platforms/83xx/suspend.c
4264 +++ b/arch/powerpc/platforms/83xx/suspend.c
4265 @@ -273,7 +273,7 @@ static int mpc83xx_is_pci_agent(void)
4266 return ret;
4267 }
4268
4269 -static struct platform_suspend_ops mpc83xx_suspend_ops = {
4270 +static const struct platform_suspend_ops mpc83xx_suspend_ops = {
4271 .valid = mpc83xx_suspend_valid,
4272 .begin = mpc83xx_suspend_begin,
4273 .enter = mpc83xx_suspend_enter,
4274 diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
4275 index ca5bfdf..1602e09 100644
4276 --- a/arch/powerpc/platforms/cell/iommu.c
4277 +++ b/arch/powerpc/platforms/cell/iommu.c
4278 @@ -642,7 +642,7 @@ static int dma_fixed_dma_supported(struct device *dev, u64 mask)
4279
4280 static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
4281
4282 -struct dma_map_ops dma_iommu_fixed_ops = {
4283 +const struct dma_map_ops dma_iommu_fixed_ops = {
4284 .alloc_coherent = dma_fixed_alloc_coherent,
4285 .free_coherent = dma_fixed_free_coherent,
4286 .map_sg = dma_fixed_map_sg,
4287 diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c
4288 index e34b305..20e48ec 100644
4289 --- a/arch/powerpc/platforms/ps3/system-bus.c
4290 +++ b/arch/powerpc/platforms/ps3/system-bus.c
4291 @@ -694,7 +694,7 @@ static int ps3_dma_supported(struct device *_dev, u64 mask)
4292 return mask >= DMA_BIT_MASK(32);
4293 }
4294
4295 -static struct dma_map_ops ps3_sb_dma_ops = {
4296 +static const struct dma_map_ops ps3_sb_dma_ops = {
4297 .alloc_coherent = ps3_alloc_coherent,
4298 .free_coherent = ps3_free_coherent,
4299 .map_sg = ps3_sb_map_sg,
4300 @@ -704,7 +704,7 @@ static struct dma_map_ops ps3_sb_dma_ops = {
4301 .unmap_page = ps3_unmap_page,
4302 };
4303
4304 -static struct dma_map_ops ps3_ioc0_dma_ops = {
4305 +static const struct dma_map_ops ps3_ioc0_dma_ops = {
4306 .alloc_coherent = ps3_alloc_coherent,
4307 .free_coherent = ps3_free_coherent,
4308 .map_sg = ps3_ioc0_map_sg,
4309 diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
4310 index f0e6f28..60d53ed 100644
4311 --- a/arch/powerpc/platforms/pseries/Kconfig
4312 +++ b/arch/powerpc/platforms/pseries/Kconfig
4313 @@ -2,6 +2,8 @@ config PPC_PSERIES
4314 depends on PPC64 && PPC_BOOK3S
4315 bool "IBM pSeries & new (POWER5-based) iSeries"
4316 select MPIC
4317 + select PCI_MSI
4318 + select XICS
4319 select PPC_I8259
4320 select PPC_RTAS
4321 select RTAS_ERROR_LOGGING
4322 diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
4323 index 43c0aca..42c045b 100644
4324 --- a/arch/s390/Kconfig
4325 +++ b/arch/s390/Kconfig
4326 @@ -194,28 +194,26 @@ config AUDIT_ARCH
4327
4328 config S390_SWITCH_AMODE
4329 bool "Switch kernel/user addressing modes"
4330 + default y
4331 help
4332 This option allows to switch the addressing modes of kernel and user
4333 - space. The kernel parameter switch_amode=on will enable this feature,
4334 - default is disabled. Enabling this (via kernel parameter) on machines
4335 - earlier than IBM System z9-109 EC/BC will reduce system performance.
4336 + space. Enabling this on machines earlier than IBM System z9-109 EC/BC
4337 + will reduce system performance.
4338
4339 Note that this option will also be selected by selecting the execute
4340 - protection option below. Enabling the execute protection via the
4341 - noexec kernel parameter will also switch the addressing modes,
4342 - independent of the switch_amode kernel parameter.
4343 + protection option below. Enabling the execute protection will also
4344 + switch the addressing modes, independent of this option.
4345
4346
4347 config S390_EXEC_PROTECT
4348 bool "Data execute protection"
4349 + default y
4350 select S390_SWITCH_AMODE
4351 help
4352 This option allows to enable a buffer overflow protection for user
4353 space programs and it also selects the addressing mode option above.
4354 - The kernel parameter noexec=on will enable this feature and also
4355 - switch the addressing modes, default is disabled. Enabling this (via
4356 - kernel parameter) on machines earlier than IBM System z9-109 EC/BC
4357 - will reduce system performance.
4358 + Enabling this on machines earlier than IBM System z9-109 EC/BC will
4359 + reduce system performance.
4360
4361 comment "Code generation options"
4362
4363 diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
4364 index ae7c8f9..3f01a0c 100644
4365 --- a/arch/s390/include/asm/atomic.h
4366 +++ b/arch/s390/include/asm/atomic.h
4367 @@ -362,6 +362,16 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
4368 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
4369 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
4370
4371 +#define atomic64_read_unchecked(v) atomic64_read(v)
4372 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4373 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4374 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4375 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4376 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
4377 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4378 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
4379 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4380 +
4381 #define smp_mb__before_atomic_dec() smp_mb()
4382 #define smp_mb__after_atomic_dec() smp_mb()
4383 #define smp_mb__before_atomic_inc() smp_mb()
4384 diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
4385 index 9b86681..c5140db 100644
4386 --- a/arch/s390/include/asm/cache.h
4387 +++ b/arch/s390/include/asm/cache.h
4388 @@ -11,8 +11,10 @@
4389 #ifndef __ARCH_S390_CACHE_H
4390 #define __ARCH_S390_CACHE_H
4391
4392 -#define L1_CACHE_BYTES 256
4393 +#include <linux/const.h>
4394 +
4395 #define L1_CACHE_SHIFT 8
4396 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4397
4398 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
4399
4400 diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
4401 index e885442..e3a2817 100644
4402 --- a/arch/s390/include/asm/elf.h
4403 +++ b/arch/s390/include/asm/elf.h
4404 @@ -164,6 +164,13 @@ extern unsigned int vdso_enabled;
4405 that it will "exec", and that there is sufficient room for the brk. */
4406 #define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
4407
4408 +#ifdef CONFIG_PAX_ASLR
4409 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
4410 +
4411 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
4412 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
4413 +#endif
4414 +
4415 /* This yields a mask that user programs can use to figure out what
4416 instruction set this CPU supports. */
4417
4418 diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
4419 index e37478e..9ce0e9f 100644
4420 --- a/arch/s390/include/asm/setup.h
4421 +++ b/arch/s390/include/asm/setup.h
4422 @@ -50,13 +50,13 @@ extern unsigned long memory_end;
4423 void detect_memory_layout(struct mem_chunk chunk[]);
4424
4425 #ifdef CONFIG_S390_SWITCH_AMODE
4426 -extern unsigned int switch_amode;
4427 +#define switch_amode (1)
4428 #else
4429 #define switch_amode (0)
4430 #endif
4431
4432 #ifdef CONFIG_S390_EXEC_PROTECT
4433 -extern unsigned int s390_noexec;
4434 +#define s390_noexec (1)
4435 #else
4436 #define s390_noexec (0)
4437 #endif
4438 diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
4439 index 8377e91..e28e6f1 100644
4440 --- a/arch/s390/include/asm/uaccess.h
4441 +++ b/arch/s390/include/asm/uaccess.h
4442 @@ -232,6 +232,10 @@ static inline unsigned long __must_check
4443 copy_to_user(void __user *to, const void *from, unsigned long n)
4444 {
4445 might_fault();
4446 +
4447 + if ((long)n < 0)
4448 + return n;
4449 +
4450 if (access_ok(VERIFY_WRITE, to, n))
4451 n = __copy_to_user(to, from, n);
4452 return n;
4453 @@ -257,6 +261,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
4454 static inline unsigned long __must_check
4455 __copy_from_user(void *to, const void __user *from, unsigned long n)
4456 {
4457 + if ((long)n < 0)
4458 + return n;
4459 +
4460 if (__builtin_constant_p(n) && (n <= 256))
4461 return uaccess.copy_from_user_small(n, from, to);
4462 else
4463 @@ -283,6 +290,10 @@ static inline unsigned long __must_check
4464 copy_from_user(void *to, const void __user *from, unsigned long n)
4465 {
4466 might_fault();
4467 +
4468 + if ((long)n < 0)
4469 + return n;
4470 +
4471 if (access_ok(VERIFY_READ, from, n))
4472 n = __copy_from_user(to, from, n);
4473 else
4474 diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
4475 index 639380a..72e3c02 100644
4476 --- a/arch/s390/kernel/module.c
4477 +++ b/arch/s390/kernel/module.c
4478 @@ -166,11 +166,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
4479
4480 /* Increase core size by size of got & plt and set start
4481 offsets for got and plt. */
4482 - me->core_size = ALIGN(me->core_size, 4);
4483 - me->arch.got_offset = me->core_size;
4484 - me->core_size += me->arch.got_size;
4485 - me->arch.plt_offset = me->core_size;
4486 - me->core_size += me->arch.plt_size;
4487 + me->core_size_rw = ALIGN(me->core_size_rw, 4);
4488 + me->arch.got_offset = me->core_size_rw;
4489 + me->core_size_rw += me->arch.got_size;
4490 + me->arch.plt_offset = me->core_size_rx;
4491 + me->core_size_rx += me->arch.plt_size;
4492 return 0;
4493 }
4494
4495 @@ -256,7 +256,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4496 if (info->got_initialized == 0) {
4497 Elf_Addr *gotent;
4498
4499 - gotent = me->module_core + me->arch.got_offset +
4500 + gotent = me->module_core_rw + me->arch.got_offset +
4501 info->got_offset;
4502 *gotent = val;
4503 info->got_initialized = 1;
4504 @@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4505 else if (r_type == R_390_GOTENT ||
4506 r_type == R_390_GOTPLTENT)
4507 *(unsigned int *) loc =
4508 - (val + (Elf_Addr) me->module_core - loc) >> 1;
4509 + (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
4510 else if (r_type == R_390_GOT64 ||
4511 r_type == R_390_GOTPLT64)
4512 *(unsigned long *) loc = val;
4513 @@ -294,7 +294,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4514 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
4515 if (info->plt_initialized == 0) {
4516 unsigned int *ip;
4517 - ip = me->module_core + me->arch.plt_offset +
4518 + ip = me->module_core_rx + me->arch.plt_offset +
4519 info->plt_offset;
4520 #ifndef CONFIG_64BIT
4521 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
4522 @@ -319,7 +319,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4523 val - loc + 0xffffUL < 0x1ffffeUL) ||
4524 (r_type == R_390_PLT32DBL &&
4525 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
4526 - val = (Elf_Addr) me->module_core +
4527 + val = (Elf_Addr) me->module_core_rx +
4528 me->arch.plt_offset +
4529 info->plt_offset;
4530 val += rela->r_addend - loc;
4531 @@ -341,7 +341,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4532 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
4533 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
4534 val = val + rela->r_addend -
4535 - ((Elf_Addr) me->module_core + me->arch.got_offset);
4536 + ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
4537 if (r_type == R_390_GOTOFF16)
4538 *(unsigned short *) loc = val;
4539 else if (r_type == R_390_GOTOFF32)
4540 @@ -351,7 +351,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4541 break;
4542 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
4543 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
4544 - val = (Elf_Addr) me->module_core + me->arch.got_offset +
4545 + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
4546 rela->r_addend - loc;
4547 if (r_type == R_390_GOTPC)
4548 *(unsigned int *) loc = val;
4549 diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
4550 index 0b2573a..71a22ec 100644
4551 --- a/arch/s390/kernel/setup.c
4552 +++ b/arch/s390/kernel/setup.c
4553 @@ -306,9 +306,6 @@ static int __init early_parse_mem(char *p)
4554 early_param("mem", early_parse_mem);
4555
4556 #ifdef CONFIG_S390_SWITCH_AMODE
4557 -unsigned int switch_amode = 0;
4558 -EXPORT_SYMBOL_GPL(switch_amode);
4559 -
4560 static int set_amode_and_uaccess(unsigned long user_amode,
4561 unsigned long user32_amode)
4562 {
4563 @@ -334,17 +331,6 @@ static int set_amode_and_uaccess(unsigned long user_amode,
4564 return 0;
4565 }
4566 }
4567 -
4568 -/*
4569 - * Switch kernel/user addressing modes?
4570 - */
4571 -static int __init early_parse_switch_amode(char *p)
4572 -{
4573 - switch_amode = 1;
4574 - return 0;
4575 -}
4576 -early_param("switch_amode", early_parse_switch_amode);
4577 -
4578 #else /* CONFIG_S390_SWITCH_AMODE */
4579 static inline int set_amode_and_uaccess(unsigned long user_amode,
4580 unsigned long user32_amode)
4581 @@ -353,24 +339,6 @@ static inline int set_amode_and_uaccess(unsigned long user_amode,
4582 }
4583 #endif /* CONFIG_S390_SWITCH_AMODE */
4584
4585 -#ifdef CONFIG_S390_EXEC_PROTECT
4586 -unsigned int s390_noexec = 0;
4587 -EXPORT_SYMBOL_GPL(s390_noexec);
4588 -
4589 -/*
4590 - * Enable execute protection?
4591 - */
4592 -static int __init early_parse_noexec(char *p)
4593 -{
4594 - if (!strncmp(p, "off", 3))
4595 - return 0;
4596 - switch_amode = 1;
4597 - s390_noexec = 1;
4598 - return 0;
4599 -}
4600 -early_param("noexec", early_parse_noexec);
4601 -#endif /* CONFIG_S390_EXEC_PROTECT */
4602 -
4603 static void setup_addressing_mode(void)
4604 {
4605 if (s390_noexec) {
4606 diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
4607 index 0ab74ae..c8b68f9 100644
4608 --- a/arch/s390/mm/mmap.c
4609 +++ b/arch/s390/mm/mmap.c
4610 @@ -78,10 +78,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4611 */
4612 if (mmap_is_legacy()) {
4613 mm->mmap_base = TASK_UNMAPPED_BASE;
4614 +
4615 +#ifdef CONFIG_PAX_RANDMMAP
4616 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4617 + mm->mmap_base += mm->delta_mmap;
4618 +#endif
4619 +
4620 mm->get_unmapped_area = arch_get_unmapped_area;
4621 mm->unmap_area = arch_unmap_area;
4622 } else {
4623 mm->mmap_base = mmap_base();
4624 +
4625 +#ifdef CONFIG_PAX_RANDMMAP
4626 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4627 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4628 +#endif
4629 +
4630 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4631 mm->unmap_area = arch_unmap_area_topdown;
4632 }
4633 @@ -153,10 +165,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4634 */
4635 if (mmap_is_legacy()) {
4636 mm->mmap_base = TASK_UNMAPPED_BASE;
4637 +
4638 +#ifdef CONFIG_PAX_RANDMMAP
4639 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4640 + mm->mmap_base += mm->delta_mmap;
4641 +#endif
4642 +
4643 mm->get_unmapped_area = s390_get_unmapped_area;
4644 mm->unmap_area = arch_unmap_area;
4645 } else {
4646 mm->mmap_base = mmap_base();
4647 +
4648 +#ifdef CONFIG_PAX_RANDMMAP
4649 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4650 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4651 +#endif
4652 +
4653 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
4654 mm->unmap_area = arch_unmap_area_topdown;
4655 }
4656 diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
4657 index ae3d59f..f65f075 100644
4658 --- a/arch/score/include/asm/cache.h
4659 +++ b/arch/score/include/asm/cache.h
4660 @@ -1,7 +1,9 @@
4661 #ifndef _ASM_SCORE_CACHE_H
4662 #define _ASM_SCORE_CACHE_H
4663
4664 +#include <linux/const.h>
4665 +
4666 #define L1_CACHE_SHIFT 4
4667 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4668 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4669
4670 #endif /* _ASM_SCORE_CACHE_H */
4671 diff --git a/arch/score/include/asm/system.h b/arch/score/include/asm/system.h
4672 index 589d5c7..669e274 100644
4673 --- a/arch/score/include/asm/system.h
4674 +++ b/arch/score/include/asm/system.h
4675 @@ -17,7 +17,7 @@ do { \
4676 #define finish_arch_switch(prev) do {} while (0)
4677
4678 typedef void (*vi_handler_t)(void);
4679 -extern unsigned long arch_align_stack(unsigned long sp);
4680 +#define arch_align_stack(x) (x)
4681
4682 #define mb() barrier()
4683 #define rmb() barrier()
4684 diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
4685 index 25d0803..d6c8e36 100644
4686 --- a/arch/score/kernel/process.c
4687 +++ b/arch/score/kernel/process.c
4688 @@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_struct *task)
4689
4690 return task_pt_regs(task)->cp0_epc;
4691 }
4692 -
4693 -unsigned long arch_align_stack(unsigned long sp)
4694 -{
4695 - return sp;
4696 -}
4697 diff --git a/arch/sh/boards/mach-hp6xx/pm.c b/arch/sh/boards/mach-hp6xx/pm.c
4698 index d936c1a..304a252 100644
4699 --- a/arch/sh/boards/mach-hp6xx/pm.c
4700 +++ b/arch/sh/boards/mach-hp6xx/pm.c
4701 @@ -143,7 +143,7 @@ static int hp6x0_pm_enter(suspend_state_t state)
4702 return 0;
4703 }
4704
4705 -static struct platform_suspend_ops hp6x0_pm_ops = {
4706 +static const struct platform_suspend_ops hp6x0_pm_ops = {
4707 .enter = hp6x0_pm_enter,
4708 .valid = suspend_valid_only_mem,
4709 };
4710 diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
4711 index 02df18e..ae3a793 100644
4712 --- a/arch/sh/include/asm/cache.h
4713 +++ b/arch/sh/include/asm/cache.h
4714 @@ -9,10 +9,11 @@
4715 #define __ASM_SH_CACHE_H
4716 #ifdef __KERNEL__
4717
4718 +#include <linux/const.h>
4719 #include <linux/init.h>
4720 #include <cpu/cache.h>
4721
4722 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4723 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4724
4725 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
4726
4727 diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c
4728 index 8a8a993..7b3079b 100644
4729 --- a/arch/sh/kernel/cpu/sh4/sq.c
4730 +++ b/arch/sh/kernel/cpu/sh4/sq.c
4731 @@ -327,7 +327,7 @@ static struct attribute *sq_sysfs_attrs[] = {
4732 NULL,
4733 };
4734
4735 -static struct sysfs_ops sq_sysfs_ops = {
4736 +static const struct sysfs_ops sq_sysfs_ops = {
4737 .show = sq_sysfs_show,
4738 .store = sq_sysfs_store,
4739 };
4740 diff --git a/arch/sh/kernel/cpu/shmobile/pm.c b/arch/sh/kernel/cpu/shmobile/pm.c
4741 index ee3c2aa..c49cee6 100644
4742 --- a/arch/sh/kernel/cpu/shmobile/pm.c
4743 +++ b/arch/sh/kernel/cpu/shmobile/pm.c
4744 @@ -58,7 +58,7 @@ static int sh_pm_enter(suspend_state_t state)
4745 return 0;
4746 }
4747
4748 -static struct platform_suspend_ops sh_pm_ops = {
4749 +static const struct platform_suspend_ops sh_pm_ops = {
4750 .enter = sh_pm_enter,
4751 .valid = suspend_valid_only_mem,
4752 };
4753 diff --git a/arch/sh/kernel/kgdb.c b/arch/sh/kernel/kgdb.c
4754 index 3e532d0..9faa306 100644
4755 --- a/arch/sh/kernel/kgdb.c
4756 +++ b/arch/sh/kernel/kgdb.c
4757 @@ -271,7 +271,7 @@ void kgdb_arch_exit(void)
4758 {
4759 }
4760
4761 -struct kgdb_arch arch_kgdb_ops = {
4762 +const struct kgdb_arch arch_kgdb_ops = {
4763 /* Breakpoint instruction: trapa #0x3c */
4764 #ifdef CONFIG_CPU_LITTLE_ENDIAN
4765 .gdb_bpt_instr = { 0x3c, 0xc3 },
4766 diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
4767 index afeb710..d1d1289 100644
4768 --- a/arch/sh/mm/mmap.c
4769 +++ b/arch/sh/mm/mmap.c
4770 @@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
4771 addr = PAGE_ALIGN(addr);
4772
4773 vma = find_vma(mm, addr);
4774 - if (TASK_SIZE - len >= addr &&
4775 - (!vma || addr + len <= vma->vm_start))
4776 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
4777 return addr;
4778 }
4779
4780 @@ -106,7 +105,7 @@ full_search:
4781 }
4782 return -ENOMEM;
4783 }
4784 - if (likely(!vma || addr + len <= vma->vm_start)) {
4785 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4786 /*
4787 * Remember the place where we stopped the search:
4788 */
4789 @@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4790 addr = PAGE_ALIGN(addr);
4791
4792 vma = find_vma(mm, addr);
4793 - if (TASK_SIZE - len >= addr &&
4794 - (!vma || addr + len <= vma->vm_start))
4795 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
4796 return addr;
4797 }
4798
4799 @@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4800 /* make sure it can fit in the remaining address space */
4801 if (likely(addr > len)) {
4802 vma = find_vma(mm, addr-len);
4803 - if (!vma || addr <= vma->vm_start) {
4804 + if (check_heap_stack_gap(vma, addr - len, len)) {
4805 /* remember the address as a hint for next time */
4806 return (mm->free_area_cache = addr-len);
4807 }
4808 @@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4809 if (unlikely(mm->mmap_base < len))
4810 goto bottomup;
4811
4812 - addr = mm->mmap_base-len;
4813 - if (do_colour_align)
4814 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4815 + addr = mm->mmap_base - len;
4816
4817 do {
4818 + if (do_colour_align)
4819 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4820 /*
4821 * Lookup failure means no vma is above this address,
4822 * else if new region fits below vma->vm_start,
4823 * return with success:
4824 */
4825 vma = find_vma(mm, addr);
4826 - if (likely(!vma || addr+len <= vma->vm_start)) {
4827 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4828 /* remember the address as a hint for next time */
4829 return (mm->free_area_cache = addr);
4830 }
4831 @@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4832 mm->cached_hole_size = vma->vm_start - addr;
4833
4834 /* try just below the current vma->vm_start */
4835 - addr = vma->vm_start-len;
4836 - if (do_colour_align)
4837 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4838 - } while (likely(len < vma->vm_start));
4839 + addr = skip_heap_stack_gap(vma, len);
4840 + } while (!IS_ERR_VALUE(addr));
4841
4842 bottomup:
4843 /*
4844 diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
4845 index 05ef538..dc9c857 100644
4846 --- a/arch/sparc/Kconfig
4847 +++ b/arch/sparc/Kconfig
4848 @@ -32,6 +32,7 @@ config SPARC
4849
4850 config SPARC32
4851 def_bool !64BIT
4852 + select GENERIC_ATOMIC64
4853
4854 config SPARC64
4855 def_bool 64BIT
4856 diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
4857 index 113225b..7fd04e7 100644
4858 --- a/arch/sparc/Makefile
4859 +++ b/arch/sparc/Makefile
4860 @@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
4861 # Export what is needed by arch/sparc/boot/Makefile
4862 export VMLINUX_INIT VMLINUX_MAIN
4863 VMLINUX_INIT := $(head-y) $(init-y)
4864 -VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
4865 +VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
4866 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
4867 VMLINUX_MAIN += $(drivers-y) $(net-y)
4868
4869 diff --git a/arch/sparc/include/asm/atomic.h b/arch/sparc/include/asm/atomic.h
4870 index 8ff83d8..4a459c2 100644
4871 --- a/arch/sparc/include/asm/atomic.h
4872 +++ b/arch/sparc/include/asm/atomic.h
4873 @@ -4,5 +4,6 @@
4874 #include <asm/atomic_64.h>
4875 #else
4876 #include <asm/atomic_32.h>
4877 +#include <asm-generic/atomic64.h>
4878 #endif
4879 #endif
4880 diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
4881 index f5cc06f..f858d47 100644
4882 --- a/arch/sparc/include/asm/atomic_64.h
4883 +++ b/arch/sparc/include/asm/atomic_64.h
4884 @@ -14,18 +14,40 @@
4885 #define ATOMIC64_INIT(i) { (i) }
4886
4887 #define atomic_read(v) ((v)->counter)
4888 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
4889 +{
4890 + return v->counter;
4891 +}
4892 #define atomic64_read(v) ((v)->counter)
4893 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
4894 +{
4895 + return v->counter;
4896 +}
4897
4898 #define atomic_set(v, i) (((v)->counter) = i)
4899 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
4900 +{
4901 + v->counter = i;
4902 +}
4903 #define atomic64_set(v, i) (((v)->counter) = i)
4904 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
4905 +{
4906 + v->counter = i;
4907 +}
4908
4909 extern void atomic_add(int, atomic_t *);
4910 +extern void atomic_add_unchecked(int, atomic_unchecked_t *);
4911 extern void atomic64_add(long, atomic64_t *);
4912 +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
4913 extern void atomic_sub(int, atomic_t *);
4914 +extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
4915 extern void atomic64_sub(long, atomic64_t *);
4916 +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
4917
4918 extern int atomic_add_ret(int, atomic_t *);
4919 +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
4920 extern long atomic64_add_ret(long, atomic64_t *);
4921 +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
4922 extern int atomic_sub_ret(int, atomic_t *);
4923 extern long atomic64_sub_ret(long, atomic64_t *);
4924
4925 @@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4926 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
4927
4928 #define atomic_inc_return(v) atomic_add_ret(1, v)
4929 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
4930 +{
4931 + return atomic_add_ret_unchecked(1, v);
4932 +}
4933 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
4934 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
4935 +{
4936 + return atomic64_add_ret_unchecked(1, v);
4937 +}
4938
4939 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
4940 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
4941
4942 #define atomic_add_return(i, v) atomic_add_ret(i, v)
4943 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
4944 +{
4945 + return atomic_add_ret_unchecked(i, v);
4946 +}
4947 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
4948 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
4949 +{
4950 + return atomic64_add_ret_unchecked(i, v);
4951 +}
4952
4953 /*
4954 * atomic_inc_and_test - increment and test
4955 @@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4956 * other cases.
4957 */
4958 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
4959 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
4960 +{
4961 + return atomic_inc_return_unchecked(v) == 0;
4962 +}
4963 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
4964
4965 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
4966 @@ -59,30 +101,65 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4967 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
4968
4969 #define atomic_inc(v) atomic_add(1, v)
4970 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
4971 +{
4972 + atomic_add_unchecked(1, v);
4973 +}
4974 #define atomic64_inc(v) atomic64_add(1, v)
4975 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
4976 +{
4977 + atomic64_add_unchecked(1, v);
4978 +}
4979
4980 #define atomic_dec(v) atomic_sub(1, v)
4981 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
4982 +{
4983 + atomic_sub_unchecked(1, v);
4984 +}
4985 #define atomic64_dec(v) atomic64_sub(1, v)
4986 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
4987 +{
4988 + atomic64_sub_unchecked(1, v);
4989 +}
4990
4991 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
4992 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
4993
4994 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
4995 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
4996 +{
4997 + return cmpxchg(&v->counter, old, new);
4998 +}
4999 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
5000 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
5001 +{
5002 + return xchg(&v->counter, new);
5003 +}
5004
5005 static inline int atomic_add_unless(atomic_t *v, int a, int u)
5006 {
5007 - int c, old;
5008 + int c, old, new;
5009 c = atomic_read(v);
5010 for (;;) {
5011 - if (unlikely(c == (u)))
5012 + if (unlikely(c == u))
5013 break;
5014 - old = atomic_cmpxchg((v), c, c + (a));
5015 +
5016 + asm volatile("addcc %2, %0, %0\n"
5017 +
5018 +#ifdef CONFIG_PAX_REFCOUNT
5019 + "tvs %%icc, 6\n"
5020 +#endif
5021 +
5022 + : "=r" (new)
5023 + : "0" (c), "ir" (a)
5024 + : "cc");
5025 +
5026 + old = atomic_cmpxchg(v, c, new);
5027 if (likely(old == c))
5028 break;
5029 c = old;
5030 }
5031 - return c != (u);
5032 + return c != u;
5033 }
5034
5035 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
5036 @@ -90,20 +167,35 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
5037 #define atomic64_cmpxchg(v, o, n) \
5038 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
5039 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
5040 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
5041 +{
5042 + return xchg(&v->counter, new);
5043 +}
5044
5045 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
5046 {
5047 - long c, old;
5048 + long c, old, new;
5049 c = atomic64_read(v);
5050 for (;;) {
5051 - if (unlikely(c == (u)))
5052 + if (unlikely(c == u))
5053 break;
5054 - old = atomic64_cmpxchg((v), c, c + (a));
5055 +
5056 + asm volatile("addcc %2, %0, %0\n"
5057 +
5058 +#ifdef CONFIG_PAX_REFCOUNT
5059 + "tvs %%xcc, 6\n"
5060 +#endif
5061 +
5062 + : "=r" (new)
5063 + : "0" (c), "ir" (a)
5064 + : "cc");
5065 +
5066 + old = atomic64_cmpxchg(v, c, new);
5067 if (likely(old == c))
5068 break;
5069 c = old;
5070 }
5071 - return c != (u);
5072 + return c != u;
5073 }
5074
5075 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
5076 diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
5077 index 41f85ae..73b80b5 100644
5078 --- a/arch/sparc/include/asm/cache.h
5079 +++ b/arch/sparc/include/asm/cache.h
5080 @@ -7,8 +7,10 @@
5081 #ifndef _SPARC_CACHE_H
5082 #define _SPARC_CACHE_H
5083
5084 +#include <linux/const.h>
5085 +
5086 #define L1_CACHE_SHIFT 5
5087 -#define L1_CACHE_BYTES 32
5088 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5089 #define L1_CACHE_ALIGN(x) ((((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1)))
5090
5091 #ifdef CONFIG_SPARC32
5092 diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
5093 index 5a8c308..38def92 100644
5094 --- a/arch/sparc/include/asm/dma-mapping.h
5095 +++ b/arch/sparc/include/asm/dma-mapping.h
5096 @@ -14,10 +14,10 @@ extern int dma_set_mask(struct device *dev, u64 dma_mask);
5097 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
5098 #define dma_is_consistent(d, h) (1)
5099
5100 -extern struct dma_map_ops *dma_ops, pci32_dma_ops;
5101 +extern const struct dma_map_ops *dma_ops, pci32_dma_ops;
5102 extern struct bus_type pci_bus_type;
5103
5104 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
5105 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
5106 {
5107 #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
5108 if (dev->bus == &pci_bus_type)
5109 @@ -31,7 +31,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
5110 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
5111 dma_addr_t *dma_handle, gfp_t flag)
5112 {
5113 - struct dma_map_ops *ops = get_dma_ops(dev);
5114 + const struct dma_map_ops *ops = get_dma_ops(dev);
5115 void *cpu_addr;
5116
5117 cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag);
5118 @@ -42,7 +42,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
5119 static inline void dma_free_coherent(struct device *dev, size_t size,
5120 void *cpu_addr, dma_addr_t dma_handle)
5121 {
5122 - struct dma_map_ops *ops = get_dma_ops(dev);
5123 + const struct dma_map_ops *ops = get_dma_ops(dev);
5124
5125 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
5126 ops->free_coherent(dev, size, cpu_addr, dma_handle);
5127 diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
5128 index 381a1b5..b97e3ff 100644
5129 --- a/arch/sparc/include/asm/elf_32.h
5130 +++ b/arch/sparc/include/asm/elf_32.h
5131 @@ -116,6 +116,13 @@ typedef struct {
5132
5133 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
5134
5135 +#ifdef CONFIG_PAX_ASLR
5136 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
5137 +
5138 +#define PAX_DELTA_MMAP_LEN 16
5139 +#define PAX_DELTA_STACK_LEN 16
5140 +#endif
5141 +
5142 /* This yields a mask that user programs can use to figure out what
5143 instruction set this cpu supports. This can NOT be done in userspace
5144 on Sparc. */
5145 diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
5146 index 9968085..c2106ef 100644
5147 --- a/arch/sparc/include/asm/elf_64.h
5148 +++ b/arch/sparc/include/asm/elf_64.h
5149 @@ -163,6 +163,12 @@ typedef struct {
5150 #define ELF_ET_DYN_BASE 0x0000010000000000UL
5151 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
5152
5153 +#ifdef CONFIG_PAX_ASLR
5154 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
5155 +
5156 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
5157 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
5158 +#endif
5159
5160 /* This yields a mask that user programs can use to figure out what
5161 instruction set this cpu supports. */
5162 diff --git a/arch/sparc/include/asm/page_32.h b/arch/sparc/include/asm/page_32.h
5163 index 156707b..aefa786 100644
5164 --- a/arch/sparc/include/asm/page_32.h
5165 +++ b/arch/sparc/include/asm/page_32.h
5166 @@ -8,6 +8,8 @@
5167 #ifndef _SPARC_PAGE_H
5168 #define _SPARC_PAGE_H
5169
5170 +#include <linux/const.h>
5171 +
5172 #define PAGE_SHIFT 12
5173
5174 #ifndef __ASSEMBLY__
5175 diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
5176 index e0cabe7..efd60f1 100644
5177 --- a/arch/sparc/include/asm/pgtable_32.h
5178 +++ b/arch/sparc/include/asm/pgtable_32.h
5179 @@ -43,6 +43,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
5180 BTFIXUPDEF_INT(page_none)
5181 BTFIXUPDEF_INT(page_copy)
5182 BTFIXUPDEF_INT(page_readonly)
5183 +
5184 +#ifdef CONFIG_PAX_PAGEEXEC
5185 +BTFIXUPDEF_INT(page_shared_noexec)
5186 +BTFIXUPDEF_INT(page_copy_noexec)
5187 +BTFIXUPDEF_INT(page_readonly_noexec)
5188 +#endif
5189 +
5190 BTFIXUPDEF_INT(page_kernel)
5191
5192 #define PMD_SHIFT SUN4C_PMD_SHIFT
5193 @@ -64,6 +71,16 @@ extern pgprot_t PAGE_SHARED;
5194 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
5195 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
5196
5197 +#ifdef CONFIG_PAX_PAGEEXEC
5198 +extern pgprot_t PAGE_SHARED_NOEXEC;
5199 +# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
5200 +# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
5201 +#else
5202 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
5203 +# define PAGE_COPY_NOEXEC PAGE_COPY
5204 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
5205 +#endif
5206 +
5207 extern unsigned long page_kernel;
5208
5209 #ifdef MODULE
5210 diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
5211 index 1407c07..7e10231 100644
5212 --- a/arch/sparc/include/asm/pgtsrmmu.h
5213 +++ b/arch/sparc/include/asm/pgtsrmmu.h
5214 @@ -115,6 +115,13 @@
5215 SRMMU_EXEC | SRMMU_REF)
5216 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
5217 SRMMU_EXEC | SRMMU_REF)
5218 +
5219 +#ifdef CONFIG_PAX_PAGEEXEC
5220 +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
5221 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
5222 +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
5223 +#endif
5224 +
5225 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
5226 SRMMU_DIRTY | SRMMU_REF)
5227
5228 diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
5229 index 43e5147..47622a1 100644
5230 --- a/arch/sparc/include/asm/spinlock_64.h
5231 +++ b/arch/sparc/include/asm/spinlock_64.h
5232 @@ -92,14 +92,19 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla
5233
5234 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
5235
5236 -static void inline arch_read_lock(raw_rwlock_t *lock)
5237 +static inline void arch_read_lock(raw_rwlock_t *lock)
5238 {
5239 unsigned long tmp1, tmp2;
5240
5241 __asm__ __volatile__ (
5242 "1: ldsw [%2], %0\n"
5243 " brlz,pn %0, 2f\n"
5244 -"4: add %0, 1, %1\n"
5245 +"4: addcc %0, 1, %1\n"
5246 +
5247 +#ifdef CONFIG_PAX_REFCOUNT
5248 +" tvs %%icc, 6\n"
5249 +#endif
5250 +
5251 " cas [%2], %0, %1\n"
5252 " cmp %0, %1\n"
5253 " bne,pn %%icc, 1b\n"
5254 @@ -112,10 +117,10 @@ static void inline arch_read_lock(raw_rwlock_t *lock)
5255 " .previous"
5256 : "=&r" (tmp1), "=&r" (tmp2)
5257 : "r" (lock)
5258 - : "memory");
5259 + : "memory", "cc");
5260 }
5261
5262 -static int inline arch_read_trylock(raw_rwlock_t *lock)
5263 +static inline int arch_read_trylock(raw_rwlock_t *lock)
5264 {
5265 int tmp1, tmp2;
5266
5267 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(raw_rwlock_t *lock)
5268 "1: ldsw [%2], %0\n"
5269 " brlz,a,pn %0, 2f\n"
5270 " mov 0, %0\n"
5271 -" add %0, 1, %1\n"
5272 +" addcc %0, 1, %1\n"
5273 +
5274 +#ifdef CONFIG_PAX_REFCOUNT
5275 +" tvs %%icc, 6\n"
5276 +#endif
5277 +
5278 " cas [%2], %0, %1\n"
5279 " cmp %0, %1\n"
5280 " bne,pn %%icc, 1b\n"
5281 @@ -136,13 +146,18 @@ static int inline arch_read_trylock(raw_rwlock_t *lock)
5282 return tmp1;
5283 }
5284
5285 -static void inline arch_read_unlock(raw_rwlock_t *lock)
5286 +static inline void arch_read_unlock(raw_rwlock_t *lock)
5287 {
5288 unsigned long tmp1, tmp2;
5289
5290 __asm__ __volatile__(
5291 "1: lduw [%2], %0\n"
5292 -" sub %0, 1, %1\n"
5293 +" subcc %0, 1, %1\n"
5294 +
5295 +#ifdef CONFIG_PAX_REFCOUNT
5296 +" tvs %%icc, 6\n"
5297 +#endif
5298 +
5299 " cas [%2], %0, %1\n"
5300 " cmp %0, %1\n"
5301 " bne,pn %%xcc, 1b\n"
5302 @@ -152,7 +167,7 @@ static void inline arch_read_unlock(raw_rwlock_t *lock)
5303 : "memory");
5304 }
5305
5306 -static void inline arch_write_lock(raw_rwlock_t *lock)
5307 +static inline void arch_write_lock(raw_rwlock_t *lock)
5308 {
5309 unsigned long mask, tmp1, tmp2;
5310
5311 @@ -177,7 +192,7 @@ static void inline arch_write_lock(raw_rwlock_t *lock)
5312 : "memory");
5313 }
5314
5315 -static void inline arch_write_unlock(raw_rwlock_t *lock)
5316 +static inline void arch_write_unlock(raw_rwlock_t *lock)
5317 {
5318 __asm__ __volatile__(
5319 " stw %%g0, [%0]"
5320 @@ -186,7 +201,7 @@ static void inline arch_write_unlock(raw_rwlock_t *lock)
5321 : "memory");
5322 }
5323
5324 -static int inline arch_write_trylock(raw_rwlock_t *lock)
5325 +static inline int arch_write_trylock(raw_rwlock_t *lock)
5326 {
5327 unsigned long mask, tmp1, tmp2, result;
5328
5329 diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
5330 index 844d73a..f787fb9 100644
5331 --- a/arch/sparc/include/asm/thread_info_32.h
5332 +++ b/arch/sparc/include/asm/thread_info_32.h
5333 @@ -50,6 +50,8 @@ struct thread_info {
5334 unsigned long w_saved;
5335
5336 struct restart_block restart_block;
5337 +
5338 + unsigned long lowest_stack;
5339 };
5340
5341 /*
5342 diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
5343 index f78ad9a..9f55fc7 100644
5344 --- a/arch/sparc/include/asm/thread_info_64.h
5345 +++ b/arch/sparc/include/asm/thread_info_64.h
5346 @@ -68,6 +68,8 @@ struct thread_info {
5347 struct pt_regs *kern_una_regs;
5348 unsigned int kern_una_insn;
5349
5350 + unsigned long lowest_stack;
5351 +
5352 unsigned long fpregs[0] __attribute__ ((aligned(64)));
5353 };
5354
5355 diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
5356 index e88fbe5..96b0ce5 100644
5357 --- a/arch/sparc/include/asm/uaccess.h
5358 +++ b/arch/sparc/include/asm/uaccess.h
5359 @@ -1,5 +1,13 @@
5360 #ifndef ___ASM_SPARC_UACCESS_H
5361 #define ___ASM_SPARC_UACCESS_H
5362 +
5363 +#ifdef __KERNEL__
5364 +#ifndef __ASSEMBLY__
5365 +#include <linux/types.h>
5366 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
5367 +#endif
5368 +#endif
5369 +
5370 #if defined(__sparc__) && defined(__arch64__)
5371 #include <asm/uaccess_64.h>
5372 #else
5373 diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
5374 index 8303ac4..07f333d 100644
5375 --- a/arch/sparc/include/asm/uaccess_32.h
5376 +++ b/arch/sparc/include/asm/uaccess_32.h
5377 @@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
5378
5379 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
5380 {
5381 - if (n && __access_ok((unsigned long) to, n))
5382 + if ((long)n < 0)
5383 + return n;
5384 +
5385 + if (n && __access_ok((unsigned long) to, n)) {
5386 + if (!__builtin_constant_p(n))
5387 + check_object_size(from, n, true);
5388 return __copy_user(to, (__force void __user *) from, n);
5389 - else
5390 + } else
5391 return n;
5392 }
5393
5394 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
5395 {
5396 + if ((long)n < 0)
5397 + return n;
5398 +
5399 + if (!__builtin_constant_p(n))
5400 + check_object_size(from, n, true);
5401 +
5402 return __copy_user(to, (__force void __user *) from, n);
5403 }
5404
5405 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
5406 {
5407 - if (n && __access_ok((unsigned long) from, n))
5408 + if ((long)n < 0)
5409 + return n;
5410 +
5411 + if (n && __access_ok((unsigned long) from, n)) {
5412 + if (!__builtin_constant_p(n))
5413 + check_object_size(to, n, false);
5414 return __copy_user((__force void __user *) to, from, n);
5415 - else
5416 + } else
5417 return n;
5418 }
5419
5420 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
5421 {
5422 + if ((long)n < 0)
5423 + return n;
5424 +
5425 return __copy_user((__force void __user *) to, from, n);
5426 }
5427
5428 diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
5429 index 9ea271e..7b8a271 100644
5430 --- a/arch/sparc/include/asm/uaccess_64.h
5431 +++ b/arch/sparc/include/asm/uaccess_64.h
5432 @@ -9,6 +9,7 @@
5433 #include <linux/compiler.h>
5434 #include <linux/string.h>
5435 #include <linux/thread_info.h>
5436 +#include <linux/kernel.h>
5437 #include <asm/asi.h>
5438 #include <asm/system.h>
5439 #include <asm/spitfire.h>
5440 @@ -212,8 +213,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
5441 static inline unsigned long __must_check
5442 copy_from_user(void *to, const void __user *from, unsigned long size)
5443 {
5444 - unsigned long ret = ___copy_from_user(to, from, size);
5445 + unsigned long ret;
5446
5447 + if ((long)size < 0 || size > INT_MAX)
5448 + return size;
5449 +
5450 + if (!__builtin_constant_p(size))
5451 + check_object_size(to, size, false);
5452 +
5453 + ret = ___copy_from_user(to, from, size);
5454 if (unlikely(ret))
5455 ret = copy_from_user_fixup(to, from, size);
5456 return ret;
5457 @@ -228,8 +236,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
5458 static inline unsigned long __must_check
5459 copy_to_user(void __user *to, const void *from, unsigned long size)
5460 {
5461 - unsigned long ret = ___copy_to_user(to, from, size);
5462 + unsigned long ret;
5463
5464 + if ((long)size < 0 || size > INT_MAX)
5465 + return size;
5466 +
5467 + if (!__builtin_constant_p(size))
5468 + check_object_size(from, size, true);
5469 +
5470 + ret = ___copy_to_user(to, from, size);
5471 if (unlikely(ret))
5472 ret = copy_to_user_fixup(to, from, size);
5473 return ret;
5474 diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
5475 index 2782681..77ded84 100644
5476 --- a/arch/sparc/kernel/Makefile
5477 +++ b/arch/sparc/kernel/Makefile
5478 @@ -3,7 +3,7 @@
5479 #
5480
5481 asflags-y := -ansi
5482 -ccflags-y := -Werror
5483 +#ccflags-y := -Werror
5484
5485 extra-y := head_$(BITS).o
5486 extra-y += init_task.o
5487 diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
5488 index 7690cc2..ece64c9 100644
5489 --- a/arch/sparc/kernel/iommu.c
5490 +++ b/arch/sparc/kernel/iommu.c
5491 @@ -826,7 +826,7 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
5492 spin_unlock_irqrestore(&iommu->lock, flags);
5493 }
5494
5495 -static struct dma_map_ops sun4u_dma_ops = {
5496 +static const struct dma_map_ops sun4u_dma_ops = {
5497 .alloc_coherent = dma_4u_alloc_coherent,
5498 .free_coherent = dma_4u_free_coherent,
5499 .map_page = dma_4u_map_page,
5500 @@ -837,7 +837,7 @@ static struct dma_map_ops sun4u_dma_ops = {
5501 .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
5502 };
5503
5504 -struct dma_map_ops *dma_ops = &sun4u_dma_ops;
5505 +const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
5506 EXPORT_SYMBOL(dma_ops);
5507
5508 extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
5509 diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
5510 index 9f61fd8..bd048db 100644
5511 --- a/arch/sparc/kernel/ioport.c
5512 +++ b/arch/sparc/kernel/ioport.c
5513 @@ -392,7 +392,7 @@ static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
5514 BUG();
5515 }
5516
5517 -struct dma_map_ops sbus_dma_ops = {
5518 +const struct dma_map_ops sbus_dma_ops = {
5519 .alloc_coherent = sbus_alloc_coherent,
5520 .free_coherent = sbus_free_coherent,
5521 .map_page = sbus_map_page,
5522 @@ -403,7 +403,7 @@ struct dma_map_ops sbus_dma_ops = {
5523 .sync_sg_for_device = sbus_sync_sg_for_device,
5524 };
5525
5526 -struct dma_map_ops *dma_ops = &sbus_dma_ops;
5527 +const struct dma_map_ops *dma_ops = &sbus_dma_ops;
5528 EXPORT_SYMBOL(dma_ops);
5529
5530 static int __init sparc_register_ioport(void)
5531 @@ -640,7 +640,7 @@ static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *
5532 }
5533 }
5534
5535 -struct dma_map_ops pci32_dma_ops = {
5536 +const struct dma_map_ops pci32_dma_ops = {
5537 .alloc_coherent = pci32_alloc_coherent,
5538 .free_coherent = pci32_free_coherent,
5539 .map_page = pci32_map_page,
5540 diff --git a/arch/sparc/kernel/kgdb_32.c b/arch/sparc/kernel/kgdb_32.c
5541 index 04df4ed..55c4b6e 100644
5542 --- a/arch/sparc/kernel/kgdb_32.c
5543 +++ b/arch/sparc/kernel/kgdb_32.c
5544 @@ -158,7 +158,7 @@ void kgdb_arch_exit(void)
5545 {
5546 }
5547
5548 -struct kgdb_arch arch_kgdb_ops = {
5549 +const struct kgdb_arch arch_kgdb_ops = {
5550 /* Breakpoint instruction: ta 0x7d */
5551 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x7d },
5552 };
5553 diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c
5554 index f5a0fd4..d886f71 100644
5555 --- a/arch/sparc/kernel/kgdb_64.c
5556 +++ b/arch/sparc/kernel/kgdb_64.c
5557 @@ -180,7 +180,7 @@ void kgdb_arch_exit(void)
5558 {
5559 }
5560
5561 -struct kgdb_arch arch_kgdb_ops = {
5562 +const struct kgdb_arch arch_kgdb_ops = {
5563 /* Breakpoint instruction: ta 0x72 */
5564 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x72 },
5565 };
5566 diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
5567 index 23c33ff..d137fbd 100644
5568 --- a/arch/sparc/kernel/pci_sun4v.c
5569 +++ b/arch/sparc/kernel/pci_sun4v.c
5570 @@ -525,7 +525,7 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
5571 spin_unlock_irqrestore(&iommu->lock, flags);
5572 }
5573
5574 -static struct dma_map_ops sun4v_dma_ops = {
5575 +static const struct dma_map_ops sun4v_dma_ops = {
5576 .alloc_coherent = dma_4v_alloc_coherent,
5577 .free_coherent = dma_4v_free_coherent,
5578 .map_page = dma_4v_map_page,
5579 diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
5580 index c49865b..b41a81b 100644
5581 --- a/arch/sparc/kernel/process_32.c
5582 +++ b/arch/sparc/kernel/process_32.c
5583 @@ -196,7 +196,7 @@ void __show_backtrace(unsigned long fp)
5584 rw->ins[4], rw->ins[5],
5585 rw->ins[6],
5586 rw->ins[7]);
5587 - printk("%pS\n", (void *) rw->ins[7]);
5588 + printk("%pA\n", (void *) rw->ins[7]);
5589 rw = (struct reg_window32 *) rw->ins[6];
5590 }
5591 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
5592 @@ -263,14 +263,14 @@ void show_regs(struct pt_regs *r)
5593
5594 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
5595 r->psr, r->pc, r->npc, r->y, print_tainted());
5596 - printk("PC: <%pS>\n", (void *) r->pc);
5597 + printk("PC: <%pA>\n", (void *) r->pc);
5598 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5599 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
5600 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
5601 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5602 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
5603 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
5604 - printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
5605 + printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
5606
5607 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5608 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
5609 @@ -305,7 +305,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
5610 rw = (struct reg_window32 *) fp;
5611 pc = rw->ins[7];
5612 printk("[%08lx : ", pc);
5613 - printk("%pS ] ", (void *) pc);
5614 + printk("%pA ] ", (void *) pc);
5615 fp = rw->ins[6];
5616 } while (++count < 16);
5617 printk("\n");
5618 diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
5619 index cb70476..3d0c191 100644
5620 --- a/arch/sparc/kernel/process_64.c
5621 +++ b/arch/sparc/kernel/process_64.c
5622 @@ -180,14 +180,14 @@ static void show_regwindow(struct pt_regs *regs)
5623 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
5624 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
5625 if (regs->tstate & TSTATE_PRIV)
5626 - printk("I7: <%pS>\n", (void *) rwk->ins[7]);
5627 + printk("I7: <%pA>\n", (void *) rwk->ins[7]);
5628 }
5629
5630 void show_regs(struct pt_regs *regs)
5631 {
5632 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
5633 regs->tpc, regs->tnpc, regs->y, print_tainted());
5634 - printk("TPC: <%pS>\n", (void *) regs->tpc);
5635 + printk("TPC: <%pA>\n", (void *) regs->tpc);
5636 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
5637 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
5638 regs->u_regs[3]);
5639 @@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
5640 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
5641 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
5642 regs->u_regs[15]);
5643 - printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
5644 + printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
5645 show_regwindow(regs);
5646 }
5647
5648 @@ -284,7 +284,7 @@ void arch_trigger_all_cpu_backtrace(void)
5649 ((tp && tp->task) ? tp->task->pid : -1));
5650
5651 if (gp->tstate & TSTATE_PRIV) {
5652 - printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
5653 + printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
5654 (void *) gp->tpc,
5655 (void *) gp->o7,
5656 (void *) gp->i7,
5657 diff --git a/arch/sparc/kernel/sigutil_64.c b/arch/sparc/kernel/sigutil_64.c
5658 index 6edc4e5..06a69b4 100644
5659 --- a/arch/sparc/kernel/sigutil_64.c
5660 +++ b/arch/sparc/kernel/sigutil_64.c
5661 @@ -2,6 +2,7 @@
5662 #include <linux/types.h>
5663 #include <linux/thread_info.h>
5664 #include <linux/uaccess.h>
5665 +#include <linux/errno.h>
5666
5667 #include <asm/sigcontext.h>
5668 #include <asm/fpumacro.h>
5669 diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
5670 index 3a82e65..ce0a53a 100644
5671 --- a/arch/sparc/kernel/sys_sparc_32.c
5672 +++ b/arch/sparc/kernel/sys_sparc_32.c
5673 @@ -57,7 +57,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5674 if (ARCH_SUN4C && len > 0x20000000)
5675 return -ENOMEM;
5676 if (!addr)
5677 - addr = TASK_UNMAPPED_BASE;
5678 + addr = current->mm->mmap_base;
5679
5680 if (flags & MAP_SHARED)
5681 addr = COLOUR_ALIGN(addr);
5682 @@ -72,7 +72,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5683 }
5684 if (TASK_SIZE - PAGE_SIZE - len < addr)
5685 return -ENOMEM;
5686 - if (!vmm || addr + len <= vmm->vm_start)
5687 + if (check_heap_stack_gap(vmm, addr, len))
5688 return addr;
5689 addr = vmm->vm_end;
5690 if (flags & MAP_SHARED)
5691 diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
5692 index cfa0e19..98972ac 100644
5693 --- a/arch/sparc/kernel/sys_sparc_64.c
5694 +++ b/arch/sparc/kernel/sys_sparc_64.c
5695 @@ -125,7 +125,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5696 /* We do not accept a shared mapping if it would violate
5697 * cache aliasing constraints.
5698 */
5699 - if ((flags & MAP_SHARED) &&
5700 + if ((filp || (flags & MAP_SHARED)) &&
5701 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5702 return -EINVAL;
5703 return addr;
5704 @@ -140,6 +140,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5705 if (filp || (flags & MAP_SHARED))
5706 do_color_align = 1;
5707
5708 +#ifdef CONFIG_PAX_RANDMMAP
5709 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
5710 +#endif
5711 +
5712 if (addr) {
5713 if (do_color_align)
5714 addr = COLOUR_ALIGN(addr, pgoff);
5715 @@ -147,15 +151,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5716 addr = PAGE_ALIGN(addr);
5717
5718 vma = find_vma(mm, addr);
5719 - if (task_size - len >= addr &&
5720 - (!vma || addr + len <= vma->vm_start))
5721 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5722 return addr;
5723 }
5724
5725 if (len > mm->cached_hole_size) {
5726 - start_addr = addr = mm->free_area_cache;
5727 + start_addr = addr = mm->free_area_cache;
5728 } else {
5729 - start_addr = addr = TASK_UNMAPPED_BASE;
5730 + start_addr = addr = mm->mmap_base;
5731 mm->cached_hole_size = 0;
5732 }
5733
5734 @@ -175,14 +178,14 @@ full_search:
5735 vma = find_vma(mm, VA_EXCLUDE_END);
5736 }
5737 if (unlikely(task_size < addr)) {
5738 - if (start_addr != TASK_UNMAPPED_BASE) {
5739 - start_addr = addr = TASK_UNMAPPED_BASE;
5740 + if (start_addr != mm->mmap_base) {
5741 + start_addr = addr = mm->mmap_base;
5742 mm->cached_hole_size = 0;
5743 goto full_search;
5744 }
5745 return -ENOMEM;
5746 }
5747 - if (likely(!vma || addr + len <= vma->vm_start)) {
5748 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5749 /*
5750 * Remember the place where we stopped the search:
5751 */
5752 @@ -216,7 +219,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5753 /* We do not accept a shared mapping if it would violate
5754 * cache aliasing constraints.
5755 */
5756 - if ((flags & MAP_SHARED) &&
5757 + if ((filp || (flags & MAP_SHARED)) &&
5758 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5759 return -EINVAL;
5760 return addr;
5761 @@ -237,8 +240,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5762 addr = PAGE_ALIGN(addr);
5763
5764 vma = find_vma(mm, addr);
5765 - if (task_size - len >= addr &&
5766 - (!vma || addr + len <= vma->vm_start))
5767 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5768 return addr;
5769 }
5770
5771 @@ -259,7 +261,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5772 /* make sure it can fit in the remaining address space */
5773 if (likely(addr > len)) {
5774 vma = find_vma(mm, addr-len);
5775 - if (!vma || addr <= vma->vm_start) {
5776 + if (check_heap_stack_gap(vma, addr - len, len)) {
5777 /* remember the address as a hint for next time */
5778 return (mm->free_area_cache = addr-len);
5779 }
5780 @@ -268,18 +270,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5781 if (unlikely(mm->mmap_base < len))
5782 goto bottomup;
5783
5784 - addr = mm->mmap_base-len;
5785 - if (do_color_align)
5786 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5787 + addr = mm->mmap_base - len;
5788
5789 do {
5790 + if (do_color_align)
5791 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5792 /*
5793 * Lookup failure means no vma is above this address,
5794 * else if new region fits below vma->vm_start,
5795 * return with success:
5796 */
5797 vma = find_vma(mm, addr);
5798 - if (likely(!vma || addr+len <= vma->vm_start)) {
5799 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5800 /* remember the address as a hint for next time */
5801 return (mm->free_area_cache = addr);
5802 }
5803 @@ -289,10 +291,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5804 mm->cached_hole_size = vma->vm_start - addr;
5805
5806 /* try just below the current vma->vm_start */
5807 - addr = vma->vm_start-len;
5808 - if (do_color_align)
5809 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5810 - } while (likely(len < vma->vm_start));
5811 + addr = skip_heap_stack_gap(vma, len);
5812 + } while (!IS_ERR_VALUE(addr));
5813
5814 bottomup:
5815 /*
5816 @@ -384,6 +384,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5817 current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY ||
5818 sysctl_legacy_va_layout) {
5819 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
5820 +
5821 +#ifdef CONFIG_PAX_RANDMMAP
5822 + if (mm->pax_flags & MF_PAX_RANDMMAP)
5823 + mm->mmap_base += mm->delta_mmap;
5824 +#endif
5825 +
5826 mm->get_unmapped_area = arch_get_unmapped_area;
5827 mm->unmap_area = arch_unmap_area;
5828 } else {
5829 @@ -398,6 +404,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5830 gap = (task_size / 6 * 5);
5831
5832 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
5833 +
5834 +#ifdef CONFIG_PAX_RANDMMAP
5835 + if (mm->pax_flags & MF_PAX_RANDMMAP)
5836 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
5837 +#endif
5838 +
5839 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
5840 mm->unmap_area = arch_unmap_area_topdown;
5841 }
5842 diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
5843 index c0490c7..84959d1 100644
5844 --- a/arch/sparc/kernel/traps_32.c
5845 +++ b/arch/sparc/kernel/traps_32.c
5846 @@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
5847 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
5848 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
5849
5850 +extern void gr_handle_kernel_exploit(void);
5851 +
5852 void die_if_kernel(char *str, struct pt_regs *regs)
5853 {
5854 static int die_counter;
5855 @@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5856 count++ < 30 &&
5857 (((unsigned long) rw) >= PAGE_OFFSET) &&
5858 !(((unsigned long) rw) & 0x7)) {
5859 - printk("Caller[%08lx]: %pS\n", rw->ins[7],
5860 + printk("Caller[%08lx]: %pA\n", rw->ins[7],
5861 (void *) rw->ins[7]);
5862 rw = (struct reg_window32 *)rw->ins[6];
5863 }
5864 }
5865 printk("Instruction DUMP:");
5866 instruction_dump ((unsigned long *) regs->pc);
5867 - if(regs->psr & PSR_PS)
5868 + if(regs->psr & PSR_PS) {
5869 + gr_handle_kernel_exploit();
5870 do_exit(SIGKILL);
5871 + }
5872 do_exit(SIGSEGV);
5873 }
5874
5875 diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
5876 index 10f7bb9..cdb6793 100644
5877 --- a/arch/sparc/kernel/traps_64.c
5878 +++ b/arch/sparc/kernel/traps_64.c
5879 @@ -73,7 +73,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
5880 i + 1,
5881 p->trapstack[i].tstate, p->trapstack[i].tpc,
5882 p->trapstack[i].tnpc, p->trapstack[i].tt);
5883 - printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
5884 + printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
5885 }
5886 }
5887
5888 @@ -93,6 +93,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
5889
5890 lvl -= 0x100;
5891 if (regs->tstate & TSTATE_PRIV) {
5892 +
5893 +#ifdef CONFIG_PAX_REFCOUNT
5894 + if (lvl == 6)
5895 + pax_report_refcount_overflow(regs);
5896 +#endif
5897 +
5898 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
5899 die_if_kernel(buffer, regs);
5900 }
5901 @@ -111,11 +117,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
5902 void bad_trap_tl1(struct pt_regs *regs, long lvl)
5903 {
5904 char buffer[32];
5905 -
5906 +
5907 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
5908 0, lvl, SIGTRAP) == NOTIFY_STOP)
5909 return;
5910
5911 +#ifdef CONFIG_PAX_REFCOUNT
5912 + if (lvl == 6)
5913 + pax_report_refcount_overflow(regs);
5914 +#endif
5915 +
5916 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
5917
5918 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
5919 @@ -1139,7 +1150,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
5920 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
5921 printk("%s" "ERROR(%d): ",
5922 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
5923 - printk("TPC<%pS>\n", (void *) regs->tpc);
5924 + printk("TPC<%pA>\n", (void *) regs->tpc);
5925 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
5926 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
5927 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
5928 @@ -1746,7 +1757,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
5929 smp_processor_id(),
5930 (type & 0x1) ? 'I' : 'D',
5931 regs->tpc);
5932 - printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
5933 + printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
5934 panic("Irrecoverable Cheetah+ parity error.");
5935 }
5936
5937 @@ -1754,7 +1765,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
5938 smp_processor_id(),
5939 (type & 0x1) ? 'I' : 'D',
5940 regs->tpc);
5941 - printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
5942 + printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
5943 }
5944
5945 struct sun4v_error_entry {
5946 @@ -1961,9 +1972,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
5947
5948 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
5949 regs->tpc, tl);
5950 - printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
5951 + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
5952 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
5953 - printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
5954 + printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
5955 (void *) regs->u_regs[UREG_I7]);
5956 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
5957 "pte[%lx] error[%lx]\n",
5958 @@ -1985,9 +1996,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
5959
5960 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
5961 regs->tpc, tl);
5962 - printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
5963 + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
5964 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
5965 - printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
5966 + printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
5967 (void *) regs->u_regs[UREG_I7]);
5968 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
5969 "pte[%lx] error[%lx]\n",
5970 @@ -2191,7 +2202,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
5971 fp = (unsigned long)sf->fp + STACK_BIAS;
5972 }
5973
5974 - printk(" [%016lx] %pS\n", pc, (void *) pc);
5975 + printk(" [%016lx] %pA\n", pc, (void *) pc);
5976 } while (++count < 16);
5977 }
5978
5979 @@ -2233,6 +2244,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
5980 return (struct reg_window *) (fp + STACK_BIAS);
5981 }
5982
5983 +extern void gr_handle_kernel_exploit(void);
5984 +
5985 void die_if_kernel(char *str, struct pt_regs *regs)
5986 {
5987 static int die_counter;
5988 @@ -2260,7 +2273,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5989 while (rw &&
5990 count++ < 30&&
5991 is_kernel_stack(current, rw)) {
5992 - printk("Caller[%016lx]: %pS\n", rw->ins[7],
5993 + printk("Caller[%016lx]: %pA\n", rw->ins[7],
5994 (void *) rw->ins[7]);
5995
5996 rw = kernel_stack_up(rw);
5997 @@ -2273,8 +2286,11 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5998 }
5999 user_instruction_dump ((unsigned int __user *) regs->tpc);
6000 }
6001 - if (regs->tstate & TSTATE_PRIV)
6002 + if (regs->tstate & TSTATE_PRIV) {
6003 + gr_handle_kernel_exploit();
6004 do_exit(SIGKILL);
6005 + }
6006 +
6007 do_exit(SIGSEGV);
6008 }
6009 EXPORT_SYMBOL(die_if_kernel);
6010 diff --git a/arch/sparc/kernel/una_asm_64.S b/arch/sparc/kernel/una_asm_64.S
6011 index be183fe..1c8d332 100644
6012 --- a/arch/sparc/kernel/una_asm_64.S
6013 +++ b/arch/sparc/kernel/una_asm_64.S
6014 @@ -127,7 +127,7 @@ do_int_load:
6015 wr %o5, 0x0, %asi
6016 retl
6017 mov 0, %o0
6018 - .size __do_int_load, .-__do_int_load
6019 + .size do_int_load, .-do_int_load
6020
6021 .section __ex_table,"a"
6022 .word 4b, __retl_efault
6023 diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
6024 index 3792099..2af17d8 100644
6025 --- a/arch/sparc/kernel/unaligned_64.c
6026 +++ b/arch/sparc/kernel/unaligned_64.c
6027 @@ -288,7 +288,7 @@ static void log_unaligned(struct pt_regs *regs)
6028 if (count < 5) {
6029 last_time = jiffies;
6030 count++;
6031 - printk("Kernel unaligned access at TPC[%lx] %pS\n",
6032 + printk("Kernel unaligned access at TPC[%lx] %pA\n",
6033 regs->tpc, (void *) regs->tpc);
6034 }
6035 }
6036 diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
6037 index e75faf0..24f12f9 100644
6038 --- a/arch/sparc/lib/Makefile
6039 +++ b/arch/sparc/lib/Makefile
6040 @@ -2,7 +2,7 @@
6041 #
6042
6043 asflags-y := -ansi -DST_DIV0=0x02
6044 -ccflags-y := -Werror
6045 +#ccflags-y := -Werror
6046
6047 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
6048 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
6049 diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
6050 index 0268210..f0291ca 100644
6051 --- a/arch/sparc/lib/atomic_64.S
6052 +++ b/arch/sparc/lib/atomic_64.S
6053 @@ -18,7 +18,12 @@
6054 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
6055 BACKOFF_SETUP(%o2)
6056 1: lduw [%o1], %g1
6057 - add %g1, %o0, %g7
6058 + addcc %g1, %o0, %g7
6059 +
6060 +#ifdef CONFIG_PAX_REFCOUNT
6061 + tvs %icc, 6
6062 +#endif
6063 +
6064 cas [%o1], %g1, %g7
6065 cmp %g1, %g7
6066 bne,pn %icc, 2f
6067 @@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
6068 2: BACKOFF_SPIN(%o2, %o3, 1b)
6069 .size atomic_add, .-atomic_add
6070
6071 + .globl atomic_add_unchecked
6072 + .type atomic_add_unchecked,#function
6073 +atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6074 + BACKOFF_SETUP(%o2)
6075 +1: lduw [%o1], %g1
6076 + add %g1, %o0, %g7
6077 + cas [%o1], %g1, %g7
6078 + cmp %g1, %g7
6079 + bne,pn %icc, 2f
6080 + nop
6081 + retl
6082 + nop
6083 +2: BACKOFF_SPIN(%o2, %o3, 1b)
6084 + .size atomic_add_unchecked, .-atomic_add_unchecked
6085 +
6086 .globl atomic_sub
6087 .type atomic_sub,#function
6088 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6089 BACKOFF_SETUP(%o2)
6090 1: lduw [%o1], %g1
6091 - sub %g1, %o0, %g7
6092 + subcc %g1, %o0, %g7
6093 +
6094 +#ifdef CONFIG_PAX_REFCOUNT
6095 + tvs %icc, 6
6096 +#endif
6097 +
6098 cas [%o1], %g1, %g7
6099 cmp %g1, %g7
6100 bne,pn %icc, 2f
6101 @@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6102 2: BACKOFF_SPIN(%o2, %o3, 1b)
6103 .size atomic_sub, .-atomic_sub
6104
6105 + .globl atomic_sub_unchecked
6106 + .type atomic_sub_unchecked,#function
6107 +atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
6108 + BACKOFF_SETUP(%o2)
6109 +1: lduw [%o1], %g1
6110 + sub %g1, %o0, %g7
6111 + cas [%o1], %g1, %g7
6112 + cmp %g1, %g7
6113 + bne,pn %icc, 2f
6114 + nop
6115 + retl
6116 + nop
6117 +2: BACKOFF_SPIN(%o2, %o3, 1b)
6118 + .size atomic_sub_unchecked, .-atomic_sub_unchecked
6119 +
6120 .globl atomic_add_ret
6121 .type atomic_add_ret,#function
6122 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6123 BACKOFF_SETUP(%o2)
6124 1: lduw [%o1], %g1
6125 - add %g1, %o0, %g7
6126 + addcc %g1, %o0, %g7
6127 +
6128 +#ifdef CONFIG_PAX_REFCOUNT
6129 + tvs %icc, 6
6130 +#endif
6131 +
6132 cas [%o1], %g1, %g7
6133 cmp %g1, %g7
6134 bne,pn %icc, 2f
6135 @@ -59,12 +104,33 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6136 2: BACKOFF_SPIN(%o2, %o3, 1b)
6137 .size atomic_add_ret, .-atomic_add_ret
6138
6139 + .globl atomic_add_ret_unchecked
6140 + .type atomic_add_ret_unchecked,#function
6141 +atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6142 + BACKOFF_SETUP(%o2)
6143 +1: lduw [%o1], %g1
6144 + addcc %g1, %o0, %g7
6145 + cas [%o1], %g1, %g7
6146 + cmp %g1, %g7
6147 + bne,pn %icc, 2f
6148 + add %g7, %o0, %g7
6149 + sra %g7, 0, %o0
6150 + retl
6151 + nop
6152 +2: BACKOFF_SPIN(%o2, %o3, 1b)
6153 + .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
6154 +
6155 .globl atomic_sub_ret
6156 .type atomic_sub_ret,#function
6157 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
6158 BACKOFF_SETUP(%o2)
6159 1: lduw [%o1], %g1
6160 - sub %g1, %o0, %g7
6161 + subcc %g1, %o0, %g7
6162 +
6163 +#ifdef CONFIG_PAX_REFCOUNT
6164 + tvs %icc, 6
6165 +#endif
6166 +
6167 cas [%o1], %g1, %g7
6168 cmp %g1, %g7
6169 bne,pn %icc, 2f
6170 @@ -80,7 +146,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
6171 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
6172 BACKOFF_SETUP(%o2)
6173 1: ldx [%o1], %g1
6174 - add %g1, %o0, %g7
6175 + addcc %g1, %o0, %g7
6176 +
6177 +#ifdef CONFIG_PAX_REFCOUNT
6178 + tvs %xcc, 6
6179 +#endif
6180 +
6181 casx [%o1], %g1, %g7
6182 cmp %g1, %g7
6183 bne,pn %xcc, 2f
6184 @@ -90,12 +161,32 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
6185 2: BACKOFF_SPIN(%o2, %o3, 1b)
6186 .size atomic64_add, .-atomic64_add
6187
6188 + .globl atomic64_add_unchecked
6189 + .type atomic64_add_unchecked,#function
6190 +atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6191 + BACKOFF_SETUP(%o2)
6192 +1: ldx [%o1], %g1
6193 + addcc %g1, %o0, %g7
6194 + casx [%o1], %g1, %g7
6195 + cmp %g1, %g7
6196 + bne,pn %xcc, 2f
6197 + nop
6198 + retl
6199 + nop
6200 +2: BACKOFF_SPIN(%o2, %o3, 1b)
6201 + .size atomic64_add_unchecked, .-atomic64_add_unchecked
6202 +
6203 .globl atomic64_sub
6204 .type atomic64_sub,#function
6205 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6206 BACKOFF_SETUP(%o2)
6207 1: ldx [%o1], %g1
6208 - sub %g1, %o0, %g7
6209 + subcc %g1, %o0, %g7
6210 +
6211 +#ifdef CONFIG_PAX_REFCOUNT
6212 + tvs %xcc, 6
6213 +#endif
6214 +
6215 casx [%o1], %g1, %g7
6216 cmp %g1, %g7
6217 bne,pn %xcc, 2f
6218 @@ -105,12 +196,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6219 2: BACKOFF_SPIN(%o2, %o3, 1b)
6220 .size atomic64_sub, .-atomic64_sub
6221
6222 + .globl atomic64_sub_unchecked
6223 + .type atomic64_sub_unchecked,#function
6224 +atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
6225 + BACKOFF_SETUP(%o2)
6226 +1: ldx [%o1], %g1
6227 + subcc %g1, %o0, %g7
6228 + casx [%o1], %g1, %g7
6229 + cmp %g1, %g7
6230 + bne,pn %xcc, 2f
6231 + nop
6232 + retl
6233 + nop
6234 +2: BACKOFF_SPIN(%o2, %o3, 1b)
6235 + .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
6236 +
6237 .globl atomic64_add_ret
6238 .type atomic64_add_ret,#function
6239 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6240 BACKOFF_SETUP(%o2)
6241 1: ldx [%o1], %g1
6242 - add %g1, %o0, %g7
6243 + addcc %g1, %o0, %g7
6244 +
6245 +#ifdef CONFIG_PAX_REFCOUNT
6246 + tvs %xcc, 6
6247 +#endif
6248 +
6249 casx [%o1], %g1, %g7
6250 cmp %g1, %g7
6251 bne,pn %xcc, 2f
6252 @@ -121,12 +232,33 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6253 2: BACKOFF_SPIN(%o2, %o3, 1b)
6254 .size atomic64_add_ret, .-atomic64_add_ret
6255
6256 + .globl atomic64_add_ret_unchecked
6257 + .type atomic64_add_ret_unchecked,#function
6258 +atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6259 + BACKOFF_SETUP(%o2)
6260 +1: ldx [%o1], %g1
6261 + addcc %g1, %o0, %g7
6262 + casx [%o1], %g1, %g7
6263 + cmp %g1, %g7
6264 + bne,pn %xcc, 2f
6265 + add %g7, %o0, %g7
6266 + mov %g7, %o0
6267 + retl
6268 + nop
6269 +2: BACKOFF_SPIN(%o2, %o3, 1b)
6270 + .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
6271 +
6272 .globl atomic64_sub_ret
6273 .type atomic64_sub_ret,#function
6274 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
6275 BACKOFF_SETUP(%o2)
6276 1: ldx [%o1], %g1
6277 - sub %g1, %o0, %g7
6278 + subcc %g1, %o0, %g7
6279 +
6280 +#ifdef CONFIG_PAX_REFCOUNT
6281 + tvs %xcc, 6
6282 +#endif
6283 +
6284 casx [%o1], %g1, %g7
6285 cmp %g1, %g7
6286 bne,pn %xcc, 2f
6287 diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
6288 index 704b126..2e79d76 100644
6289 --- a/arch/sparc/lib/ksyms.c
6290 +++ b/arch/sparc/lib/ksyms.c
6291 @@ -144,12 +144,18 @@ EXPORT_SYMBOL(__downgrade_write);
6292
6293 /* Atomic counter implementation. */
6294 EXPORT_SYMBOL(atomic_add);
6295 +EXPORT_SYMBOL(atomic_add_unchecked);
6296 EXPORT_SYMBOL(atomic_add_ret);
6297 +EXPORT_SYMBOL(atomic_add_ret_unchecked);
6298 EXPORT_SYMBOL(atomic_sub);
6299 +EXPORT_SYMBOL(atomic_sub_unchecked);
6300 EXPORT_SYMBOL(atomic_sub_ret);
6301 EXPORT_SYMBOL(atomic64_add);
6302 +EXPORT_SYMBOL(atomic64_add_unchecked);
6303 EXPORT_SYMBOL(atomic64_add_ret);
6304 +EXPORT_SYMBOL(atomic64_add_ret_unchecked);
6305 EXPORT_SYMBOL(atomic64_sub);
6306 +EXPORT_SYMBOL(atomic64_sub_unchecked);
6307 EXPORT_SYMBOL(atomic64_sub_ret);
6308
6309 /* Atomic bit operations. */
6310 diff --git a/arch/sparc/lib/rwsem_64.S b/arch/sparc/lib/rwsem_64.S
6311 index 91a7d29..ce75c29 100644
6312 --- a/arch/sparc/lib/rwsem_64.S
6313 +++ b/arch/sparc/lib/rwsem_64.S
6314 @@ -11,7 +11,12 @@
6315 .globl __down_read
6316 __down_read:
6317 1: lduw [%o0], %g1
6318 - add %g1, 1, %g7
6319 + addcc %g1, 1, %g7
6320 +
6321 +#ifdef CONFIG_PAX_REFCOUNT
6322 + tvs %icc, 6
6323 +#endif
6324 +
6325 cas [%o0], %g1, %g7
6326 cmp %g1, %g7
6327 bne,pn %icc, 1b
6328 @@ -33,7 +38,12 @@ __down_read:
6329 .globl __down_read_trylock
6330 __down_read_trylock:
6331 1: lduw [%o0], %g1
6332 - add %g1, 1, %g7
6333 + addcc %g1, 1, %g7
6334 +
6335 +#ifdef CONFIG_PAX_REFCOUNT
6336 + tvs %icc, 6
6337 +#endif
6338 +
6339 cmp %g7, 0
6340 bl,pn %icc, 2f
6341 mov 0, %o1
6342 @@ -51,7 +61,12 @@ __down_write:
6343 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
6344 1:
6345 lduw [%o0], %g3
6346 - add %g3, %g1, %g7
6347 + addcc %g3, %g1, %g7
6348 +
6349 +#ifdef CONFIG_PAX_REFCOUNT
6350 + tvs %icc, 6
6351 +#endif
6352 +
6353 cas [%o0], %g3, %g7
6354 cmp %g3, %g7
6355 bne,pn %icc, 1b
6356 @@ -77,7 +92,12 @@ __down_write_trylock:
6357 cmp %g3, 0
6358 bne,pn %icc, 2f
6359 mov 0, %o1
6360 - add %g3, %g1, %g7
6361 + addcc %g3, %g1, %g7
6362 +
6363 +#ifdef CONFIG_PAX_REFCOUNT
6364 + tvs %icc, 6
6365 +#endif
6366 +
6367 cas [%o0], %g3, %g7
6368 cmp %g3, %g7
6369 bne,pn %icc, 1b
6370 @@ -90,7 +110,12 @@ __down_write_trylock:
6371 __up_read:
6372 1:
6373 lduw [%o0], %g1
6374 - sub %g1, 1, %g7
6375 + subcc %g1, 1, %g7
6376 +
6377 +#ifdef CONFIG_PAX_REFCOUNT
6378 + tvs %icc, 6
6379 +#endif
6380 +
6381 cas [%o0], %g1, %g7
6382 cmp %g1, %g7
6383 bne,pn %icc, 1b
6384 @@ -118,7 +143,12 @@ __up_write:
6385 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
6386 1:
6387 lduw [%o0], %g3
6388 - sub %g3, %g1, %g7
6389 + subcc %g3, %g1, %g7
6390 +
6391 +#ifdef CONFIG_PAX_REFCOUNT
6392 + tvs %icc, 6
6393 +#endif
6394 +
6395 cas [%o0], %g3, %g7
6396 cmp %g3, %g7
6397 bne,pn %icc, 1b
6398 @@ -143,7 +173,12 @@ __downgrade_write:
6399 or %g1, %lo(RWSEM_WAITING_BIAS), %g1
6400 1:
6401 lduw [%o0], %g3
6402 - sub %g3, %g1, %g7
6403 + subcc %g3, %g1, %g7
6404 +
6405 +#ifdef CONFIG_PAX_REFCOUNT
6406 + tvs %icc, 6
6407 +#endif
6408 +
6409 cas [%o0], %g3, %g7
6410 cmp %g3, %g7
6411 bne,pn %icc, 1b
6412 diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
6413 index 79836a7..62f47a2 100644
6414 --- a/arch/sparc/mm/Makefile
6415 +++ b/arch/sparc/mm/Makefile
6416 @@ -2,7 +2,7 @@
6417 #
6418
6419 asflags-y := -ansi
6420 -ccflags-y := -Werror
6421 +#ccflags-y := -Werror
6422
6423 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
6424 obj-y += fault_$(BITS).o
6425 diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
6426 index b99f81c..3453e93 100644
6427 --- a/arch/sparc/mm/fault_32.c
6428 +++ b/arch/sparc/mm/fault_32.c
6429 @@ -21,6 +21,9 @@
6430 #include <linux/interrupt.h>
6431 #include <linux/module.h>
6432 #include <linux/kdebug.h>
6433 +#include <linux/slab.h>
6434 +#include <linux/pagemap.h>
6435 +#include <linux/compiler.h>
6436
6437 #include <asm/system.h>
6438 #include <asm/page.h>
6439 @@ -167,6 +170,267 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
6440 return safe_compute_effective_address(regs, insn);
6441 }
6442
6443 +#ifdef CONFIG_PAX_PAGEEXEC
6444 +#ifdef CONFIG_PAX_DLRESOLVE
6445 +static void pax_emuplt_close(struct vm_area_struct *vma)
6446 +{
6447 + vma->vm_mm->call_dl_resolve = 0UL;
6448 +}
6449 +
6450 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6451 +{
6452 + unsigned int *kaddr;
6453 +
6454 + vmf->page = alloc_page(GFP_HIGHUSER);
6455 + if (!vmf->page)
6456 + return VM_FAULT_OOM;
6457 +
6458 + kaddr = kmap(vmf->page);
6459 + memset(kaddr, 0, PAGE_SIZE);
6460 + kaddr[0] = 0x9DE3BFA8U; /* save */
6461 + flush_dcache_page(vmf->page);
6462 + kunmap(vmf->page);
6463 + return VM_FAULT_MAJOR;
6464 +}
6465 +
6466 +static const struct vm_operations_struct pax_vm_ops = {
6467 + .close = pax_emuplt_close,
6468 + .fault = pax_emuplt_fault
6469 +};
6470 +
6471 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6472 +{
6473 + int ret;
6474 +
6475 + vma->vm_mm = current->mm;
6476 + vma->vm_start = addr;
6477 + vma->vm_end = addr + PAGE_SIZE;
6478 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6479 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6480 + vma->vm_ops = &pax_vm_ops;
6481 +
6482 + ret = insert_vm_struct(current->mm, vma);
6483 + if (ret)
6484 + return ret;
6485 +
6486 + ++current->mm->total_vm;
6487 + return 0;
6488 +}
6489 +#endif
6490 +
6491 +/*
6492 + * PaX: decide what to do with offenders (regs->pc = fault address)
6493 + *
6494 + * returns 1 when task should be killed
6495 + * 2 when patched PLT trampoline was detected
6496 + * 3 when unpatched PLT trampoline was detected
6497 + */
6498 +static int pax_handle_fetch_fault(struct pt_regs *regs)
6499 +{
6500 +
6501 +#ifdef CONFIG_PAX_EMUPLT
6502 + int err;
6503 +
6504 + do { /* PaX: patched PLT emulation #1 */
6505 + unsigned int sethi1, sethi2, jmpl;
6506 +
6507 + err = get_user(sethi1, (unsigned int *)regs->pc);
6508 + err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
6509 + err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
6510 +
6511 + if (err)
6512 + break;
6513 +
6514 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6515 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
6516 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
6517 + {
6518 + unsigned int addr;
6519 +
6520 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6521 + addr = regs->u_regs[UREG_G1];
6522 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6523 + regs->pc = addr;
6524 + regs->npc = addr+4;
6525 + return 2;
6526 + }
6527 + } while (0);
6528 +
6529 + { /* PaX: patched PLT emulation #2 */
6530 + unsigned int ba;
6531 +
6532 + err = get_user(ba, (unsigned int *)regs->pc);
6533 +
6534 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
6535 + unsigned int addr;
6536 +
6537 + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
6538 + regs->pc = addr;
6539 + regs->npc = addr+4;
6540 + return 2;
6541 + }
6542 + }
6543 +
6544 + do { /* PaX: patched PLT emulation #3 */
6545 + unsigned int sethi, jmpl, nop;
6546 +
6547 + err = get_user(sethi, (unsigned int *)regs->pc);
6548 + err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
6549 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
6550 +
6551 + if (err)
6552 + break;
6553 +
6554 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6555 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
6556 + nop == 0x01000000U)
6557 + {
6558 + unsigned int addr;
6559 +
6560 + addr = (sethi & 0x003FFFFFU) << 10;
6561 + regs->u_regs[UREG_G1] = addr;
6562 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6563 + regs->pc = addr;
6564 + regs->npc = addr+4;
6565 + return 2;
6566 + }
6567 + } while (0);
6568 +
6569 + do { /* PaX: unpatched PLT emulation step 1 */
6570 + unsigned int sethi, ba, nop;
6571 +
6572 + err = get_user(sethi, (unsigned int *)regs->pc);
6573 + err |= get_user(ba, (unsigned int *)(regs->pc+4));
6574 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
6575 +
6576 + if (err)
6577 + break;
6578 +
6579 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6580 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
6581 + nop == 0x01000000U)
6582 + {
6583 + unsigned int addr, save, call;
6584 +
6585 + if ((ba & 0xFFC00000U) == 0x30800000U)
6586 + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
6587 + else
6588 + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
6589 +
6590 + err = get_user(save, (unsigned int *)addr);
6591 + err |= get_user(call, (unsigned int *)(addr+4));
6592 + err |= get_user(nop, (unsigned int *)(addr+8));
6593 + if (err)
6594 + break;
6595 +
6596 +#ifdef CONFIG_PAX_DLRESOLVE
6597 + if (save == 0x9DE3BFA8U &&
6598 + (call & 0xC0000000U) == 0x40000000U &&
6599 + nop == 0x01000000U)
6600 + {
6601 + struct vm_area_struct *vma;
6602 + unsigned long call_dl_resolve;
6603 +
6604 + down_read(&current->mm->mmap_sem);
6605 + call_dl_resolve = current->mm->call_dl_resolve;
6606 + up_read(&current->mm->mmap_sem);
6607 + if (likely(call_dl_resolve))
6608 + goto emulate;
6609 +
6610 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
6611 +
6612 + down_write(&current->mm->mmap_sem);
6613 + if (current->mm->call_dl_resolve) {
6614 + call_dl_resolve = current->mm->call_dl_resolve;
6615 + up_write(&current->mm->mmap_sem);
6616 + if (vma)
6617 + kmem_cache_free(vm_area_cachep, vma);
6618 + goto emulate;
6619 + }
6620 +
6621 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
6622 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
6623 + up_write(&current->mm->mmap_sem);
6624 + if (vma)
6625 + kmem_cache_free(vm_area_cachep, vma);
6626 + return 1;
6627 + }
6628 +
6629 + if (pax_insert_vma(vma, call_dl_resolve)) {
6630 + up_write(&current->mm->mmap_sem);
6631 + kmem_cache_free(vm_area_cachep, vma);
6632 + return 1;
6633 + }
6634 +
6635 + current->mm->call_dl_resolve = call_dl_resolve;
6636 + up_write(&current->mm->mmap_sem);
6637 +
6638 +emulate:
6639 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6640 + regs->pc = call_dl_resolve;
6641 + regs->npc = addr+4;
6642 + return 3;
6643 + }
6644 +#endif
6645 +
6646 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
6647 + if ((save & 0xFFC00000U) == 0x05000000U &&
6648 + (call & 0xFFFFE000U) == 0x85C0A000U &&
6649 + nop == 0x01000000U)
6650 + {
6651 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6652 + regs->u_regs[UREG_G2] = addr + 4;
6653 + addr = (save & 0x003FFFFFU) << 10;
6654 + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6655 + regs->pc = addr;
6656 + regs->npc = addr+4;
6657 + return 3;
6658 + }
6659 + }
6660 + } while (0);
6661 +
6662 + do { /* PaX: unpatched PLT emulation step 2 */
6663 + unsigned int save, call, nop;
6664 +
6665 + err = get_user(save, (unsigned int *)(regs->pc-4));
6666 + err |= get_user(call, (unsigned int *)regs->pc);
6667 + err |= get_user(nop, (unsigned int *)(regs->pc+4));
6668 + if (err)
6669 + break;
6670 +
6671 + if (save == 0x9DE3BFA8U &&
6672 + (call & 0xC0000000U) == 0x40000000U &&
6673 + nop == 0x01000000U)
6674 + {
6675 + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
6676 +
6677 + regs->u_regs[UREG_RETPC] = regs->pc;
6678 + regs->pc = dl_resolve;
6679 + regs->npc = dl_resolve+4;
6680 + return 3;
6681 + }
6682 + } while (0);
6683 +#endif
6684 +
6685 + return 1;
6686 +}
6687 +
6688 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6689 +{
6690 + unsigned long i;
6691 +
6692 + printk(KERN_ERR "PAX: bytes at PC: ");
6693 + for (i = 0; i < 8; i++) {
6694 + unsigned int c;
6695 + if (get_user(c, (unsigned int *)pc+i))
6696 + printk(KERN_CONT "???????? ");
6697 + else
6698 + printk(KERN_CONT "%08x ", c);
6699 + }
6700 + printk("\n");
6701 +}
6702 +#endif
6703 +
6704 asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
6705 unsigned long address)
6706 {
6707 @@ -231,6 +495,24 @@ good_area:
6708 if(!(vma->vm_flags & VM_WRITE))
6709 goto bad_area;
6710 } else {
6711 +
6712 +#ifdef CONFIG_PAX_PAGEEXEC
6713 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
6714 + up_read(&mm->mmap_sem);
6715 + switch (pax_handle_fetch_fault(regs)) {
6716 +
6717 +#ifdef CONFIG_PAX_EMUPLT
6718 + case 2:
6719 + case 3:
6720 + return;
6721 +#endif
6722 +
6723 + }
6724 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
6725 + do_group_exit(SIGKILL);
6726 + }
6727 +#endif
6728 +
6729 /* Allow reads even for write-only mappings */
6730 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
6731 goto bad_area;
6732 diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
6733 index 43b0da9..a0b78f9 100644
6734 --- a/arch/sparc/mm/fault_64.c
6735 +++ b/arch/sparc/mm/fault_64.c
6736 @@ -20,6 +20,9 @@
6737 #include <linux/kprobes.h>
6738 #include <linux/kdebug.h>
6739 #include <linux/percpu.h>
6740 +#include <linux/slab.h>
6741 +#include <linux/pagemap.h>
6742 +#include <linux/compiler.h>
6743
6744 #include <asm/page.h>
6745 #include <asm/pgtable.h>
6746 @@ -78,7 +81,7 @@ static void bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
6747 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
6748 regs->tpc);
6749 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
6750 - printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
6751 + printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
6752 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
6753 dump_stack();
6754 unhandled_fault(regs->tpc, current, regs);
6755 @@ -249,6 +252,456 @@ static void noinline bogus_32bit_fault_address(struct pt_regs *regs,
6756 show_regs(regs);
6757 }
6758
6759 +#ifdef CONFIG_PAX_PAGEEXEC
6760 +#ifdef CONFIG_PAX_DLRESOLVE
6761 +static void pax_emuplt_close(struct vm_area_struct *vma)
6762 +{
6763 + vma->vm_mm->call_dl_resolve = 0UL;
6764 +}
6765 +
6766 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6767 +{
6768 + unsigned int *kaddr;
6769 +
6770 + vmf->page = alloc_page(GFP_HIGHUSER);
6771 + if (!vmf->page)
6772 + return VM_FAULT_OOM;
6773 +
6774 + kaddr = kmap(vmf->page);
6775 + memset(kaddr, 0, PAGE_SIZE);
6776 + kaddr[0] = 0x9DE3BFA8U; /* save */
6777 + flush_dcache_page(vmf->page);
6778 + kunmap(vmf->page);
6779 + return VM_FAULT_MAJOR;
6780 +}
6781 +
6782 +static const struct vm_operations_struct pax_vm_ops = {
6783 + .close = pax_emuplt_close,
6784 + .fault = pax_emuplt_fault
6785 +};
6786 +
6787 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6788 +{
6789 + int ret;
6790 +
6791 + vma->vm_mm = current->mm;
6792 + vma->vm_start = addr;
6793 + vma->vm_end = addr + PAGE_SIZE;
6794 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6795 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6796 + vma->vm_ops = &pax_vm_ops;
6797 +
6798 + ret = insert_vm_struct(current->mm, vma);
6799 + if (ret)
6800 + return ret;
6801 +
6802 + ++current->mm->total_vm;
6803 + return 0;
6804 +}
6805 +#endif
6806 +
6807 +/*
6808 + * PaX: decide what to do with offenders (regs->tpc = fault address)
6809 + *
6810 + * returns 1 when task should be killed
6811 + * 2 when patched PLT trampoline was detected
6812 + * 3 when unpatched PLT trampoline was detected
6813 + */
6814 +static int pax_handle_fetch_fault(struct pt_regs *regs)
6815 +{
6816 +
6817 +#ifdef CONFIG_PAX_EMUPLT
6818 + int err;
6819 +
6820 + do { /* PaX: patched PLT emulation #1 */
6821 + unsigned int sethi1, sethi2, jmpl;
6822 +
6823 + err = get_user(sethi1, (unsigned int *)regs->tpc);
6824 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
6825 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
6826 +
6827 + if (err)
6828 + break;
6829 +
6830 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6831 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
6832 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
6833 + {
6834 + unsigned long addr;
6835 +
6836 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6837 + addr = regs->u_regs[UREG_G1];
6838 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6839 +
6840 + if (test_thread_flag(TIF_32BIT))
6841 + addr &= 0xFFFFFFFFUL;
6842 +
6843 + regs->tpc = addr;
6844 + regs->tnpc = addr+4;
6845 + return 2;
6846 + }
6847 + } while (0);
6848 +
6849 + { /* PaX: patched PLT emulation #2 */
6850 + unsigned int ba;
6851 +
6852 + err = get_user(ba, (unsigned int *)regs->tpc);
6853 +
6854 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
6855 + unsigned long addr;
6856 +
6857 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
6858 +
6859 + if (test_thread_flag(TIF_32BIT))
6860 + addr &= 0xFFFFFFFFUL;
6861 +
6862 + regs->tpc = addr;
6863 + regs->tnpc = addr+4;
6864 + return 2;
6865 + }
6866 + }
6867 +
6868 + do { /* PaX: patched PLT emulation #3 */
6869 + unsigned int sethi, jmpl, nop;
6870 +
6871 + err = get_user(sethi, (unsigned int *)regs->tpc);
6872 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
6873 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6874 +
6875 + if (err)
6876 + break;
6877 +
6878 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6879 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
6880 + nop == 0x01000000U)
6881 + {
6882 + unsigned long addr;
6883 +
6884 + addr = (sethi & 0x003FFFFFU) << 10;
6885 + regs->u_regs[UREG_G1] = addr;
6886 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6887 +
6888 + if (test_thread_flag(TIF_32BIT))
6889 + addr &= 0xFFFFFFFFUL;
6890 +
6891 + regs->tpc = addr;
6892 + regs->tnpc = addr+4;
6893 + return 2;
6894 + }
6895 + } while (0);
6896 +
6897 + do { /* PaX: patched PLT emulation #4 */
6898 + unsigned int sethi, mov1, call, mov2;
6899 +
6900 + err = get_user(sethi, (unsigned int *)regs->tpc);
6901 + err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
6902 + err |= get_user(call, (unsigned int *)(regs->tpc+8));
6903 + err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
6904 +
6905 + if (err)
6906 + break;
6907 +
6908 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6909 + mov1 == 0x8210000FU &&
6910 + (call & 0xC0000000U) == 0x40000000U &&
6911 + mov2 == 0x9E100001U)
6912 + {
6913 + unsigned long addr;
6914 +
6915 + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
6916 + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
6917 +
6918 + if (test_thread_flag(TIF_32BIT))
6919 + addr &= 0xFFFFFFFFUL;
6920 +
6921 + regs->tpc = addr;
6922 + regs->tnpc = addr+4;
6923 + return 2;
6924 + }
6925 + } while (0);
6926 +
6927 + do { /* PaX: patched PLT emulation #5 */
6928 + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
6929 +
6930 + err = get_user(sethi, (unsigned int *)regs->tpc);
6931 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
6932 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
6933 + err |= get_user(or1, (unsigned int *)(regs->tpc+12));
6934 + err |= get_user(or2, (unsigned int *)(regs->tpc+16));
6935 + err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
6936 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
6937 + err |= get_user(nop, (unsigned int *)(regs->tpc+28));
6938 +
6939 + if (err)
6940 + break;
6941 +
6942 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6943 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
6944 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6945 + (or1 & 0xFFFFE000U) == 0x82106000U &&
6946 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
6947 + sllx == 0x83287020U &&
6948 + jmpl == 0x81C04005U &&
6949 + nop == 0x01000000U)
6950 + {
6951 + unsigned long addr;
6952 +
6953 + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
6954 + regs->u_regs[UREG_G1] <<= 32;
6955 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
6956 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
6957 + regs->tpc = addr;
6958 + regs->tnpc = addr+4;
6959 + return 2;
6960 + }
6961 + } while (0);
6962 +
6963 + do { /* PaX: patched PLT emulation #6 */
6964 + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
6965 +
6966 + err = get_user(sethi, (unsigned int *)regs->tpc);
6967 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
6968 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
6969 + err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
6970 + err |= get_user(or, (unsigned int *)(regs->tpc+16));
6971 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
6972 + err |= get_user(nop, (unsigned int *)(regs->tpc+24));
6973 +
6974 + if (err)
6975 + break;
6976 +
6977 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6978 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
6979 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6980 + sllx == 0x83287020U &&
6981 + (or & 0xFFFFE000U) == 0x8A116000U &&
6982 + jmpl == 0x81C04005U &&
6983 + nop == 0x01000000U)
6984 + {
6985 + unsigned long addr;
6986 +
6987 + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
6988 + regs->u_regs[UREG_G1] <<= 32;
6989 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
6990 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
6991 + regs->tpc = addr;
6992 + regs->tnpc = addr+4;
6993 + return 2;
6994 + }
6995 + } while (0);
6996 +
6997 + do { /* PaX: unpatched PLT emulation step 1 */
6998 + unsigned int sethi, ba, nop;
6999 +
7000 + err = get_user(sethi, (unsigned int *)regs->tpc);
7001 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
7002 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
7003 +
7004 + if (err)
7005 + break;
7006 +
7007 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
7008 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
7009 + nop == 0x01000000U)
7010 + {
7011 + unsigned long addr;
7012 + unsigned int save, call;
7013 + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
7014 +
7015 + if ((ba & 0xFFC00000U) == 0x30800000U)
7016 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
7017 + else
7018 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
7019 +
7020 + if (test_thread_flag(TIF_32BIT))
7021 + addr &= 0xFFFFFFFFUL;
7022 +
7023 + err = get_user(save, (unsigned int *)addr);
7024 + err |= get_user(call, (unsigned int *)(addr+4));
7025 + err |= get_user(nop, (unsigned int *)(addr+8));
7026 + if (err)
7027 + break;
7028 +
7029 +#ifdef CONFIG_PAX_DLRESOLVE
7030 + if (save == 0x9DE3BFA8U &&
7031 + (call & 0xC0000000U) == 0x40000000U &&
7032 + nop == 0x01000000U)
7033 + {
7034 + struct vm_area_struct *vma;
7035 + unsigned long call_dl_resolve;
7036 +
7037 + down_read(&current->mm->mmap_sem);
7038 + call_dl_resolve = current->mm->call_dl_resolve;
7039 + up_read(&current->mm->mmap_sem);
7040 + if (likely(call_dl_resolve))
7041 + goto emulate;
7042 +
7043 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
7044 +
7045 + down_write(&current->mm->mmap_sem);
7046 + if (current->mm->call_dl_resolve) {
7047 + call_dl_resolve = current->mm->call_dl_resolve;
7048 + up_write(&current->mm->mmap_sem);
7049 + if (vma)
7050 + kmem_cache_free(vm_area_cachep, vma);
7051 + goto emulate;
7052 + }
7053 +
7054 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
7055 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
7056 + up_write(&current->mm->mmap_sem);
7057 + if (vma)
7058 + kmem_cache_free(vm_area_cachep, vma);
7059 + return 1;
7060 + }
7061 +
7062 + if (pax_insert_vma(vma, call_dl_resolve)) {
7063 + up_write(&current->mm->mmap_sem);
7064 + kmem_cache_free(vm_area_cachep, vma);
7065 + return 1;
7066 + }
7067 +
7068 + current->mm->call_dl_resolve = call_dl_resolve;
7069 + up_write(&current->mm->mmap_sem);
7070 +
7071 +emulate:
7072 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7073 + regs->tpc = call_dl_resolve;
7074 + regs->tnpc = addr+4;
7075 + return 3;
7076 + }
7077 +#endif
7078 +
7079 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
7080 + if ((save & 0xFFC00000U) == 0x05000000U &&
7081 + (call & 0xFFFFE000U) == 0x85C0A000U &&
7082 + nop == 0x01000000U)
7083 + {
7084 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7085 + regs->u_regs[UREG_G2] = addr + 4;
7086 + addr = (save & 0x003FFFFFU) << 10;
7087 + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
7088 +
7089 + if (test_thread_flag(TIF_32BIT))
7090 + addr &= 0xFFFFFFFFUL;
7091 +
7092 + regs->tpc = addr;
7093 + regs->tnpc = addr+4;
7094 + return 3;
7095 + }
7096 +
7097 + /* PaX: 64-bit PLT stub */
7098 + err = get_user(sethi1, (unsigned int *)addr);
7099 + err |= get_user(sethi2, (unsigned int *)(addr+4));
7100 + err |= get_user(or1, (unsigned int *)(addr+8));
7101 + err |= get_user(or2, (unsigned int *)(addr+12));
7102 + err |= get_user(sllx, (unsigned int *)(addr+16));
7103 + err |= get_user(add, (unsigned int *)(addr+20));
7104 + err |= get_user(jmpl, (unsigned int *)(addr+24));
7105 + err |= get_user(nop, (unsigned int *)(addr+28));
7106 + if (err)
7107 + break;
7108 +
7109 + if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
7110 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
7111 + (or1 & 0xFFFFE000U) == 0x88112000U &&
7112 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
7113 + sllx == 0x89293020U &&
7114 + add == 0x8A010005U &&
7115 + jmpl == 0x89C14000U &&
7116 + nop == 0x01000000U)
7117 + {
7118 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7119 + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
7120 + regs->u_regs[UREG_G4] <<= 32;
7121 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
7122 + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
7123 + regs->u_regs[UREG_G4] = addr + 24;
7124 + addr = regs->u_regs[UREG_G5];
7125 + regs->tpc = addr;
7126 + regs->tnpc = addr+4;
7127 + return 3;
7128 + }
7129 + }
7130 + } while (0);
7131 +
7132 +#ifdef CONFIG_PAX_DLRESOLVE
7133 + do { /* PaX: unpatched PLT emulation step 2 */
7134 + unsigned int save, call, nop;
7135 +
7136 + err = get_user(save, (unsigned int *)(regs->tpc-4));
7137 + err |= get_user(call, (unsigned int *)regs->tpc);
7138 + err |= get_user(nop, (unsigned int *)(regs->tpc+4));
7139 + if (err)
7140 + break;
7141 +
7142 + if (save == 0x9DE3BFA8U &&
7143 + (call & 0xC0000000U) == 0x40000000U &&
7144 + nop == 0x01000000U)
7145 + {
7146 + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
7147 +
7148 + if (test_thread_flag(TIF_32BIT))
7149 + dl_resolve &= 0xFFFFFFFFUL;
7150 +
7151 + regs->u_regs[UREG_RETPC] = regs->tpc;
7152 + regs->tpc = dl_resolve;
7153 + regs->tnpc = dl_resolve+4;
7154 + return 3;
7155 + }
7156 + } while (0);
7157 +#endif
7158 +
7159 + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
7160 + unsigned int sethi, ba, nop;
7161 +
7162 + err = get_user(sethi, (unsigned int *)regs->tpc);
7163 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
7164 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
7165 +
7166 + if (err)
7167 + break;
7168 +
7169 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
7170 + (ba & 0xFFF00000U) == 0x30600000U &&
7171 + nop == 0x01000000U)
7172 + {
7173 + unsigned long addr;
7174 +
7175 + addr = (sethi & 0x003FFFFFU) << 10;
7176 + regs->u_regs[UREG_G1] = addr;
7177 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
7178 +
7179 + if (test_thread_flag(TIF_32BIT))
7180 + addr &= 0xFFFFFFFFUL;
7181 +
7182 + regs->tpc = addr;
7183 + regs->tnpc = addr+4;
7184 + return 2;
7185 + }
7186 + } while (0);
7187 +
7188 +#endif
7189 +
7190 + return 1;
7191 +}
7192 +
7193 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7194 +{
7195 + unsigned long i;
7196 +
7197 + printk(KERN_ERR "PAX: bytes at PC: ");
7198 + for (i = 0; i < 8; i++) {
7199 + unsigned int c;
7200 + if (get_user(c, (unsigned int *)pc+i))
7201 + printk(KERN_CONT "???????? ");
7202 + else
7203 + printk(KERN_CONT "%08x ", c);
7204 + }
7205 + printk("\n");
7206 +}
7207 +#endif
7208 +
7209 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
7210 {
7211 struct mm_struct *mm = current->mm;
7212 @@ -315,6 +768,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
7213 if (!vma)
7214 goto bad_area;
7215
7216 +#ifdef CONFIG_PAX_PAGEEXEC
7217 + /* PaX: detect ITLB misses on non-exec pages */
7218 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
7219 + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
7220 + {
7221 + if (address != regs->tpc)
7222 + goto good_area;
7223 +
7224 + up_read(&mm->mmap_sem);
7225 + switch (pax_handle_fetch_fault(regs)) {
7226 +
7227 +#ifdef CONFIG_PAX_EMUPLT
7228 + case 2:
7229 + case 3:
7230 + return;
7231 +#endif
7232 +
7233 + }
7234 + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
7235 + do_group_exit(SIGKILL);
7236 + }
7237 +#endif
7238 +
7239 /* Pure DTLB misses do not tell us whether the fault causing
7240 * load/store/atomic was a write or not, it only says that there
7241 * was no match. So in such a case we (carefully) read the
7242 diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
7243 index f27d103..1b06377 100644
7244 --- a/arch/sparc/mm/hugetlbpage.c
7245 +++ b/arch/sparc/mm/hugetlbpage.c
7246 @@ -69,7 +69,7 @@ full_search:
7247 }
7248 return -ENOMEM;
7249 }
7250 - if (likely(!vma || addr + len <= vma->vm_start)) {
7251 + if (likely(check_heap_stack_gap(vma, addr, len))) {
7252 /*
7253 * Remember the place where we stopped the search:
7254 */
7255 @@ -108,7 +108,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7256 /* make sure it can fit in the remaining address space */
7257 if (likely(addr > len)) {
7258 vma = find_vma(mm, addr-len);
7259 - if (!vma || addr <= vma->vm_start) {
7260 + if (check_heap_stack_gap(vma, addr - len, len)) {
7261 /* remember the address as a hint for next time */
7262 return (mm->free_area_cache = addr-len);
7263 }
7264 @@ -117,16 +117,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7265 if (unlikely(mm->mmap_base < len))
7266 goto bottomup;
7267
7268 - addr = (mm->mmap_base-len) & HPAGE_MASK;
7269 + addr = mm->mmap_base - len;
7270
7271 do {
7272 + addr &= HPAGE_MASK;
7273 /*
7274 * Lookup failure means no vma is above this address,
7275 * else if new region fits below vma->vm_start,
7276 * return with success:
7277 */
7278 vma = find_vma(mm, addr);
7279 - if (likely(!vma || addr+len <= vma->vm_start)) {
7280 + if (likely(check_heap_stack_gap(vma, addr, len))) {
7281 /* remember the address as a hint for next time */
7282 return (mm->free_area_cache = addr);
7283 }
7284 @@ -136,8 +137,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7285 mm->cached_hole_size = vma->vm_start - addr;
7286
7287 /* try just below the current vma->vm_start */
7288 - addr = (vma->vm_start-len) & HPAGE_MASK;
7289 - } while (likely(len < vma->vm_start));
7290 + addr = skip_heap_stack_gap(vma, len);
7291 + } while (!IS_ERR_VALUE(addr));
7292
7293 bottomup:
7294 /*
7295 @@ -183,8 +184,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
7296 if (addr) {
7297 addr = ALIGN(addr, HPAGE_SIZE);
7298 vma = find_vma(mm, addr);
7299 - if (task_size - len >= addr &&
7300 - (!vma || addr + len <= vma->vm_start))
7301 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
7302 return addr;
7303 }
7304 if (mm->get_unmapped_area == arch_get_unmapped_area)
7305 diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
7306 index dc7c3b1..34c0070 100644
7307 --- a/arch/sparc/mm/init_32.c
7308 +++ b/arch/sparc/mm/init_32.c
7309 @@ -317,6 +317,9 @@ extern void device_scan(void);
7310 pgprot_t PAGE_SHARED __read_mostly;
7311 EXPORT_SYMBOL(PAGE_SHARED);
7312
7313 +pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
7314 +EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
7315 +
7316 void __init paging_init(void)
7317 {
7318 switch(sparc_cpu_model) {
7319 @@ -345,17 +348,17 @@ void __init paging_init(void)
7320
7321 /* Initialize the protection map with non-constant, MMU dependent values. */
7322 protection_map[0] = PAGE_NONE;
7323 - protection_map[1] = PAGE_READONLY;
7324 - protection_map[2] = PAGE_COPY;
7325 - protection_map[3] = PAGE_COPY;
7326 + protection_map[1] = PAGE_READONLY_NOEXEC;
7327 + protection_map[2] = PAGE_COPY_NOEXEC;
7328 + protection_map[3] = PAGE_COPY_NOEXEC;
7329 protection_map[4] = PAGE_READONLY;
7330 protection_map[5] = PAGE_READONLY;
7331 protection_map[6] = PAGE_COPY;
7332 protection_map[7] = PAGE_COPY;
7333 protection_map[8] = PAGE_NONE;
7334 - protection_map[9] = PAGE_READONLY;
7335 - protection_map[10] = PAGE_SHARED;
7336 - protection_map[11] = PAGE_SHARED;
7337 + protection_map[9] = PAGE_READONLY_NOEXEC;
7338 + protection_map[10] = PAGE_SHARED_NOEXEC;
7339 + protection_map[11] = PAGE_SHARED_NOEXEC;
7340 protection_map[12] = PAGE_READONLY;
7341 protection_map[13] = PAGE_READONLY;
7342 protection_map[14] = PAGE_SHARED;
7343 diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
7344 index 509b1ff..bfd7118 100644
7345 --- a/arch/sparc/mm/srmmu.c
7346 +++ b/arch/sparc/mm/srmmu.c
7347 @@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
7348 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
7349 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
7350 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
7351 +
7352 +#ifdef CONFIG_PAX_PAGEEXEC
7353 + PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
7354 + BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
7355 + BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
7356 +#endif
7357 +
7358 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
7359 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
7360
7361 diff --git a/arch/um/Makefile b/arch/um/Makefile
7362 index fc633db..5e1a1c2 100644
7363 --- a/arch/um/Makefile
7364 +++ b/arch/um/Makefile
7365 @@ -49,6 +49,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
7366 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
7367 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64
7368
7369 +ifdef CONSTIFY_PLUGIN
7370 +USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7371 +endif
7372 +
7373 include $(srctree)/$(ARCH_DIR)/Makefile-$(SUBARCH)
7374
7375 #This will adjust *FLAGS accordingly to the platform.
7376 diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
7377 index 19e1bdd..3665b77 100644
7378 --- a/arch/um/include/asm/cache.h
7379 +++ b/arch/um/include/asm/cache.h
7380 @@ -1,6 +1,7 @@
7381 #ifndef __UM_CACHE_H
7382 #define __UM_CACHE_H
7383
7384 +#include <linux/const.h>
7385
7386 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
7387 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
7388 @@ -12,6 +13,6 @@
7389 # define L1_CACHE_SHIFT 5
7390 #endif
7391
7392 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7393 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7394
7395 #endif
7396 diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
7397 index 6c03acd..a5e0215 100644
7398 --- a/arch/um/include/asm/kmap_types.h
7399 +++ b/arch/um/include/asm/kmap_types.h
7400 @@ -23,6 +23,7 @@ enum km_type {
7401 KM_IRQ1,
7402 KM_SOFTIRQ0,
7403 KM_SOFTIRQ1,
7404 + KM_CLEARPAGE,
7405 KM_TYPE_NR
7406 };
7407
7408 diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
7409 index 4cc9b6c..02e5029 100644
7410 --- a/arch/um/include/asm/page.h
7411 +++ b/arch/um/include/asm/page.h
7412 @@ -14,6 +14,9 @@
7413 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
7414 #define PAGE_MASK (~(PAGE_SIZE-1))
7415
7416 +#define ktla_ktva(addr) (addr)
7417 +#define ktva_ktla(addr) (addr)
7418 +
7419 #ifndef __ASSEMBLY__
7420
7421 struct page;
7422 diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
7423 index 4a28a15..654dc2a 100644
7424 --- a/arch/um/kernel/process.c
7425 +++ b/arch/um/kernel/process.c
7426 @@ -393,22 +393,6 @@ int singlestepping(void * t)
7427 return 2;
7428 }
7429
7430 -/*
7431 - * Only x86 and x86_64 have an arch_align_stack().
7432 - * All other arches have "#define arch_align_stack(x) (x)"
7433 - * in their asm/system.h
7434 - * As this is included in UML from asm-um/system-generic.h,
7435 - * we can use it to behave as the subarch does.
7436 - */
7437 -#ifndef arch_align_stack
7438 -unsigned long arch_align_stack(unsigned long sp)
7439 -{
7440 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
7441 - sp -= get_random_int() % 8192;
7442 - return sp & ~0xf;
7443 -}
7444 -#endif
7445 -
7446 unsigned long get_wchan(struct task_struct *p)
7447 {
7448 unsigned long stack_page, sp, ip;
7449 diff --git a/arch/um/sys-i386/shared/sysdep/system.h b/arch/um/sys-i386/shared/sysdep/system.h
7450 index d1b93c4..ae1b7fd 100644
7451 --- a/arch/um/sys-i386/shared/sysdep/system.h
7452 +++ b/arch/um/sys-i386/shared/sysdep/system.h
7453 @@ -17,7 +17,7 @@
7454 # define AT_VECTOR_SIZE_ARCH 1
7455 #endif
7456
7457 -extern unsigned long arch_align_stack(unsigned long sp);
7458 +#define arch_align_stack(x) ((x) & ~0xfUL)
7459
7460 void default_idle(void);
7461
7462 diff --git a/arch/um/sys-i386/syscalls.c b/arch/um/sys-i386/syscalls.c
7463 index 857ca0b..9a2669d 100644
7464 --- a/arch/um/sys-i386/syscalls.c
7465 +++ b/arch/um/sys-i386/syscalls.c
7466 @@ -11,6 +11,21 @@
7467 #include "asm/uaccess.h"
7468 #include "asm/unistd.h"
7469
7470 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
7471 +{
7472 + unsigned long pax_task_size = TASK_SIZE;
7473 +
7474 +#ifdef CONFIG_PAX_SEGMEXEC
7475 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
7476 + pax_task_size = SEGMEXEC_TASK_SIZE;
7477 +#endif
7478 +
7479 + if (len > pax_task_size || addr > pax_task_size - len)
7480 + return -EINVAL;
7481 +
7482 + return 0;
7483 +}
7484 +
7485 /*
7486 * Perform the select(nd, in, out, ex, tv) and mmap() system
7487 * calls. Linux/i386 didn't use to be able to handle more than
7488 diff --git a/arch/um/sys-x86_64/shared/sysdep/system.h b/arch/um/sys-x86_64/shared/sysdep/system.h
7489 index d1b93c4..ae1b7fd 100644
7490 --- a/arch/um/sys-x86_64/shared/sysdep/system.h
7491 +++ b/arch/um/sys-x86_64/shared/sysdep/system.h
7492 @@ -17,7 +17,7 @@
7493 # define AT_VECTOR_SIZE_ARCH 1
7494 #endif
7495
7496 -extern unsigned long arch_align_stack(unsigned long sp);
7497 +#define arch_align_stack(x) ((x) & ~0xfUL)
7498
7499 void default_idle(void);
7500
7501 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
7502 index 73ae02a..f932de5 100644
7503 --- a/arch/x86/Kconfig
7504 +++ b/arch/x86/Kconfig
7505 @@ -223,7 +223,7 @@ config X86_TRAMPOLINE
7506
7507 config X86_32_LAZY_GS
7508 def_bool y
7509 - depends on X86_32 && !CC_STACKPROTECTOR
7510 + depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
7511
7512 config KTIME_SCALAR
7513 def_bool X86_32
7514 @@ -1008,7 +1008,7 @@ choice
7515
7516 config NOHIGHMEM
7517 bool "off"
7518 - depends on !X86_NUMAQ
7519 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7520 ---help---
7521 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
7522 However, the address space of 32-bit x86 processors is only 4
7523 @@ -1045,7 +1045,7 @@ config NOHIGHMEM
7524
7525 config HIGHMEM4G
7526 bool "4GB"
7527 - depends on !X86_NUMAQ
7528 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7529 ---help---
7530 Select this if you have a 32-bit processor and between 1 and 4
7531 gigabytes of physical RAM.
7532 @@ -1099,7 +1099,7 @@ config PAGE_OFFSET
7533 hex
7534 default 0xB0000000 if VMSPLIT_3G_OPT
7535 default 0x80000000 if VMSPLIT_2G
7536 - default 0x78000000 if VMSPLIT_2G_OPT
7537 + default 0x70000000 if VMSPLIT_2G_OPT
7538 default 0x40000000 if VMSPLIT_1G
7539 default 0xC0000000
7540 depends on X86_32
7541 @@ -1460,6 +1460,7 @@ config SECCOMP
7542
7543 config CC_STACKPROTECTOR
7544 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
7545 + depends on X86_64 || !PAX_MEMORY_UDEREF
7546 ---help---
7547 This option turns on the -fstack-protector GCC feature. This
7548 feature puts, at the beginning of functions, a canary value on
7549 @@ -1517,6 +1518,7 @@ config KEXEC_JUMP
7550 config PHYSICAL_START
7551 hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
7552 default "0x1000000"
7553 + range 0x400000 0x40000000
7554 ---help---
7555 This gives the physical address where the kernel is loaded.
7556
7557 @@ -1581,6 +1583,7 @@ config PHYSICAL_ALIGN
7558 hex
7559 prompt "Alignment value to which kernel should be aligned" if X86_32
7560 default "0x1000000"
7561 + range 0x400000 0x1000000 if PAX_KERNEXEC
7562 range 0x2000 0x1000000
7563 ---help---
7564 This value puts the alignment restrictions on physical address
7565 @@ -1612,9 +1615,10 @@ config HOTPLUG_CPU
7566 Say N if you want to disable CPU hotplug.
7567
7568 config COMPAT_VDSO
7569 - def_bool y
7570 + def_bool n
7571 prompt "Compat VDSO support"
7572 depends on X86_32 || IA32_EMULATION
7573 + depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
7574 ---help---
7575 Map the 32-bit VDSO to the predictable old-style address too.
7576 ---help---
7577 diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
7578 index 0e566103..1a6b57e 100644
7579 --- a/arch/x86/Kconfig.cpu
7580 +++ b/arch/x86/Kconfig.cpu
7581 @@ -340,7 +340,7 @@ config X86_PPRO_FENCE
7582
7583 config X86_F00F_BUG
7584 def_bool y
7585 - depends on M586MMX || M586TSC || M586 || M486 || M386
7586 + depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
7587
7588 config X86_WP_WORKS_OK
7589 def_bool y
7590 @@ -360,7 +360,7 @@ config X86_POPAD_OK
7591
7592 config X86_ALIGNMENT_16
7593 def_bool y
7594 - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
7595 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
7596
7597 config X86_INTEL_USERCOPY
7598 def_bool y
7599 @@ -406,7 +406,7 @@ config X86_CMPXCHG64
7600 # generates cmov.
7601 config X86_CMOV
7602 def_bool y
7603 - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
7604 + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
7605
7606 config X86_MINIMUM_CPU_FAMILY
7607 int
7608 diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
7609 index d105f29..c928727 100644
7610 --- a/arch/x86/Kconfig.debug
7611 +++ b/arch/x86/Kconfig.debug
7612 @@ -99,7 +99,7 @@ config X86_PTDUMP
7613 config DEBUG_RODATA
7614 bool "Write protect kernel read-only data structures"
7615 default y
7616 - depends on DEBUG_KERNEL
7617 + depends on DEBUG_KERNEL && BROKEN
7618 ---help---
7619 Mark the kernel read-only data as write-protected in the pagetables,
7620 in order to catch accidental (and incorrect) writes to such const
7621 diff --git a/arch/x86/Makefile b/arch/x86/Makefile
7622 index d2d24c9..0f21f8d 100644
7623 --- a/arch/x86/Makefile
7624 +++ b/arch/x86/Makefile
7625 @@ -44,6 +44,7 @@ ifeq ($(CONFIG_X86_32),y)
7626 else
7627 BITS := 64
7628 UTS_MACHINE := x86_64
7629 + biarch := $(call cc-option,-m64)
7630 CHECKFLAGS += -D__x86_64__ -m64
7631
7632 KBUILD_AFLAGS += -m64
7633 @@ -189,3 +190,12 @@ define archhelp
7634 echo ' FDARGS="..." arguments for the booted kernel'
7635 echo ' FDINITRD=file initrd for the booted kernel'
7636 endef
7637 +
7638 +define OLD_LD
7639 +
7640 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
7641 +*** Please upgrade your binutils to 2.18 or newer
7642 +endef
7643 +
7644 +archprepare:
7645 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
7646 diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
7647 index ec749c2..bbb5319 100644
7648 --- a/arch/x86/boot/Makefile
7649 +++ b/arch/x86/boot/Makefile
7650 @@ -69,6 +69,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
7651 $(call cc-option, -fno-stack-protector) \
7652 $(call cc-option, -mpreferred-stack-boundary=2)
7653 KBUILD_CFLAGS += $(call cc-option, -m32)
7654 +ifdef CONSTIFY_PLUGIN
7655 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7656 +endif
7657 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7658 GCOV_PROFILE := n
7659
7660 diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
7661 index 878e4b9..20537ab 100644
7662 --- a/arch/x86/boot/bitops.h
7663 +++ b/arch/x86/boot/bitops.h
7664 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7665 u8 v;
7666 const u32 *p = (const u32 *)addr;
7667
7668 - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7669 + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7670 return v;
7671 }
7672
7673 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7674
7675 static inline void set_bit(int nr, void *addr)
7676 {
7677 - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7678 + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7679 }
7680
7681 #endif /* BOOT_BITOPS_H */
7682 diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
7683 index 98239d2..f40214c 100644
7684 --- a/arch/x86/boot/boot.h
7685 +++ b/arch/x86/boot/boot.h
7686 @@ -82,7 +82,7 @@ static inline void io_delay(void)
7687 static inline u16 ds(void)
7688 {
7689 u16 seg;
7690 - asm("movw %%ds,%0" : "=rm" (seg));
7691 + asm volatile("movw %%ds,%0" : "=rm" (seg));
7692 return seg;
7693 }
7694
7695 @@ -178,7 +178,7 @@ static inline void wrgs32(u32 v, addr_t addr)
7696 static inline int memcmp(const void *s1, const void *s2, size_t len)
7697 {
7698 u8 diff;
7699 - asm("repe; cmpsb; setnz %0"
7700 + asm volatile("repe; cmpsb; setnz %0"
7701 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
7702 return diff;
7703 }
7704 diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
7705 index f8ed065..5bf5ff3 100644
7706 --- a/arch/x86/boot/compressed/Makefile
7707 +++ b/arch/x86/boot/compressed/Makefile
7708 @@ -13,6 +13,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
7709 KBUILD_CFLAGS += $(cflags-y)
7710 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
7711 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
7712 +ifdef CONSTIFY_PLUGIN
7713 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7714 +endif
7715
7716 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7717 GCOV_PROFILE := n
7718 diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
7719 index f543b70..b60fba8 100644
7720 --- a/arch/x86/boot/compressed/head_32.S
7721 +++ b/arch/x86/boot/compressed/head_32.S
7722 @@ -76,7 +76,7 @@ ENTRY(startup_32)
7723 notl %eax
7724 andl %eax, %ebx
7725 #else
7726 - movl $LOAD_PHYSICAL_ADDR, %ebx
7727 + movl $____LOAD_PHYSICAL_ADDR, %ebx
7728 #endif
7729
7730 /* Target address to relocate to for decompression */
7731 @@ -149,7 +149,7 @@ relocated:
7732 * and where it was actually loaded.
7733 */
7734 movl %ebp, %ebx
7735 - subl $LOAD_PHYSICAL_ADDR, %ebx
7736 + subl $____LOAD_PHYSICAL_ADDR, %ebx
7737 jz 2f /* Nothing to be done if loaded at compiled addr. */
7738 /*
7739 * Process relocations.
7740 @@ -157,8 +157,7 @@ relocated:
7741
7742 1: subl $4, %edi
7743 movl (%edi), %ecx
7744 - testl %ecx, %ecx
7745 - jz 2f
7746 + jecxz 2f
7747 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
7748 jmp 1b
7749 2:
7750 diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
7751 index 077e1b6..2c6b13b 100644
7752 --- a/arch/x86/boot/compressed/head_64.S
7753 +++ b/arch/x86/boot/compressed/head_64.S
7754 @@ -91,7 +91,7 @@ ENTRY(startup_32)
7755 notl %eax
7756 andl %eax, %ebx
7757 #else
7758 - movl $LOAD_PHYSICAL_ADDR, %ebx
7759 + movl $____LOAD_PHYSICAL_ADDR, %ebx
7760 #endif
7761
7762 /* Target address to relocate to for decompression */
7763 @@ -183,7 +183,7 @@ no_longmode:
7764 hlt
7765 jmp 1b
7766
7767 -#include "../../kernel/verify_cpu_64.S"
7768 +#include "../../kernel/verify_cpu.S"
7769
7770 /*
7771 * Be careful here startup_64 needs to be at a predictable
7772 @@ -234,7 +234,7 @@ ENTRY(startup_64)
7773 notq %rax
7774 andq %rax, %rbp
7775 #else
7776 - movq $LOAD_PHYSICAL_ADDR, %rbp
7777 + movq $____LOAD_PHYSICAL_ADDR, %rbp
7778 #endif
7779
7780 /* Target address to relocate to for decompression */
7781 diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
7782 index 842b2a3..f00178b 100644
7783 --- a/arch/x86/boot/compressed/misc.c
7784 +++ b/arch/x86/boot/compressed/misc.c
7785 @@ -288,7 +288,7 @@ static void parse_elf(void *output)
7786 case PT_LOAD:
7787 #ifdef CONFIG_RELOCATABLE
7788 dest = output;
7789 - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
7790 + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
7791 #else
7792 dest = (void *)(phdr->p_paddr);
7793 #endif
7794 @@ -335,7 +335,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
7795 error("Destination address too large");
7796 #endif
7797 #ifndef CONFIG_RELOCATABLE
7798 - if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
7799 + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
7800 error("Wrong destination address");
7801 #endif
7802
7803 diff --git a/arch/x86/boot/compressed/mkpiggy.c b/arch/x86/boot/compressed/mkpiggy.c
7804 index bcbd36c..b1754af 100644
7805 --- a/arch/x86/boot/compressed/mkpiggy.c
7806 +++ b/arch/x86/boot/compressed/mkpiggy.c
7807 @@ -74,7 +74,7 @@ int main(int argc, char *argv[])
7808
7809 offs = (olen > ilen) ? olen - ilen : 0;
7810 offs += olen >> 12; /* Add 8 bytes for each 32K block */
7811 - offs += 32*1024 + 18; /* Add 32K + 18 bytes slack */
7812 + offs += 64*1024; /* Add 64K bytes slack */
7813 offs = (offs+4095) & ~4095; /* Round to a 4K boundary */
7814
7815 printf(".section \".rodata.compressed\",\"a\",@progbits\n");
7816 diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
7817 index bbeb0c3..f5167ab 100644
7818 --- a/arch/x86/boot/compressed/relocs.c
7819 +++ b/arch/x86/boot/compressed/relocs.c
7820 @@ -10,8 +10,11 @@
7821 #define USE_BSD
7822 #include <endian.h>
7823
7824 +#include "../../../../include/linux/autoconf.h"
7825 +
7826 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
7827 static Elf32_Ehdr ehdr;
7828 +static Elf32_Phdr *phdr;
7829 static unsigned long reloc_count, reloc_idx;
7830 static unsigned long *relocs;
7831
7832 @@ -37,7 +40,7 @@ static const char* safe_abs_relocs[] = {
7833
7834 static int is_safe_abs_reloc(const char* sym_name)
7835 {
7836 - int i;
7837 + unsigned int i;
7838
7839 for (i = 0; i < ARRAY_SIZE(safe_abs_relocs); i++) {
7840 if (!strcmp(sym_name, safe_abs_relocs[i]))
7841 @@ -245,9 +248,39 @@ static void read_ehdr(FILE *fp)
7842 }
7843 }
7844
7845 +static void read_phdrs(FILE *fp)
7846 +{
7847 + unsigned int i;
7848 +
7849 + phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
7850 + if (!phdr) {
7851 + die("Unable to allocate %d program headers\n",
7852 + ehdr.e_phnum);
7853 + }
7854 + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
7855 + die("Seek to %d failed: %s\n",
7856 + ehdr.e_phoff, strerror(errno));
7857 + }
7858 + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
7859 + die("Cannot read ELF program headers: %s\n",
7860 + strerror(errno));
7861 + }
7862 + for(i = 0; i < ehdr.e_phnum; i++) {
7863 + phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
7864 + phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
7865 + phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
7866 + phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
7867 + phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
7868 + phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
7869 + phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
7870 + phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
7871 + }
7872 +
7873 +}
7874 +
7875 static void read_shdrs(FILE *fp)
7876 {
7877 - int i;
7878 + unsigned int i;
7879 Elf32_Shdr shdr;
7880
7881 secs = calloc(ehdr.e_shnum, sizeof(struct section));
7882 @@ -282,7 +315,7 @@ static void read_shdrs(FILE *fp)
7883
7884 static void read_strtabs(FILE *fp)
7885 {
7886 - int i;
7887 + unsigned int i;
7888 for (i = 0; i < ehdr.e_shnum; i++) {
7889 struct section *sec = &secs[i];
7890 if (sec->shdr.sh_type != SHT_STRTAB) {
7891 @@ -307,7 +340,7 @@ static void read_strtabs(FILE *fp)
7892
7893 static void read_symtabs(FILE *fp)
7894 {
7895 - int i,j;
7896 + unsigned int i,j;
7897 for (i = 0; i < ehdr.e_shnum; i++) {
7898 struct section *sec = &secs[i];
7899 if (sec->shdr.sh_type != SHT_SYMTAB) {
7900 @@ -340,7 +373,9 @@ static void read_symtabs(FILE *fp)
7901
7902 static void read_relocs(FILE *fp)
7903 {
7904 - int i,j;
7905 + unsigned int i,j;
7906 + uint32_t base;
7907 +
7908 for (i = 0; i < ehdr.e_shnum; i++) {
7909 struct section *sec = &secs[i];
7910 if (sec->shdr.sh_type != SHT_REL) {
7911 @@ -360,9 +395,18 @@ static void read_relocs(FILE *fp)
7912 die("Cannot read symbol table: %s\n",
7913 strerror(errno));
7914 }
7915 + base = 0;
7916 + for (j = 0; j < ehdr.e_phnum; j++) {
7917 + if (phdr[j].p_type != PT_LOAD )
7918 + continue;
7919 + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
7920 + continue;
7921 + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
7922 + break;
7923 + }
7924 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
7925 Elf32_Rel *rel = &sec->reltab[j];
7926 - rel->r_offset = elf32_to_cpu(rel->r_offset);
7927 + rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
7928 rel->r_info = elf32_to_cpu(rel->r_info);
7929 }
7930 }
7931 @@ -371,14 +415,14 @@ static void read_relocs(FILE *fp)
7932
7933 static void print_absolute_symbols(void)
7934 {
7935 - int i;
7936 + unsigned int i;
7937 printf("Absolute symbols\n");
7938 printf(" Num: Value Size Type Bind Visibility Name\n");
7939 for (i = 0; i < ehdr.e_shnum; i++) {
7940 struct section *sec = &secs[i];
7941 char *sym_strtab;
7942 Elf32_Sym *sh_symtab;
7943 - int j;
7944 + unsigned int j;
7945
7946 if (sec->shdr.sh_type != SHT_SYMTAB) {
7947 continue;
7948 @@ -406,14 +450,14 @@ static void print_absolute_symbols(void)
7949
7950 static void print_absolute_relocs(void)
7951 {
7952 - int i, printed = 0;
7953 + unsigned int i, printed = 0;
7954
7955 for (i = 0; i < ehdr.e_shnum; i++) {
7956 struct section *sec = &secs[i];
7957 struct section *sec_applies, *sec_symtab;
7958 char *sym_strtab;
7959 Elf32_Sym *sh_symtab;
7960 - int j;
7961 + unsigned int j;
7962 if (sec->shdr.sh_type != SHT_REL) {
7963 continue;
7964 }
7965 @@ -474,13 +518,13 @@ static void print_absolute_relocs(void)
7966
7967 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
7968 {
7969 - int i;
7970 + unsigned int i;
7971 /* Walk through the relocations */
7972 for (i = 0; i < ehdr.e_shnum; i++) {
7973 char *sym_strtab;
7974 Elf32_Sym *sh_symtab;
7975 struct section *sec_applies, *sec_symtab;
7976 - int j;
7977 + unsigned int j;
7978 struct section *sec = &secs[i];
7979
7980 if (sec->shdr.sh_type != SHT_REL) {
7981 @@ -504,6 +548,21 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
7982 if (sym->st_shndx == SHN_ABS) {
7983 continue;
7984 }
7985 + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
7986 + if (!strcmp(sec_name(sym->st_shndx), ".data.percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
7987 + continue;
7988 +
7989 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
7990 + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
7991 + if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
7992 + continue;
7993 + if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
7994 + continue;
7995 + if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
7996 + continue;
7997 + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
7998 + continue;
7999 +#endif
8000 if (r_type == R_386_NONE || r_type == R_386_PC32) {
8001 /*
8002 * NONE can be ignored and and PC relative
8003 @@ -541,7 +600,7 @@ static int cmp_relocs(const void *va, const void *vb)
8004
8005 static void emit_relocs(int as_text)
8006 {
8007 - int i;
8008 + unsigned int i;
8009 /* Count how many relocations I have and allocate space for them. */
8010 reloc_count = 0;
8011 walk_relocs(count_reloc);
8012 @@ -634,6 +693,7 @@ int main(int argc, char **argv)
8013 fname, strerror(errno));
8014 }
8015 read_ehdr(fp);
8016 + read_phdrs(fp);
8017 read_shdrs(fp);
8018 read_strtabs(fp);
8019 read_symtabs(fp);
8020 diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
8021 index 4d3ff03..e4972ff 100644
8022 --- a/arch/x86/boot/cpucheck.c
8023 +++ b/arch/x86/boot/cpucheck.c
8024 @@ -74,7 +74,7 @@ static int has_fpu(void)
8025 u16 fcw = -1, fsw = -1;
8026 u32 cr0;
8027
8028 - asm("movl %%cr0,%0" : "=r" (cr0));
8029 + asm volatile("movl %%cr0,%0" : "=r" (cr0));
8030 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
8031 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
8032 asm volatile("movl %0,%%cr0" : : "r" (cr0));
8033 @@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
8034 {
8035 u32 f0, f1;
8036
8037 - asm("pushfl ; "
8038 + asm volatile("pushfl ; "
8039 "pushfl ; "
8040 "popl %0 ; "
8041 "movl %0,%1 ; "
8042 @@ -115,7 +115,7 @@ static void get_flags(void)
8043 set_bit(X86_FEATURE_FPU, cpu.flags);
8044
8045 if (has_eflag(X86_EFLAGS_ID)) {
8046 - asm("cpuid"
8047 + asm volatile("cpuid"
8048 : "=a" (max_intel_level),
8049 "=b" (cpu_vendor[0]),
8050 "=d" (cpu_vendor[1]),
8051 @@ -124,7 +124,7 @@ static void get_flags(void)
8052
8053 if (max_intel_level >= 0x00000001 &&
8054 max_intel_level <= 0x0000ffff) {
8055 - asm("cpuid"
8056 + asm volatile("cpuid"
8057 : "=a" (tfms),
8058 "=c" (cpu.flags[4]),
8059 "=d" (cpu.flags[0])
8060 @@ -136,7 +136,7 @@ static void get_flags(void)
8061 cpu.model += ((tfms >> 16) & 0xf) << 4;
8062 }
8063
8064 - asm("cpuid"
8065 + asm volatile("cpuid"
8066 : "=a" (max_amd_level)
8067 : "a" (0x80000000)
8068 : "ebx", "ecx", "edx");
8069 @@ -144,7 +144,7 @@ static void get_flags(void)
8070 if (max_amd_level >= 0x80000001 &&
8071 max_amd_level <= 0x8000ffff) {
8072 u32 eax = 0x80000001;
8073 - asm("cpuid"
8074 + asm volatile("cpuid"
8075 : "+a" (eax),
8076 "=c" (cpu.flags[6]),
8077 "=d" (cpu.flags[1])
8078 @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
8079 u32 ecx = MSR_K7_HWCR;
8080 u32 eax, edx;
8081
8082 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8083 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8084 eax &= ~(1 << 15);
8085 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8086 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8087
8088 get_flags(); /* Make sure it really did something */
8089 err = check_flags();
8090 @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
8091 u32 ecx = MSR_VIA_FCR;
8092 u32 eax, edx;
8093
8094 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8095 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8096 eax |= (1<<1)|(1<<7);
8097 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8098 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8099
8100 set_bit(X86_FEATURE_CX8, cpu.flags);
8101 err = check_flags();
8102 @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
8103 u32 eax, edx;
8104 u32 level = 1;
8105
8106 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8107 - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
8108 - asm("cpuid"
8109 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8110 + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
8111 + asm volatile("cpuid"
8112 : "+a" (level), "=d" (cpu.flags[0])
8113 : : "ecx", "ebx");
8114 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8115 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8116
8117 err = check_flags();
8118 }
8119 diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
8120 index b31cc54..8d69237 100644
8121 --- a/arch/x86/boot/header.S
8122 +++ b/arch/x86/boot/header.S
8123 @@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical pointer to
8124 # single linked list of
8125 # struct setup_data
8126
8127 -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
8128 +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
8129
8130 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
8131 #define VO_INIT_SIZE (VO__end - VO__text)
8132 diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
8133 index cae3feb..ff8ff2a 100644
8134 --- a/arch/x86/boot/memory.c
8135 +++ b/arch/x86/boot/memory.c
8136 @@ -19,7 +19,7 @@
8137
8138 static int detect_memory_e820(void)
8139 {
8140 - int count = 0;
8141 + unsigned int count = 0;
8142 struct biosregs ireg, oreg;
8143 struct e820entry *desc = boot_params.e820_map;
8144 static struct e820entry buf; /* static so it is zeroed */
8145 diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
8146 index 11e8c6e..fdbb1ed 100644
8147 --- a/arch/x86/boot/video-vesa.c
8148 +++ b/arch/x86/boot/video-vesa.c
8149 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
8150
8151 boot_params.screen_info.vesapm_seg = oreg.es;
8152 boot_params.screen_info.vesapm_off = oreg.di;
8153 + boot_params.screen_info.vesapm_size = oreg.cx;
8154 }
8155
8156 /*
8157 diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
8158 index d42da38..787cdf3 100644
8159 --- a/arch/x86/boot/video.c
8160 +++ b/arch/x86/boot/video.c
8161 @@ -90,7 +90,7 @@ static void store_mode_params(void)
8162 static unsigned int get_entry(void)
8163 {
8164 char entry_buf[4];
8165 - int i, len = 0;
8166 + unsigned int i, len = 0;
8167 int key;
8168 unsigned int v;
8169
8170 diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
8171 index 5b577d5..3c1fed4 100644
8172 --- a/arch/x86/crypto/aes-x86_64-asm_64.S
8173 +++ b/arch/x86/crypto/aes-x86_64-asm_64.S
8174 @@ -8,6 +8,8 @@
8175 * including this sentence is retained in full.
8176 */
8177
8178 +#include <asm/alternative-asm.h>
8179 +
8180 .extern crypto_ft_tab
8181 .extern crypto_it_tab
8182 .extern crypto_fl_tab
8183 @@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
8184 je B192; \
8185 leaq 32(r9),r9;
8186
8187 +#define ret pax_force_retaddr 0, 1; ret
8188 +
8189 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
8190 movq r1,r2; \
8191 movq r3,r4; \
8192 diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
8193 index eb0566e..e3ebad8 100644
8194 --- a/arch/x86/crypto/aesni-intel_asm.S
8195 +++ b/arch/x86/crypto/aesni-intel_asm.S
8196 @@ -16,6 +16,7 @@
8197 */
8198
8199 #include <linux/linkage.h>
8200 +#include <asm/alternative-asm.h>
8201
8202 .text
8203
8204 @@ -52,6 +53,7 @@ _key_expansion_256a:
8205 pxor %xmm1, %xmm0
8206 movaps %xmm0, (%rcx)
8207 add $0x10, %rcx
8208 + pax_force_retaddr_bts
8209 ret
8210
8211 _key_expansion_192a:
8212 @@ -75,6 +77,7 @@ _key_expansion_192a:
8213 shufps $0b01001110, %xmm2, %xmm1
8214 movaps %xmm1, 16(%rcx)
8215 add $0x20, %rcx
8216 + pax_force_retaddr_bts
8217 ret
8218
8219 _key_expansion_192b:
8220 @@ -93,6 +96,7 @@ _key_expansion_192b:
8221
8222 movaps %xmm0, (%rcx)
8223 add $0x10, %rcx
8224 + pax_force_retaddr_bts
8225 ret
8226
8227 _key_expansion_256b:
8228 @@ -104,6 +108,7 @@ _key_expansion_256b:
8229 pxor %xmm1, %xmm2
8230 movaps %xmm2, (%rcx)
8231 add $0x10, %rcx
8232 + pax_force_retaddr_bts
8233 ret
8234
8235 /*
8236 @@ -239,7 +244,9 @@ ENTRY(aesni_set_key)
8237 cmp %rcx, %rdi
8238 jb .Ldec_key_loop
8239 xor %rax, %rax
8240 + pax_force_retaddr 0, 1
8241 ret
8242 +ENDPROC(aesni_set_key)
8243
8244 /*
8245 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
8246 @@ -249,7 +256,9 @@ ENTRY(aesni_enc)
8247 movups (INP), STATE # input
8248 call _aesni_enc1
8249 movups STATE, (OUTP) # output
8250 + pax_force_retaddr 0, 1
8251 ret
8252 +ENDPROC(aesni_enc)
8253
8254 /*
8255 * _aesni_enc1: internal ABI
8256 @@ -319,6 +328,7 @@ _aesni_enc1:
8257 movaps 0x70(TKEYP), KEY
8258 # aesenclast KEY, STATE # last round
8259 .byte 0x66, 0x0f, 0x38, 0xdd, 0xc2
8260 + pax_force_retaddr_bts
8261 ret
8262
8263 /*
8264 @@ -482,6 +492,7 @@ _aesni_enc4:
8265 .byte 0x66, 0x0f, 0x38, 0xdd, 0xea
8266 # aesenclast KEY, STATE4
8267 .byte 0x66, 0x0f, 0x38, 0xdd, 0xf2
8268 + pax_force_retaddr_bts
8269 ret
8270
8271 /*
8272 @@ -493,7 +504,9 @@ ENTRY(aesni_dec)
8273 movups (INP), STATE # input
8274 call _aesni_dec1
8275 movups STATE, (OUTP) #output
8276 + pax_force_retaddr 0, 1
8277 ret
8278 +ENDPROC(aesni_dec)
8279
8280 /*
8281 * _aesni_dec1: internal ABI
8282 @@ -563,6 +576,7 @@ _aesni_dec1:
8283 movaps 0x70(TKEYP), KEY
8284 # aesdeclast KEY, STATE # last round
8285 .byte 0x66, 0x0f, 0x38, 0xdf, 0xc2
8286 + pax_force_retaddr_bts
8287 ret
8288
8289 /*
8290 @@ -726,6 +740,7 @@ _aesni_dec4:
8291 .byte 0x66, 0x0f, 0x38, 0xdf, 0xea
8292 # aesdeclast KEY, STATE4
8293 .byte 0x66, 0x0f, 0x38, 0xdf, 0xf2
8294 + pax_force_retaddr_bts
8295 ret
8296
8297 /*
8298 @@ -769,7 +784,9 @@ ENTRY(aesni_ecb_enc)
8299 cmp $16, LEN
8300 jge .Lecb_enc_loop1
8301 .Lecb_enc_ret:
8302 + pax_force_retaddr 0, 1
8303 ret
8304 +ENDPROC(aesni_ecb_enc)
8305
8306 /*
8307 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
8308 @@ -813,7 +830,9 @@ ENTRY(aesni_ecb_dec)
8309 cmp $16, LEN
8310 jge .Lecb_dec_loop1
8311 .Lecb_dec_ret:
8312 + pax_force_retaddr 0, 1
8313 ret
8314 +ENDPROC(aesni_ecb_dec)
8315
8316 /*
8317 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
8318 @@ -837,7 +856,9 @@ ENTRY(aesni_cbc_enc)
8319 jge .Lcbc_enc_loop
8320 movups STATE, (IVP)
8321 .Lcbc_enc_ret:
8322 + pax_force_retaddr 0, 1
8323 ret
8324 +ENDPROC(aesni_cbc_enc)
8325
8326 /*
8327 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
8328 @@ -894,4 +915,6 @@ ENTRY(aesni_cbc_dec)
8329 .Lcbc_dec_ret:
8330 movups IV, (IVP)
8331 .Lcbc_dec_just_ret:
8332 + pax_force_retaddr 0, 1
8333 ret
8334 +ENDPROC(aesni_cbc_dec)
8335 diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
8336 index 6214a9b..1f4fc9a 100644
8337 --- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
8338 +++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
8339 @@ -1,3 +1,5 @@
8340 +#include <asm/alternative-asm.h>
8341 +
8342 # enter ECRYPT_encrypt_bytes
8343 .text
8344 .p2align 5
8345 @@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
8346 add %r11,%rsp
8347 mov %rdi,%rax
8348 mov %rsi,%rdx
8349 + pax_force_retaddr 0, 1
8350 ret
8351 # bytesatleast65:
8352 ._bytesatleast65:
8353 @@ -891,6 +894,7 @@ ECRYPT_keysetup:
8354 add %r11,%rsp
8355 mov %rdi,%rax
8356 mov %rsi,%rdx
8357 + pax_force_retaddr
8358 ret
8359 # enter ECRYPT_ivsetup
8360 .text
8361 @@ -917,4 +921,5 @@ ECRYPT_ivsetup:
8362 add %r11,%rsp
8363 mov %rdi,%rax
8364 mov %rsi,%rdx
8365 + pax_force_retaddr
8366 ret
8367 diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
8368 index 35974a5..5662ae2 100644
8369 --- a/arch/x86/crypto/twofish-x86_64-asm_64.S
8370 +++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
8371 @@ -21,6 +21,7 @@
8372 .text
8373
8374 #include <asm/asm-offsets.h>
8375 +#include <asm/alternative-asm.h>
8376
8377 #define a_offset 0
8378 #define b_offset 4
8379 @@ -269,6 +270,7 @@ twofish_enc_blk:
8380
8381 popq R1
8382 movq $1,%rax
8383 + pax_force_retaddr 0, 1
8384 ret
8385
8386 twofish_dec_blk:
8387 @@ -321,4 +323,5 @@ twofish_dec_blk:
8388
8389 popq R1
8390 movq $1,%rax
8391 + pax_force_retaddr 0, 1
8392 ret
8393 diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
8394 index 14531ab..bc68a7b 100644
8395 --- a/arch/x86/ia32/ia32_aout.c
8396 +++ b/arch/x86/ia32/ia32_aout.c
8397 @@ -169,6 +169,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
8398 unsigned long dump_start, dump_size;
8399 struct user32 dump;
8400
8401 + memset(&dump, 0, sizeof(dump));
8402 +
8403 fs = get_fs();
8404 set_fs(KERNEL_DS);
8405 has_dumped = 1;
8406 @@ -218,12 +220,6 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
8407 dump_size = dump.u_ssize << PAGE_SHIFT;
8408 DUMP_WRITE(dump_start, dump_size);
8409 }
8410 - /*
8411 - * Finally dump the task struct. Not be used by gdb, but
8412 - * could be useful
8413 - */
8414 - set_fs(KERNEL_DS);
8415 - DUMP_WRITE(current, sizeof(*current));
8416 end_coredump:
8417 set_fs(fs);
8418 return has_dumped;
8419 @@ -327,6 +323,13 @@ static int load_aout_binary(struct linux_binprm *bprm, struct pt_regs *regs)
8420 current->mm->free_area_cache = TASK_UNMAPPED_BASE;
8421 current->mm->cached_hole_size = 0;
8422
8423 + retval = setup_arg_pages(bprm, IA32_STACK_TOP, EXSTACK_DEFAULT);
8424 + if (retval < 0) {
8425 + /* Someone check-me: is this error path enough? */
8426 + send_sig(SIGKILL, current, 0);
8427 + return retval;
8428 + }
8429 +
8430 install_exec_creds(bprm);
8431 current->flags &= ~PF_FORKNOEXEC;
8432
8433 @@ -422,13 +425,6 @@ beyond_if:
8434
8435 set_brk(current->mm->start_brk, current->mm->brk);
8436
8437 - retval = setup_arg_pages(bprm, IA32_STACK_TOP, EXSTACK_DEFAULT);
8438 - if (retval < 0) {
8439 - /* Someone check-me: is this error path enough? */
8440 - send_sig(SIGKILL, current, 0);
8441 - return retval;
8442 - }
8443 -
8444 current->mm->start_stack =
8445 (unsigned long)create_aout_tables((char __user *)bprm->p, bprm);
8446 /* start thread */
8447 diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
8448 index 588a7aa..a3468b0 100644
8449 --- a/arch/x86/ia32/ia32_signal.c
8450 +++ b/arch/x86/ia32/ia32_signal.c
8451 @@ -167,7 +167,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
8452 }
8453 seg = get_fs();
8454 set_fs(KERNEL_DS);
8455 - ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
8456 + ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
8457 set_fs(seg);
8458 if (ret >= 0 && uoss_ptr) {
8459 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
8460 @@ -374,7 +374,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
8461 */
8462 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8463 size_t frame_size,
8464 - void **fpstate)
8465 + void __user **fpstate)
8466 {
8467 unsigned long sp;
8468
8469 @@ -395,7 +395,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8470
8471 if (used_math()) {
8472 sp = sp - sig_xstate_ia32_size;
8473 - *fpstate = (struct _fpstate_ia32 *) sp;
8474 + *fpstate = (struct _fpstate_ia32 __user *) sp;
8475 if (save_i387_xstate_ia32(*fpstate) < 0)
8476 return (void __user *) -1L;
8477 }
8478 @@ -403,7 +403,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8479 sp -= frame_size;
8480 /* Align the stack pointer according to the i386 ABI,
8481 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
8482 - sp = ((sp + 4) & -16ul) - 4;
8483 + sp = ((sp - 12) & -16ul) - 4;
8484 return (void __user *) sp;
8485 }
8486
8487 @@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
8488 * These are actually not used anymore, but left because some
8489 * gdb versions depend on them as a marker.
8490 */
8491 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
8492 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8493 } put_user_catch(err);
8494
8495 if (err)
8496 @@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8497 0xb8,
8498 __NR_ia32_rt_sigreturn,
8499 0x80cd,
8500 - 0,
8501 + 0
8502 };
8503
8504 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
8505 @@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8506
8507 if (ka->sa.sa_flags & SA_RESTORER)
8508 restorer = ka->sa.sa_restorer;
8509 + else if (current->mm->context.vdso)
8510 + /* Return stub is in 32bit vsyscall page */
8511 + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
8512 else
8513 - restorer = VDSO32_SYMBOL(current->mm->context.vdso,
8514 - rt_sigreturn);
8515 + restorer = &frame->retcode;
8516 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
8517
8518 /*
8519 * Not actually used anymore, but left because some gdb
8520 * versions need it.
8521 */
8522 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
8523 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8524 } put_user_catch(err);
8525
8526 if (err)
8527 diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
8528 index 4edd8eb..29124b4 100644
8529 --- a/arch/x86/ia32/ia32entry.S
8530 +++ b/arch/x86/ia32/ia32entry.S
8531 @@ -13,7 +13,9 @@
8532 #include <asm/thread_info.h>
8533 #include <asm/segment.h>
8534 #include <asm/irqflags.h>
8535 +#include <asm/pgtable.h>
8536 #include <linux/linkage.h>
8537 +#include <asm/alternative-asm.h>
8538
8539 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
8540 #include <linux/elf-em.h>
8541 @@ -93,6 +95,32 @@ ENTRY(native_irq_enable_sysexit)
8542 ENDPROC(native_irq_enable_sysexit)
8543 #endif
8544
8545 + .macro pax_enter_kernel_user
8546 + pax_set_fptr_mask
8547 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8548 + call pax_enter_kernel_user
8549 +#endif
8550 + .endm
8551 +
8552 + .macro pax_exit_kernel_user
8553 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8554 + call pax_exit_kernel_user
8555 +#endif
8556 +#ifdef CONFIG_PAX_RANDKSTACK
8557 + pushq %rax
8558 + pushq %r11
8559 + call pax_randomize_kstack
8560 + popq %r11
8561 + popq %rax
8562 +#endif
8563 + .endm
8564 +
8565 +.macro pax_erase_kstack
8566 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
8567 + call pax_erase_kstack
8568 +#endif
8569 +.endm
8570 +
8571 /*
8572 * 32bit SYSENTER instruction entry.
8573 *
8574 @@ -119,12 +147,6 @@ ENTRY(ia32_sysenter_target)
8575 CFI_REGISTER rsp,rbp
8576 SWAPGS_UNSAFE_STACK
8577 movq PER_CPU_VAR(kernel_stack), %rsp
8578 - addq $(KERNEL_STACK_OFFSET),%rsp
8579 - /*
8580 - * No need to follow this irqs on/off section: the syscall
8581 - * disabled irqs, here we enable it straight after entry:
8582 - */
8583 - ENABLE_INTERRUPTS(CLBR_NONE)
8584 movl %ebp,%ebp /* zero extension */
8585 pushq $__USER32_DS
8586 CFI_ADJUST_CFA_OFFSET 8
8587 @@ -135,28 +157,42 @@ ENTRY(ia32_sysenter_target)
8588 pushfq
8589 CFI_ADJUST_CFA_OFFSET 8
8590 /*CFI_REL_OFFSET rflags,0*/
8591 - movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
8592 - CFI_REGISTER rip,r10
8593 + orl $X86_EFLAGS_IF,(%rsp)
8594 + GET_THREAD_INFO(%r11)
8595 + movl TI_sysenter_return(%r11), %r11d
8596 + CFI_REGISTER rip,r11
8597 pushq $__USER32_CS
8598 CFI_ADJUST_CFA_OFFSET 8
8599 /*CFI_REL_OFFSET cs,0*/
8600 movl %eax, %eax
8601 - pushq %r10
8602 + pushq %r11
8603 CFI_ADJUST_CFA_OFFSET 8
8604 CFI_REL_OFFSET rip,0
8605 pushq %rax
8606 CFI_ADJUST_CFA_OFFSET 8
8607 cld
8608 SAVE_ARGS 0,0,1
8609 + pax_enter_kernel_user
8610 + /*
8611 + * No need to follow this irqs on/off section: the syscall
8612 + * disabled irqs, here we enable it straight after entry:
8613 + */
8614 + ENABLE_INTERRUPTS(CLBR_NONE)
8615 /* no need to do an access_ok check here because rbp has been
8616 32bit zero extended */
8617 +
8618 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8619 + mov $PAX_USER_SHADOW_BASE,%r11
8620 + add %r11,%rbp
8621 +#endif
8622 +
8623 1: movl (%rbp),%ebp
8624 .section __ex_table,"a"
8625 .quad 1b,ia32_badarg
8626 .previous
8627 - GET_THREAD_INFO(%r10)
8628 - orl $TS_COMPAT,TI_status(%r10)
8629 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
8630 + GET_THREAD_INFO(%r11)
8631 + orl $TS_COMPAT,TI_status(%r11)
8632 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8633 CFI_REMEMBER_STATE
8634 jnz sysenter_tracesys
8635 cmpq $(IA32_NR_syscalls-1),%rax
8636 @@ -166,13 +202,15 @@ sysenter_do_call:
8637 sysenter_dispatch:
8638 call *ia32_sys_call_table(,%rax,8)
8639 movq %rax,RAX-ARGOFFSET(%rsp)
8640 - GET_THREAD_INFO(%r10)
8641 + GET_THREAD_INFO(%r11)
8642 DISABLE_INTERRUPTS(CLBR_NONE)
8643 TRACE_IRQS_OFF
8644 - testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
8645 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8646 jnz sysexit_audit
8647 sysexit_from_sys_call:
8648 - andl $~TS_COMPAT,TI_status(%r10)
8649 + pax_exit_kernel_user
8650 + pax_erase_kstack
8651 + andl $~TS_COMPAT,TI_status(%r11)
8652 /* clear IF, that popfq doesn't enable interrupts early */
8653 andl $~0x200,EFLAGS-R11(%rsp)
8654 movl RIP-R11(%rsp),%edx /* User %eip */
8655 @@ -200,6 +238,9 @@ sysexit_from_sys_call:
8656 movl %eax,%esi /* 2nd arg: syscall number */
8657 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
8658 call audit_syscall_entry
8659 +
8660 + pax_erase_kstack
8661 +
8662 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
8663 cmpq $(IA32_NR_syscalls-1),%rax
8664 ja ia32_badsys
8665 @@ -211,7 +252,7 @@ sysexit_from_sys_call:
8666 .endm
8667
8668 .macro auditsys_exit exit
8669 - testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
8670 + testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8671 jnz ia32_ret_from_sys_call
8672 TRACE_IRQS_ON
8673 sti
8674 @@ -221,12 +262,12 @@ sysexit_from_sys_call:
8675 movzbl %al,%edi /* zero-extend that into %edi */
8676 inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
8677 call audit_syscall_exit
8678 - GET_THREAD_INFO(%r10)
8679 + GET_THREAD_INFO(%r11)
8680 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall return value */
8681 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
8682 cli
8683 TRACE_IRQS_OFF
8684 - testl %edi,TI_flags(%r10)
8685 + testl %edi,TI_flags(%r11)
8686 jz \exit
8687 CLEAR_RREGS -ARGOFFSET
8688 jmp int_with_check
8689 @@ -244,7 +285,7 @@ sysexit_audit:
8690
8691 sysenter_tracesys:
8692 #ifdef CONFIG_AUDITSYSCALL
8693 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
8694 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8695 jz sysenter_auditsys
8696 #endif
8697 SAVE_REST
8698 @@ -252,6 +293,9 @@ sysenter_tracesys:
8699 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
8700 movq %rsp,%rdi /* &pt_regs -> arg1 */
8701 call syscall_trace_enter
8702 +
8703 + pax_erase_kstack
8704 +
8705 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
8706 RESTORE_REST
8707 cmpq $(IA32_NR_syscalls-1),%rax
8708 @@ -283,19 +327,20 @@ ENDPROC(ia32_sysenter_target)
8709 ENTRY(ia32_cstar_target)
8710 CFI_STARTPROC32 simple
8711 CFI_SIGNAL_FRAME
8712 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
8713 + CFI_DEF_CFA rsp,0
8714 CFI_REGISTER rip,rcx
8715 /*CFI_REGISTER rflags,r11*/
8716 SWAPGS_UNSAFE_STACK
8717 movl %esp,%r8d
8718 CFI_REGISTER rsp,r8
8719 movq PER_CPU_VAR(kernel_stack),%rsp
8720 + SAVE_ARGS 8*6,1,1
8721 + pax_enter_kernel_user
8722 /*
8723 * No need to follow this irqs on/off section: the syscall
8724 * disabled irqs and here we enable it straight after entry:
8725 */
8726 ENABLE_INTERRUPTS(CLBR_NONE)
8727 - SAVE_ARGS 8,1,1
8728 movl %eax,%eax /* zero extension */
8729 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
8730 movq %rcx,RIP-ARGOFFSET(%rsp)
8731 @@ -311,13 +356,19 @@ ENTRY(ia32_cstar_target)
8732 /* no need to do an access_ok check here because r8 has been
8733 32bit zero extended */
8734 /* hardware stack frame is complete now */
8735 +
8736 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8737 + mov $PAX_USER_SHADOW_BASE,%r11
8738 + add %r11,%r8
8739 +#endif
8740 +
8741 1: movl (%r8),%r9d
8742 .section __ex_table,"a"
8743 .quad 1b,ia32_badarg
8744 .previous
8745 - GET_THREAD_INFO(%r10)
8746 - orl $TS_COMPAT,TI_status(%r10)
8747 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
8748 + GET_THREAD_INFO(%r11)
8749 + orl $TS_COMPAT,TI_status(%r11)
8750 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8751 CFI_REMEMBER_STATE
8752 jnz cstar_tracesys
8753 cmpq $IA32_NR_syscalls-1,%rax
8754 @@ -327,13 +378,15 @@ cstar_do_call:
8755 cstar_dispatch:
8756 call *ia32_sys_call_table(,%rax,8)
8757 movq %rax,RAX-ARGOFFSET(%rsp)
8758 - GET_THREAD_INFO(%r10)
8759 + GET_THREAD_INFO(%r11)
8760 DISABLE_INTERRUPTS(CLBR_NONE)
8761 TRACE_IRQS_OFF
8762 - testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
8763 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8764 jnz sysretl_audit
8765 sysretl_from_sys_call:
8766 - andl $~TS_COMPAT,TI_status(%r10)
8767 + pax_exit_kernel_user
8768 + pax_erase_kstack
8769 + andl $~TS_COMPAT,TI_status(%r11)
8770 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
8771 movl RIP-ARGOFFSET(%rsp),%ecx
8772 CFI_REGISTER rip,rcx
8773 @@ -361,7 +414,7 @@ sysretl_audit:
8774
8775 cstar_tracesys:
8776 #ifdef CONFIG_AUDITSYSCALL
8777 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
8778 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8779 jz cstar_auditsys
8780 #endif
8781 xchgl %r9d,%ebp
8782 @@ -370,6 +423,9 @@ cstar_tracesys:
8783 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
8784 movq %rsp,%rdi /* &pt_regs -> arg1 */
8785 call syscall_trace_enter
8786 +
8787 + pax_erase_kstack
8788 +
8789 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
8790 RESTORE_REST
8791 xchgl %ebp,%r9d
8792 @@ -415,11 +471,6 @@ ENTRY(ia32_syscall)
8793 CFI_REL_OFFSET rip,RIP-RIP
8794 PARAVIRT_ADJUST_EXCEPTION_FRAME
8795 SWAPGS
8796 - /*
8797 - * No need to follow this irqs on/off section: the syscall
8798 - * disabled irqs and here we enable it straight after entry:
8799 - */
8800 - ENABLE_INTERRUPTS(CLBR_NONE)
8801 movl %eax,%eax
8802 pushq %rax
8803 CFI_ADJUST_CFA_OFFSET 8
8804 @@ -427,9 +478,15 @@ ENTRY(ia32_syscall)
8805 /* note the registers are not zero extended to the sf.
8806 this could be a problem. */
8807 SAVE_ARGS 0,0,1
8808 - GET_THREAD_INFO(%r10)
8809 - orl $TS_COMPAT,TI_status(%r10)
8810 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
8811 + pax_enter_kernel_user
8812 + /*
8813 + * No need to follow this irqs on/off section: the syscall
8814 + * disabled irqs and here we enable it straight after entry:
8815 + */
8816 + ENABLE_INTERRUPTS(CLBR_NONE)
8817 + GET_THREAD_INFO(%r11)
8818 + orl $TS_COMPAT,TI_status(%r11)
8819 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8820 jnz ia32_tracesys
8821 cmpq $(IA32_NR_syscalls-1),%rax
8822 ja ia32_badsys
8823 @@ -448,6 +505,9 @@ ia32_tracesys:
8824 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
8825 movq %rsp,%rdi /* &pt_regs -> arg1 */
8826 call syscall_trace_enter
8827 +
8828 + pax_erase_kstack
8829 +
8830 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
8831 RESTORE_REST
8832 cmpq $(IA32_NR_syscalls-1),%rax
8833 @@ -462,6 +522,7 @@ ia32_badsys:
8834
8835 quiet_ni_syscall:
8836 movq $-ENOSYS,%rax
8837 + pax_force_retaddr
8838 ret
8839 CFI_ENDPROC
8840
8841 diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
8842 index 016218c..47ccbdd 100644
8843 --- a/arch/x86/ia32/sys_ia32.c
8844 +++ b/arch/x86/ia32/sys_ia32.c
8845 @@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
8846 */
8847 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
8848 {
8849 - typeof(ubuf->st_uid) uid = 0;
8850 - typeof(ubuf->st_gid) gid = 0;
8851 + typeof(((struct stat64 *)0)->st_uid) uid = 0;
8852 + typeof(((struct stat64 *)0)->st_gid) gid = 0;
8853 SET_UID(uid, stat->uid);
8854 SET_GID(gid, stat->gid);
8855 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
8856 @@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
8857 }
8858 set_fs(KERNEL_DS);
8859 ret = sys_rt_sigprocmask(how,
8860 - set ? (sigset_t __user *)&s : NULL,
8861 - oset ? (sigset_t __user *)&s : NULL,
8862 + set ? (sigset_t __force_user *)&s : NULL,
8863 + oset ? (sigset_t __force_user *)&s : NULL,
8864 sigsetsize);
8865 set_fs(old_fs);
8866 if (ret)
8867 @@ -371,7 +371,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
8868 mm_segment_t old_fs = get_fs();
8869
8870 set_fs(KERNEL_DS);
8871 - ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
8872 + ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
8873 set_fs(old_fs);
8874 if (put_compat_timespec(&t, interval))
8875 return -EFAULT;
8876 @@ -387,7 +387,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
8877 mm_segment_t old_fs = get_fs();
8878
8879 set_fs(KERNEL_DS);
8880 - ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
8881 + ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
8882 set_fs(old_fs);
8883 if (!ret) {
8884 switch (_NSIG_WORDS) {
8885 @@ -412,7 +412,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
8886 if (copy_siginfo_from_user32(&info, uinfo))
8887 return -EFAULT;
8888 set_fs(KERNEL_DS);
8889 - ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
8890 + ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
8891 set_fs(old_fs);
8892 return ret;
8893 }
8894 @@ -513,7 +513,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
8895 return -EFAULT;
8896
8897 set_fs(KERNEL_DS);
8898 - ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
8899 + ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
8900 count);
8901 set_fs(old_fs);
8902
8903 diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
8904 index e2077d3..17d07ad 100644
8905 --- a/arch/x86/include/asm/alternative-asm.h
8906 +++ b/arch/x86/include/asm/alternative-asm.h
8907 @@ -8,10 +8,10 @@
8908
8909 #ifdef CONFIG_SMP
8910 .macro LOCK_PREFIX
8911 -1: lock
8912 +672: lock
8913 .section .smp_locks,"a"
8914 .align 4
8915 - X86_ALIGN 1b
8916 + X86_ALIGN 672b
8917 .previous
8918 .endm
8919 #else
8920 @@ -19,4 +19,43 @@
8921 .endm
8922 #endif
8923
8924 +#ifdef KERNEXEC_PLUGIN
8925 + .macro pax_force_retaddr_bts rip=0
8926 + btsq $63,\rip(%rsp)
8927 + .endm
8928 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
8929 + .macro pax_force_retaddr rip=0, reload=0
8930 + btsq $63,\rip(%rsp)
8931 + .endm
8932 + .macro pax_force_fptr ptr
8933 + btsq $63,\ptr
8934 + .endm
8935 + .macro pax_set_fptr_mask
8936 + .endm
8937 +#endif
8938 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
8939 + .macro pax_force_retaddr rip=0, reload=0
8940 + .if \reload
8941 + pax_set_fptr_mask
8942 + .endif
8943 + orq %r10,\rip(%rsp)
8944 + .endm
8945 + .macro pax_force_fptr ptr
8946 + orq %r10,\ptr
8947 + .endm
8948 + .macro pax_set_fptr_mask
8949 + movabs $0x8000000000000000,%r10
8950 + .endm
8951 +#endif
8952 +#else
8953 + .macro pax_force_retaddr rip=0, reload=0
8954 + .endm
8955 + .macro pax_force_fptr ptr
8956 + .endm
8957 + .macro pax_force_retaddr_bts rip=0
8958 + .endm
8959 + .macro pax_set_fptr_mask
8960 + .endm
8961 +#endif
8962 +
8963 #endif /* __ASSEMBLY__ */
8964 diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
8965 index c240efc..fdfadf3 100644
8966 --- a/arch/x86/include/asm/alternative.h
8967 +++ b/arch/x86/include/asm/alternative.h
8968 @@ -85,7 +85,7 @@ static inline void alternatives_smp_switch(int smp) {}
8969 " .byte 662b-661b\n" /* sourcelen */ \
8970 " .byte 664f-663f\n" /* replacementlen */ \
8971 ".previous\n" \
8972 - ".section .altinstr_replacement, \"ax\"\n" \
8973 + ".section .altinstr_replacement, \"a\"\n" \
8974 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
8975 ".previous"
8976
8977 diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
8978 index 474d80d..1f97d58 100644
8979 --- a/arch/x86/include/asm/apic.h
8980 +++ b/arch/x86/include/asm/apic.h
8981 @@ -46,7 +46,7 @@ static inline void generic_apic_probe(void)
8982
8983 #ifdef CONFIG_X86_LOCAL_APIC
8984
8985 -extern unsigned int apic_verbosity;
8986 +extern int apic_verbosity;
8987 extern int local_apic_timer_c2_ok;
8988
8989 extern int disable_apic;
8990 diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
8991 index 20370c6..a2eb9b0 100644
8992 --- a/arch/x86/include/asm/apm.h
8993 +++ b/arch/x86/include/asm/apm.h
8994 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
8995 __asm__ __volatile__(APM_DO_ZERO_SEGS
8996 "pushl %%edi\n\t"
8997 "pushl %%ebp\n\t"
8998 - "lcall *%%cs:apm_bios_entry\n\t"
8999 + "lcall *%%ss:apm_bios_entry\n\t"
9000 "setc %%al\n\t"
9001 "popl %%ebp\n\t"
9002 "popl %%edi\n\t"
9003 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
9004 __asm__ __volatile__(APM_DO_ZERO_SEGS
9005 "pushl %%edi\n\t"
9006 "pushl %%ebp\n\t"
9007 - "lcall *%%cs:apm_bios_entry\n\t"
9008 + "lcall *%%ss:apm_bios_entry\n\t"
9009 "setc %%bl\n\t"
9010 "popl %%ebp\n\t"
9011 "popl %%edi\n\t"
9012 diff --git a/arch/x86/include/asm/atomic_32.h b/arch/x86/include/asm/atomic_32.h
9013 index dc5a667..939040c 100644
9014 --- a/arch/x86/include/asm/atomic_32.h
9015 +++ b/arch/x86/include/asm/atomic_32.h
9016 @@ -25,6 +25,17 @@ static inline int atomic_read(const atomic_t *v)
9017 }
9018
9019 /**
9020 + * atomic_read_unchecked - read atomic variable
9021 + * @v: pointer of type atomic_unchecked_t
9022 + *
9023 + * Atomically reads the value of @v.
9024 + */
9025 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
9026 +{
9027 + return v->counter;
9028 +}
9029 +
9030 +/**
9031 * atomic_set - set atomic variable
9032 * @v: pointer of type atomic_t
9033 * @i: required value
9034 @@ -37,6 +48,18 @@ static inline void atomic_set(atomic_t *v, int i)
9035 }
9036
9037 /**
9038 + * atomic_set_unchecked - set atomic variable
9039 + * @v: pointer of type atomic_unchecked_t
9040 + * @i: required value
9041 + *
9042 + * Atomically sets the value of @v to @i.
9043 + */
9044 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
9045 +{
9046 + v->counter = i;
9047 +}
9048 +
9049 +/**
9050 * atomic_add - add integer to atomic variable
9051 * @i: integer value to add
9052 * @v: pointer of type atomic_t
9053 @@ -45,7 +68,29 @@ static inline void atomic_set(atomic_t *v, int i)
9054 */
9055 static inline void atomic_add(int i, atomic_t *v)
9056 {
9057 - asm volatile(LOCK_PREFIX "addl %1,%0"
9058 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
9059 +
9060 +#ifdef CONFIG_PAX_REFCOUNT
9061 + "jno 0f\n"
9062 + LOCK_PREFIX "subl %1,%0\n"
9063 + "int $4\n0:\n"
9064 + _ASM_EXTABLE(0b, 0b)
9065 +#endif
9066 +
9067 + : "+m" (v->counter)
9068 + : "ir" (i));
9069 +}
9070 +
9071 +/**
9072 + * atomic_add_unchecked - add integer to atomic variable
9073 + * @i: integer value to add
9074 + * @v: pointer of type atomic_unchecked_t
9075 + *
9076 + * Atomically adds @i to @v.
9077 + */
9078 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
9079 +{
9080 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
9081 : "+m" (v->counter)
9082 : "ir" (i));
9083 }
9084 @@ -59,7 +104,29 @@ static inline void atomic_add(int i, atomic_t *v)
9085 */
9086 static inline void atomic_sub(int i, atomic_t *v)
9087 {
9088 - asm volatile(LOCK_PREFIX "subl %1,%0"
9089 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
9090 +
9091 +#ifdef CONFIG_PAX_REFCOUNT
9092 + "jno 0f\n"
9093 + LOCK_PREFIX "addl %1,%0\n"
9094 + "int $4\n0:\n"
9095 + _ASM_EXTABLE(0b, 0b)
9096 +#endif
9097 +
9098 + : "+m" (v->counter)
9099 + : "ir" (i));
9100 +}
9101 +
9102 +/**
9103 + * atomic_sub_unchecked - subtract integer from atomic variable
9104 + * @i: integer value to subtract
9105 + * @v: pointer of type atomic_unchecked_t
9106 + *
9107 + * Atomically subtracts @i from @v.
9108 + */
9109 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
9110 +{
9111 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
9112 : "+m" (v->counter)
9113 : "ir" (i));
9114 }
9115 @@ -77,7 +144,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
9116 {
9117 unsigned char c;
9118
9119 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
9120 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
9121 +
9122 +#ifdef CONFIG_PAX_REFCOUNT
9123 + "jno 0f\n"
9124 + LOCK_PREFIX "addl %2,%0\n"
9125 + "int $4\n0:\n"
9126 + _ASM_EXTABLE(0b, 0b)
9127 +#endif
9128 +
9129 + "sete %1\n"
9130 : "+m" (v->counter), "=qm" (c)
9131 : "ir" (i) : "memory");
9132 return c;
9133 @@ -91,7 +167,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
9134 */
9135 static inline void atomic_inc(atomic_t *v)
9136 {
9137 - asm volatile(LOCK_PREFIX "incl %0"
9138 + asm volatile(LOCK_PREFIX "incl %0\n"
9139 +
9140 +#ifdef CONFIG_PAX_REFCOUNT
9141 + "jno 0f\n"
9142 + LOCK_PREFIX "decl %0\n"
9143 + "int $4\n0:\n"
9144 + _ASM_EXTABLE(0b, 0b)
9145 +#endif
9146 +
9147 + : "+m" (v->counter));
9148 +}
9149 +
9150 +/**
9151 + * atomic_inc_unchecked - increment atomic variable
9152 + * @v: pointer of type atomic_unchecked_t
9153 + *
9154 + * Atomically increments @v by 1.
9155 + */
9156 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
9157 +{
9158 + asm volatile(LOCK_PREFIX "incl %0\n"
9159 : "+m" (v->counter));
9160 }
9161
9162 @@ -103,7 +199,27 @@ static inline void atomic_inc(atomic_t *v)
9163 */
9164 static inline void atomic_dec(atomic_t *v)
9165 {
9166 - asm volatile(LOCK_PREFIX "decl %0"
9167 + asm volatile(LOCK_PREFIX "decl %0\n"
9168 +
9169 +#ifdef CONFIG_PAX_REFCOUNT
9170 + "jno 0f\n"
9171 + LOCK_PREFIX "incl %0\n"
9172 + "int $4\n0:\n"
9173 + _ASM_EXTABLE(0b, 0b)
9174 +#endif
9175 +
9176 + : "+m" (v->counter));
9177 +}
9178 +
9179 +/**
9180 + * atomic_dec_unchecked - decrement atomic variable
9181 + * @v: pointer of type atomic_unchecked_t
9182 + *
9183 + * Atomically decrements @v by 1.
9184 + */
9185 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
9186 +{
9187 + asm volatile(LOCK_PREFIX "decl %0\n"
9188 : "+m" (v->counter));
9189 }
9190
9191 @@ -119,7 +235,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
9192 {
9193 unsigned char c;
9194
9195 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
9196 + asm volatile(LOCK_PREFIX "decl %0\n"
9197 +
9198 +#ifdef CONFIG_PAX_REFCOUNT
9199 + "jno 0f\n"
9200 + LOCK_PREFIX "incl %0\n"
9201 + "int $4\n0:\n"
9202 + _ASM_EXTABLE(0b, 0b)
9203 +#endif
9204 +
9205 + "sete %1\n"
9206 : "+m" (v->counter), "=qm" (c)
9207 : : "memory");
9208 return c != 0;
9209 @@ -137,7 +262,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
9210 {
9211 unsigned char c;
9212
9213 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
9214 + asm volatile(LOCK_PREFIX "incl %0\n"
9215 +
9216 +#ifdef CONFIG_PAX_REFCOUNT
9217 + "jno 0f\n"
9218 + LOCK_PREFIX "decl %0\n"
9219 + "into\n0:\n"
9220 + _ASM_EXTABLE(0b, 0b)
9221 +#endif
9222 +
9223 + "sete %1\n"
9224 + : "+m" (v->counter), "=qm" (c)
9225 + : : "memory");
9226 + return c != 0;
9227 +}
9228 +
9229 +/**
9230 + * atomic_inc_and_test_unchecked - increment and test
9231 + * @v: pointer of type atomic_unchecked_t
9232 + *
9233 + * Atomically increments @v by 1
9234 + * and returns true if the result is zero, or false for all
9235 + * other cases.
9236 + */
9237 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
9238 +{
9239 + unsigned char c;
9240 +
9241 + asm volatile(LOCK_PREFIX "incl %0\n"
9242 + "sete %1\n"
9243 : "+m" (v->counter), "=qm" (c)
9244 : : "memory");
9245 return c != 0;
9246 @@ -156,7 +309,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
9247 {
9248 unsigned char c;
9249
9250 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
9251 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
9252 +
9253 +#ifdef CONFIG_PAX_REFCOUNT
9254 + "jno 0f\n"
9255 + LOCK_PREFIX "subl %2,%0\n"
9256 + "int $4\n0:\n"
9257 + _ASM_EXTABLE(0b, 0b)
9258 +#endif
9259 +
9260 + "sets %1\n"
9261 : "+m" (v->counter), "=qm" (c)
9262 : "ir" (i) : "memory");
9263 return c;
9264 @@ -179,7 +341,15 @@ static inline int atomic_add_return(int i, atomic_t *v)
9265 #endif
9266 /* Modern 486+ processor */
9267 __i = i;
9268 - asm volatile(LOCK_PREFIX "xaddl %0, %1"
9269 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
9270 +
9271 +#ifdef CONFIG_PAX_REFCOUNT
9272 + "jno 0f\n"
9273 + "movl %0, %1\n"
9274 + "int $4\n0:\n"
9275 + _ASM_EXTABLE(0b, 0b)
9276 +#endif
9277 +
9278 : "+r" (i), "+m" (v->counter)
9279 : : "memory");
9280 return i + __i;
9281 @@ -195,6 +365,38 @@ no_xadd: /* Legacy 386 processor */
9282 }
9283
9284 /**
9285 + * atomic_add_return_unchecked - add integer and return
9286 + * @v: pointer of type atomic_unchecked_t
9287 + * @i: integer value to add
9288 + *
9289 + * Atomically adds @i to @v and returns @i + @v
9290 + */
9291 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
9292 +{
9293 + int __i;
9294 +#ifdef CONFIG_M386
9295 + unsigned long flags;
9296 + if (unlikely(boot_cpu_data.x86 <= 3))
9297 + goto no_xadd;
9298 +#endif
9299 + /* Modern 486+ processor */
9300 + __i = i;
9301 + asm volatile(LOCK_PREFIX "xaddl %0, %1"
9302 + : "+r" (i), "+m" (v->counter)
9303 + : : "memory");
9304 + return i + __i;
9305 +
9306 +#ifdef CONFIG_M386
9307 +no_xadd: /* Legacy 386 processor */
9308 + local_irq_save(flags);
9309 + __i = atomic_read_unchecked(v);
9310 + atomic_set_unchecked(v, i + __i);
9311 + local_irq_restore(flags);
9312 + return i + __i;
9313 +#endif
9314 +}
9315 +
9316 +/**
9317 * atomic_sub_return - subtract integer and return
9318 * @v: pointer of type atomic_t
9319 * @i: integer value to subtract
9320 @@ -211,11 +413,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
9321 return cmpxchg(&v->counter, old, new);
9322 }
9323
9324 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
9325 +{
9326 + return cmpxchg(&v->counter, old, new);
9327 +}
9328 +
9329 static inline int atomic_xchg(atomic_t *v, int new)
9330 {
9331 return xchg(&v->counter, new);
9332 }
9333
9334 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
9335 +{
9336 + return xchg(&v->counter, new);
9337 +}
9338 +
9339 /**
9340 * atomic_add_unless - add unless the number is already a given value
9341 * @v: pointer of type atomic_t
9342 @@ -227,22 +439,39 @@ static inline int atomic_xchg(atomic_t *v, int new)
9343 */
9344 static inline int atomic_add_unless(atomic_t *v, int a, int u)
9345 {
9346 - int c, old;
9347 + int c, old, new;
9348 c = atomic_read(v);
9349 for (;;) {
9350 - if (unlikely(c == (u)))
9351 + if (unlikely(c == u))
9352 break;
9353 - old = atomic_cmpxchg((v), c, c + (a));
9354 +
9355 + asm volatile("addl %2,%0\n"
9356 +
9357 +#ifdef CONFIG_PAX_REFCOUNT
9358 + "jno 0f\n"
9359 + "subl %2,%0\n"
9360 + "int $4\n0:\n"
9361 + _ASM_EXTABLE(0b, 0b)
9362 +#endif
9363 +
9364 + : "=r" (new)
9365 + : "0" (c), "ir" (a));
9366 +
9367 + old = atomic_cmpxchg(v, c, new);
9368 if (likely(old == c))
9369 break;
9370 c = old;
9371 }
9372 - return c != (u);
9373 + return c != u;
9374 }
9375
9376 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
9377
9378 #define atomic_inc_return(v) (atomic_add_return(1, v))
9379 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
9380 +{
9381 + return atomic_add_return_unchecked(1, v);
9382 +}
9383 #define atomic_dec_return(v) (atomic_sub_return(1, v))
9384
9385 /* These are x86-specific, used by some header files */
9386 @@ -266,9 +495,18 @@ typedef struct {
9387 u64 __aligned(8) counter;
9388 } atomic64_t;
9389
9390 +#ifdef CONFIG_PAX_REFCOUNT
9391 +typedef struct {
9392 + u64 __aligned(8) counter;
9393 +} atomic64_unchecked_t;
9394 +#else
9395 +typedef atomic64_t atomic64_unchecked_t;
9396 +#endif
9397 +
9398 #define ATOMIC64_INIT(val) { (val) }
9399
9400 extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
9401 +extern u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val);
9402
9403 /**
9404 * atomic64_xchg - xchg atomic64 variable
9405 @@ -279,6 +517,7 @@ extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
9406 * the old value.
9407 */
9408 extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
9409 +extern u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
9410
9411 /**
9412 * atomic64_set - set atomic64 variable
9413 @@ -290,6 +529,15 @@ extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
9414 extern void atomic64_set(atomic64_t *ptr, u64 new_val);
9415
9416 /**
9417 + * atomic64_unchecked_set - set atomic64 variable
9418 + * @ptr: pointer to type atomic64_unchecked_t
9419 + * @new_val: value to assign
9420 + *
9421 + * Atomically sets the value of @ptr to @new_val.
9422 + */
9423 +extern void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
9424 +
9425 +/**
9426 * atomic64_read - read atomic64 variable
9427 * @ptr: pointer to type atomic64_t
9428 *
9429 @@ -317,7 +565,33 @@ static inline u64 atomic64_read(atomic64_t *ptr)
9430 return res;
9431 }
9432
9433 -extern u64 atomic64_read(atomic64_t *ptr);
9434 +/**
9435 + * atomic64_read_unchecked - read atomic64 variable
9436 + * @ptr: pointer to type atomic64_unchecked_t
9437 + *
9438 + * Atomically reads the value of @ptr and returns it.
9439 + */
9440 +static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *ptr)
9441 +{
9442 + u64 res;
9443 +
9444 + /*
9445 + * Note, we inline this atomic64_unchecked_t primitive because
9446 + * it only clobbers EAX/EDX and leaves the others
9447 + * untouched. We also (somewhat subtly) rely on the
9448 + * fact that cmpxchg8b returns the current 64-bit value
9449 + * of the memory location we are touching:
9450 + */
9451 + asm volatile(
9452 + "mov %%ebx, %%eax\n\t"
9453 + "mov %%ecx, %%edx\n\t"
9454 + LOCK_PREFIX "cmpxchg8b %1\n"
9455 + : "=&A" (res)
9456 + : "m" (*ptr)
9457 + );
9458 +
9459 + return res;
9460 +}
9461
9462 /**
9463 * atomic64_add_return - add and return
9464 @@ -332,8 +606,11 @@ extern u64 atomic64_add_return(u64 delta, atomic64_t *ptr);
9465 * Other variants with different arithmetic operators:
9466 */
9467 extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr);
9468 +extern u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr);
9469 extern u64 atomic64_inc_return(atomic64_t *ptr);
9470 +extern u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr);
9471 extern u64 atomic64_dec_return(atomic64_t *ptr);
9472 +extern u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr);
9473
9474 /**
9475 * atomic64_add - add integer to atomic64 variable
9476 @@ -345,6 +622,15 @@ extern u64 atomic64_dec_return(atomic64_t *ptr);
9477 extern void atomic64_add(u64 delta, atomic64_t *ptr);
9478
9479 /**
9480 + * atomic64_add_unchecked - add integer to atomic64 variable
9481 + * @delta: integer value to add
9482 + * @ptr: pointer to type atomic64_unchecked_t
9483 + *
9484 + * Atomically adds @delta to @ptr.
9485 + */
9486 +extern void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr);
9487 +
9488 +/**
9489 * atomic64_sub - subtract the atomic64 variable
9490 * @delta: integer value to subtract
9491 * @ptr: pointer to type atomic64_t
9492 @@ -354,6 +640,15 @@ extern void atomic64_add(u64 delta, atomic64_t *ptr);
9493 extern void atomic64_sub(u64 delta, atomic64_t *ptr);
9494
9495 /**
9496 + * atomic64_sub_unchecked - subtract the atomic64 variable
9497 + * @delta: integer value to subtract
9498 + * @ptr: pointer to type atomic64_unchecked_t
9499 + *
9500 + * Atomically subtracts @delta from @ptr.
9501 + */
9502 +extern void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr);
9503 +
9504 +/**
9505 * atomic64_sub_and_test - subtract value from variable and test result
9506 * @delta: integer value to subtract
9507 * @ptr: pointer to type atomic64_t
9508 @@ -373,6 +668,14 @@ extern int atomic64_sub_and_test(u64 delta, atomic64_t *ptr);
9509 extern void atomic64_inc(atomic64_t *ptr);
9510
9511 /**
9512 + * atomic64_inc_unchecked - increment atomic64 variable
9513 + * @ptr: pointer to type atomic64_unchecked_t
9514 + *
9515 + * Atomically increments @ptr by 1.
9516 + */
9517 +extern void atomic64_inc_unchecked(atomic64_unchecked_t *ptr);
9518 +
9519 +/**
9520 * atomic64_dec - decrement atomic64 variable
9521 * @ptr: pointer to type atomic64_t
9522 *
9523 @@ -381,6 +684,14 @@ extern void atomic64_inc(atomic64_t *ptr);
9524 extern void atomic64_dec(atomic64_t *ptr);
9525
9526 /**
9527 + * atomic64_dec_unchecked - decrement atomic64 variable
9528 + * @ptr: pointer to type atomic64_unchecked_t
9529 + *
9530 + * Atomically decrements @ptr by 1.
9531 + */
9532 +extern void atomic64_dec_unchecked(atomic64_unchecked_t *ptr);
9533 +
9534 +/**
9535 * atomic64_dec_and_test - decrement and test
9536 * @ptr: pointer to type atomic64_t
9537 *
9538 diff --git a/arch/x86/include/asm/atomic_64.h b/arch/x86/include/asm/atomic_64.h
9539 index d605dc2..fafd7bd 100644
9540 --- a/arch/x86/include/asm/atomic_64.h
9541 +++ b/arch/x86/include/asm/atomic_64.h
9542 @@ -24,6 +24,17 @@ static inline int atomic_read(const atomic_t *v)
9543 }
9544
9545 /**
9546 + * atomic_read_unchecked - read atomic variable
9547 + * @v: pointer of type atomic_unchecked_t
9548 + *
9549 + * Atomically reads the value of @v.
9550 + */
9551 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
9552 +{
9553 + return v->counter;
9554 +}
9555 +
9556 +/**
9557 * atomic_set - set atomic variable
9558 * @v: pointer of type atomic_t
9559 * @i: required value
9560 @@ -36,6 +47,18 @@ static inline void atomic_set(atomic_t *v, int i)
9561 }
9562
9563 /**
9564 + * atomic_set_unchecked - set atomic variable
9565 + * @v: pointer of type atomic_unchecked_t
9566 + * @i: required value
9567 + *
9568 + * Atomically sets the value of @v to @i.
9569 + */
9570 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
9571 +{
9572 + v->counter = i;
9573 +}
9574 +
9575 +/**
9576 * atomic_add - add integer to atomic variable
9577 * @i: integer value to add
9578 * @v: pointer of type atomic_t
9579 @@ -44,7 +67,29 @@ static inline void atomic_set(atomic_t *v, int i)
9580 */
9581 static inline void atomic_add(int i, atomic_t *v)
9582 {
9583 - asm volatile(LOCK_PREFIX "addl %1,%0"
9584 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
9585 +
9586 +#ifdef CONFIG_PAX_REFCOUNT
9587 + "jno 0f\n"
9588 + LOCK_PREFIX "subl %1,%0\n"
9589 + "int $4\n0:\n"
9590 + _ASM_EXTABLE(0b, 0b)
9591 +#endif
9592 +
9593 + : "=m" (v->counter)
9594 + : "ir" (i), "m" (v->counter));
9595 +}
9596 +
9597 +/**
9598 + * atomic_add_unchecked - add integer to atomic variable
9599 + * @i: integer value to add
9600 + * @v: pointer of type atomic_unchecked_t
9601 + *
9602 + * Atomically adds @i to @v.
9603 + */
9604 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
9605 +{
9606 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
9607 : "=m" (v->counter)
9608 : "ir" (i), "m" (v->counter));
9609 }
9610 @@ -58,7 +103,29 @@ static inline void atomic_add(int i, atomic_t *v)
9611 */
9612 static inline void atomic_sub(int i, atomic_t *v)
9613 {
9614 - asm volatile(LOCK_PREFIX "subl %1,%0"
9615 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
9616 +
9617 +#ifdef CONFIG_PAX_REFCOUNT
9618 + "jno 0f\n"
9619 + LOCK_PREFIX "addl %1,%0\n"
9620 + "int $4\n0:\n"
9621 + _ASM_EXTABLE(0b, 0b)
9622 +#endif
9623 +
9624 + : "=m" (v->counter)
9625 + : "ir" (i), "m" (v->counter));
9626 +}
9627 +
9628 +/**
9629 + * atomic_sub_unchecked - subtract the atomic variable
9630 + * @i: integer value to subtract
9631 + * @v: pointer of type atomic_unchecked_t
9632 + *
9633 + * Atomically subtracts @i from @v.
9634 + */
9635 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
9636 +{
9637 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
9638 : "=m" (v->counter)
9639 : "ir" (i), "m" (v->counter));
9640 }
9641 @@ -76,7 +143,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
9642 {
9643 unsigned char c;
9644
9645 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
9646 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
9647 +
9648 +#ifdef CONFIG_PAX_REFCOUNT
9649 + "jno 0f\n"
9650 + LOCK_PREFIX "addl %2,%0\n"
9651 + "int $4\n0:\n"
9652 + _ASM_EXTABLE(0b, 0b)
9653 +#endif
9654 +
9655 + "sete %1\n"
9656 : "=m" (v->counter), "=qm" (c)
9657 : "ir" (i), "m" (v->counter) : "memory");
9658 return c;
9659 @@ -90,7 +166,28 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
9660 */
9661 static inline void atomic_inc(atomic_t *v)
9662 {
9663 - asm volatile(LOCK_PREFIX "incl %0"
9664 + asm volatile(LOCK_PREFIX "incl %0\n"
9665 +
9666 +#ifdef CONFIG_PAX_REFCOUNT
9667 + "jno 0f\n"
9668 + LOCK_PREFIX "decl %0\n"
9669 + "int $4\n0:\n"
9670 + _ASM_EXTABLE(0b, 0b)
9671 +#endif
9672 +
9673 + : "=m" (v->counter)
9674 + : "m" (v->counter));
9675 +}
9676 +
9677 +/**
9678 + * atomic_inc_unchecked - increment atomic variable
9679 + * @v: pointer of type atomic_unchecked_t
9680 + *
9681 + * Atomically increments @v by 1.
9682 + */
9683 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
9684 +{
9685 + asm volatile(LOCK_PREFIX "incl %0\n"
9686 : "=m" (v->counter)
9687 : "m" (v->counter));
9688 }
9689 @@ -103,7 +200,28 @@ static inline void atomic_inc(atomic_t *v)
9690 */
9691 static inline void atomic_dec(atomic_t *v)
9692 {
9693 - asm volatile(LOCK_PREFIX "decl %0"
9694 + asm volatile(LOCK_PREFIX "decl %0\n"
9695 +
9696 +#ifdef CONFIG_PAX_REFCOUNT
9697 + "jno 0f\n"
9698 + LOCK_PREFIX "incl %0\n"
9699 + "int $4\n0:\n"
9700 + _ASM_EXTABLE(0b, 0b)
9701 +#endif
9702 +
9703 + : "=m" (v->counter)
9704 + : "m" (v->counter));
9705 +}
9706 +
9707 +/**
9708 + * atomic_dec_unchecked - decrement atomic variable
9709 + * @v: pointer of type atomic_unchecked_t
9710 + *
9711 + * Atomically decrements @v by 1.
9712 + */
9713 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
9714 +{
9715 + asm volatile(LOCK_PREFIX "decl %0\n"
9716 : "=m" (v->counter)
9717 : "m" (v->counter));
9718 }
9719 @@ -120,7 +238,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
9720 {
9721 unsigned char c;
9722
9723 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
9724 + asm volatile(LOCK_PREFIX "decl %0\n"
9725 +
9726 +#ifdef CONFIG_PAX_REFCOUNT
9727 + "jno 0f\n"
9728 + LOCK_PREFIX "incl %0\n"
9729 + "int $4\n0:\n"
9730 + _ASM_EXTABLE(0b, 0b)
9731 +#endif
9732 +
9733 + "sete %1\n"
9734 : "=m" (v->counter), "=qm" (c)
9735 : "m" (v->counter) : "memory");
9736 return c != 0;
9737 @@ -138,7 +265,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
9738 {
9739 unsigned char c;
9740
9741 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
9742 + asm volatile(LOCK_PREFIX "incl %0\n"
9743 +
9744 +#ifdef CONFIG_PAX_REFCOUNT
9745 + "jno 0f\n"
9746 + LOCK_PREFIX "decl %0\n"
9747 + "int $4\n0:\n"
9748 + _ASM_EXTABLE(0b, 0b)
9749 +#endif
9750 +
9751 + "sete %1\n"
9752 + : "=m" (v->counter), "=qm" (c)
9753 + : "m" (v->counter) : "memory");
9754 + return c != 0;
9755 +}
9756 +
9757 +/**
9758 + * atomic_inc_and_test_unchecked - increment and test
9759 + * @v: pointer of type atomic_unchecked_t
9760 + *
9761 + * Atomically increments @v by 1
9762 + * and returns true if the result is zero, or false for all
9763 + * other cases.
9764 + */
9765 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
9766 +{
9767 + unsigned char c;
9768 +
9769 + asm volatile(LOCK_PREFIX "incl %0\n"
9770 + "sete %1\n"
9771 : "=m" (v->counter), "=qm" (c)
9772 : "m" (v->counter) : "memory");
9773 return c != 0;
9774 @@ -157,7 +312,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
9775 {
9776 unsigned char c;
9777
9778 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
9779 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
9780 +
9781 +#ifdef CONFIG_PAX_REFCOUNT
9782 + "jno 0f\n"
9783 + LOCK_PREFIX "subl %2,%0\n"
9784 + "int $4\n0:\n"
9785 + _ASM_EXTABLE(0b, 0b)
9786 +#endif
9787 +
9788 + "sets %1\n"
9789 : "=m" (v->counter), "=qm" (c)
9790 : "ir" (i), "m" (v->counter) : "memory");
9791 return c;
9792 @@ -173,7 +337,31 @@ static inline int atomic_add_negative(int i, atomic_t *v)
9793 static inline int atomic_add_return(int i, atomic_t *v)
9794 {
9795 int __i = i;
9796 - asm volatile(LOCK_PREFIX "xaddl %0, %1"
9797 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
9798 +
9799 +#ifdef CONFIG_PAX_REFCOUNT
9800 + "jno 0f\n"
9801 + "movl %0, %1\n"
9802 + "int $4\n0:\n"
9803 + _ASM_EXTABLE(0b, 0b)
9804 +#endif
9805 +
9806 + : "+r" (i), "+m" (v->counter)
9807 + : : "memory");
9808 + return i + __i;
9809 +}
9810 +
9811 +/**
9812 + * atomic_add_return_unchecked - add and return
9813 + * @i: integer value to add
9814 + * @v: pointer of type atomic_unchecked_t
9815 + *
9816 + * Atomically adds @i to @v and returns @i + @v
9817 + */
9818 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
9819 +{
9820 + int __i = i;
9821 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
9822 : "+r" (i), "+m" (v->counter)
9823 : : "memory");
9824 return i + __i;
9825 @@ -185,6 +373,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
9826 }
9827
9828 #define atomic_inc_return(v) (atomic_add_return(1, v))
9829 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
9830 +{
9831 + return atomic_add_return_unchecked(1, v);
9832 +}
9833 #define atomic_dec_return(v) (atomic_sub_return(1, v))
9834
9835 /* The 64-bit atomic type */
9836 @@ -204,6 +396,18 @@ static inline long atomic64_read(const atomic64_t *v)
9837 }
9838
9839 /**
9840 + * atomic64_read_unchecked - read atomic64 variable
9841 + * @v: pointer of type atomic64_unchecked_t
9842 + *
9843 + * Atomically reads the value of @v.
9844 + * Doesn't imply a read memory barrier.
9845 + */
9846 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
9847 +{
9848 + return v->counter;
9849 +}
9850 +
9851 +/**
9852 * atomic64_set - set atomic64 variable
9853 * @v: pointer to type atomic64_t
9854 * @i: required value
9855 @@ -216,6 +420,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
9856 }
9857
9858 /**
9859 + * atomic64_set_unchecked - set atomic64 variable
9860 + * @v: pointer to type atomic64_unchecked_t
9861 + * @i: required value
9862 + *
9863 + * Atomically sets the value of @v to @i.
9864 + */
9865 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
9866 +{
9867 + v->counter = i;
9868 +}
9869 +
9870 +/**
9871 * atomic64_add - add integer to atomic64 variable
9872 * @i: integer value to add
9873 * @v: pointer to type atomic64_t
9874 @@ -224,6 +440,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
9875 */
9876 static inline void atomic64_add(long i, atomic64_t *v)
9877 {
9878 + asm volatile(LOCK_PREFIX "addq %1,%0\n"
9879 +
9880 +#ifdef CONFIG_PAX_REFCOUNT
9881 + "jno 0f\n"
9882 + LOCK_PREFIX "subq %1,%0\n"
9883 + "int $4\n0:\n"
9884 + _ASM_EXTABLE(0b, 0b)
9885 +#endif
9886 +
9887 + : "=m" (v->counter)
9888 + : "er" (i), "m" (v->counter));
9889 +}
9890 +
9891 +/**
9892 + * atomic64_add_unchecked - add integer to atomic64 variable
9893 + * @i: integer value to add
9894 + * @v: pointer to type atomic64_unchecked_t
9895 + *
9896 + * Atomically adds @i to @v.
9897 + */
9898 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
9899 +{
9900 asm volatile(LOCK_PREFIX "addq %1,%0"
9901 : "=m" (v->counter)
9902 : "er" (i), "m" (v->counter));
9903 @@ -238,7 +476,15 @@ static inline void atomic64_add(long i, atomic64_t *v)
9904 */
9905 static inline void atomic64_sub(long i, atomic64_t *v)
9906 {
9907 - asm volatile(LOCK_PREFIX "subq %1,%0"
9908 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
9909 +
9910 +#ifdef CONFIG_PAX_REFCOUNT
9911 + "jno 0f\n"
9912 + LOCK_PREFIX "addq %1,%0\n"
9913 + "int $4\n0:\n"
9914 + _ASM_EXTABLE(0b, 0b)
9915 +#endif
9916 +
9917 : "=m" (v->counter)
9918 : "er" (i), "m" (v->counter));
9919 }
9920 @@ -256,7 +502,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9921 {
9922 unsigned char c;
9923
9924 - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
9925 + asm volatile(LOCK_PREFIX "subq %2,%0\n"
9926 +
9927 +#ifdef CONFIG_PAX_REFCOUNT
9928 + "jno 0f\n"
9929 + LOCK_PREFIX "addq %2,%0\n"
9930 + "int $4\n0:\n"
9931 + _ASM_EXTABLE(0b, 0b)
9932 +#endif
9933 +
9934 + "sete %1\n"
9935 : "=m" (v->counter), "=qm" (c)
9936 : "er" (i), "m" (v->counter) : "memory");
9937 return c;
9938 @@ -270,6 +525,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9939 */
9940 static inline void atomic64_inc(atomic64_t *v)
9941 {
9942 + asm volatile(LOCK_PREFIX "incq %0\n"
9943 +
9944 +#ifdef CONFIG_PAX_REFCOUNT
9945 + "jno 0f\n"
9946 + LOCK_PREFIX "decq %0\n"
9947 + "int $4\n0:\n"
9948 + _ASM_EXTABLE(0b, 0b)
9949 +#endif
9950 +
9951 + : "=m" (v->counter)
9952 + : "m" (v->counter));
9953 +}
9954 +
9955 +/**
9956 + * atomic64_inc_unchecked - increment atomic64 variable
9957 + * @v: pointer to type atomic64_unchecked_t
9958 + *
9959 + * Atomically increments @v by 1.
9960 + */
9961 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
9962 +{
9963 asm volatile(LOCK_PREFIX "incq %0"
9964 : "=m" (v->counter)
9965 : "m" (v->counter));
9966 @@ -283,7 +559,28 @@ static inline void atomic64_inc(atomic64_t *v)
9967 */
9968 static inline void atomic64_dec(atomic64_t *v)
9969 {
9970 - asm volatile(LOCK_PREFIX "decq %0"
9971 + asm volatile(LOCK_PREFIX "decq %0\n"
9972 +
9973 +#ifdef CONFIG_PAX_REFCOUNT
9974 + "jno 0f\n"
9975 + LOCK_PREFIX "incq %0\n"
9976 + "int $4\n0:\n"
9977 + _ASM_EXTABLE(0b, 0b)
9978 +#endif
9979 +
9980 + : "=m" (v->counter)
9981 + : "m" (v->counter));
9982 +}
9983 +
9984 +/**
9985 + * atomic64_dec_unchecked - decrement atomic64 variable
9986 + * @v: pointer to type atomic64_t
9987 + *
9988 + * Atomically decrements @v by 1.
9989 + */
9990 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
9991 +{
9992 + asm volatile(LOCK_PREFIX "decq %0\n"
9993 : "=m" (v->counter)
9994 : "m" (v->counter));
9995 }
9996 @@ -300,7 +597,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
9997 {
9998 unsigned char c;
9999
10000 - asm volatile(LOCK_PREFIX "decq %0; sete %1"
10001 + asm volatile(LOCK_PREFIX "decq %0\n"
10002 +
10003 +#ifdef CONFIG_PAX_REFCOUNT
10004 + "jno 0f\n"
10005 + LOCK_PREFIX "incq %0\n"
10006 + "int $4\n0:\n"
10007 + _ASM_EXTABLE(0b, 0b)
10008 +#endif
10009 +
10010 + "sete %1\n"
10011 : "=m" (v->counter), "=qm" (c)
10012 : "m" (v->counter) : "memory");
10013 return c != 0;
10014 @@ -318,7 +624,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
10015 {
10016 unsigned char c;
10017
10018 - asm volatile(LOCK_PREFIX "incq %0; sete %1"
10019 + asm volatile(LOCK_PREFIX "incq %0\n"
10020 +
10021 +#ifdef CONFIG_PAX_REFCOUNT
10022 + "jno 0f\n"
10023 + LOCK_PREFIX "decq %0\n"
10024 + "int $4\n0:\n"
10025 + _ASM_EXTABLE(0b, 0b)
10026 +#endif
10027 +
10028 + "sete %1\n"
10029 : "=m" (v->counter), "=qm" (c)
10030 : "m" (v->counter) : "memory");
10031 return c != 0;
10032 @@ -337,7 +652,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
10033 {
10034 unsigned char c;
10035
10036 - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
10037 + asm volatile(LOCK_PREFIX "addq %2,%0\n"
10038 +
10039 +#ifdef CONFIG_PAX_REFCOUNT
10040 + "jno 0f\n"
10041 + LOCK_PREFIX "subq %2,%0\n"
10042 + "int $4\n0:\n"
10043 + _ASM_EXTABLE(0b, 0b)
10044 +#endif
10045 +
10046 + "sets %1\n"
10047 : "=m" (v->counter), "=qm" (c)
10048 : "er" (i), "m" (v->counter) : "memory");
10049 return c;
10050 @@ -353,7 +677,31 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
10051 static inline long atomic64_add_return(long i, atomic64_t *v)
10052 {
10053 long __i = i;
10054 - asm volatile(LOCK_PREFIX "xaddq %0, %1;"
10055 + asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
10056 +
10057 +#ifdef CONFIG_PAX_REFCOUNT
10058 + "jno 0f\n"
10059 + "movq %0, %1\n"
10060 + "int $4\n0:\n"
10061 + _ASM_EXTABLE(0b, 0b)
10062 +#endif
10063 +
10064 + : "+r" (i), "+m" (v->counter)
10065 + : : "memory");
10066 + return i + __i;
10067 +}
10068 +
10069 +/**
10070 + * atomic64_add_return_unchecked - add and return
10071 + * @i: integer value to add
10072 + * @v: pointer to type atomic64_unchecked_t
10073 + *
10074 + * Atomically adds @i to @v and returns @i + @v
10075 + */
10076 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
10077 +{
10078 + long __i = i;
10079 + asm volatile(LOCK_PREFIX "xaddq %0, %1"
10080 : "+r" (i), "+m" (v->counter)
10081 : : "memory");
10082 return i + __i;
10083 @@ -365,6 +713,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
10084 }
10085
10086 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
10087 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
10088 +{
10089 + return atomic64_add_return_unchecked(1, v);
10090 +}
10091 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
10092
10093 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
10094 @@ -372,21 +724,41 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
10095 return cmpxchg(&v->counter, old, new);
10096 }
10097
10098 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
10099 +{
10100 + return cmpxchg(&v->counter, old, new);
10101 +}
10102 +
10103 static inline long atomic64_xchg(atomic64_t *v, long new)
10104 {
10105 return xchg(&v->counter, new);
10106 }
10107
10108 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
10109 +{
10110 + return xchg(&v->counter, new);
10111 +}
10112 +
10113 static inline long atomic_cmpxchg(atomic_t *v, int old, int new)
10114 {
10115 return cmpxchg(&v->counter, old, new);
10116 }
10117
10118 +static inline long atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
10119 +{
10120 + return cmpxchg(&v->counter, old, new);
10121 +}
10122 +
10123 static inline long atomic_xchg(atomic_t *v, int new)
10124 {
10125 return xchg(&v->counter, new);
10126 }
10127
10128 +static inline long atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
10129 +{
10130 + return xchg(&v->counter, new);
10131 +}
10132 +
10133 /**
10134 * atomic_add_unless - add unless the number is a given value
10135 * @v: pointer of type atomic_t
10136 @@ -398,17 +770,30 @@ static inline long atomic_xchg(atomic_t *v, int new)
10137 */
10138 static inline int atomic_add_unless(atomic_t *v, int a, int u)
10139 {
10140 - int c, old;
10141 + int c, old, new;
10142 c = atomic_read(v);
10143 for (;;) {
10144 - if (unlikely(c == (u)))
10145 + if (unlikely(c == u))
10146 break;
10147 - old = atomic_cmpxchg((v), c, c + (a));
10148 +
10149 + asm volatile("addl %2,%0\n"
10150 +
10151 +#ifdef CONFIG_PAX_REFCOUNT
10152 + "jno 0f\n"
10153 + "subl %2,%0\n"
10154 + "int $4\n0:\n"
10155 + _ASM_EXTABLE(0b, 0b)
10156 +#endif
10157 +
10158 + : "=r" (new)
10159 + : "0" (c), "ir" (a));
10160 +
10161 + old = atomic_cmpxchg(v, c, new);
10162 if (likely(old == c))
10163 break;
10164 c = old;
10165 }
10166 - return c != (u);
10167 + return c != u;
10168 }
10169
10170 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
10171 @@ -424,17 +809,30 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
10172 */
10173 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
10174 {
10175 - long c, old;
10176 + long c, old, new;
10177 c = atomic64_read(v);
10178 for (;;) {
10179 - if (unlikely(c == (u)))
10180 + if (unlikely(c == u))
10181 break;
10182 - old = atomic64_cmpxchg((v), c, c + (a));
10183 +
10184 + asm volatile("addq %2,%0\n"
10185 +
10186 +#ifdef CONFIG_PAX_REFCOUNT
10187 + "jno 0f\n"
10188 + "subq %2,%0\n"
10189 + "int $4\n0:\n"
10190 + _ASM_EXTABLE(0b, 0b)
10191 +#endif
10192 +
10193 + : "=r" (new)
10194 + : "0" (c), "er" (a));
10195 +
10196 + old = atomic64_cmpxchg(v, c, new);
10197 if (likely(old == c))
10198 break;
10199 c = old;
10200 }
10201 - return c != (u);
10202 + return c != u;
10203 }
10204
10205 /**
10206 diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
10207 index 02b47a6..d5c4b15 100644
10208 --- a/arch/x86/include/asm/bitops.h
10209 +++ b/arch/x86/include/asm/bitops.h
10210 @@ -38,7 +38,7 @@
10211 * a mask operation on a byte.
10212 */
10213 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
10214 -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
10215 +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
10216 #define CONST_MASK(nr) (1 << ((nr) & 7))
10217
10218 /**
10219 diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
10220 index 7a10659..8bbf355 100644
10221 --- a/arch/x86/include/asm/boot.h
10222 +++ b/arch/x86/include/asm/boot.h
10223 @@ -11,10 +11,15 @@
10224 #include <asm/pgtable_types.h>
10225
10226 /* Physical address where kernel should be loaded. */
10227 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
10228 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
10229 + (CONFIG_PHYSICAL_ALIGN - 1)) \
10230 & ~(CONFIG_PHYSICAL_ALIGN - 1))
10231
10232 +#ifndef __ASSEMBLY__
10233 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
10234 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
10235 +#endif
10236 +
10237 /* Minimum kernel alignment, as a power of two */
10238 #ifdef CONFIG_X86_64
10239 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
10240 diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
10241 index 549860d..7d45f68 100644
10242 --- a/arch/x86/include/asm/cache.h
10243 +++ b/arch/x86/include/asm/cache.h
10244 @@ -5,9 +5,10 @@
10245
10246 /* L1 cache line size */
10247 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
10248 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
10249 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10250
10251 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
10252 +#define __read_only __attribute__((__section__(".data.read_only")))
10253
10254 #ifdef CONFIG_X86_VSMP
10255 /* vSMP Internode cacheline shift */
10256 diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
10257 index b54f6af..5b376a6 100644
10258 --- a/arch/x86/include/asm/cacheflush.h
10259 +++ b/arch/x86/include/asm/cacheflush.h
10260 @@ -60,7 +60,7 @@ PAGEFLAG(WC, WC)
10261 static inline unsigned long get_page_memtype(struct page *pg)
10262 {
10263 if (!PageUncached(pg) && !PageWC(pg))
10264 - return -1;
10265 + return ~0UL;
10266 else if (!PageUncached(pg) && PageWC(pg))
10267 return _PAGE_CACHE_WC;
10268 else if (PageUncached(pg) && !PageWC(pg))
10269 @@ -85,7 +85,7 @@ static inline void set_page_memtype(struct page *pg, unsigned long memtype)
10270 SetPageWC(pg);
10271 break;
10272 default:
10273 - case -1:
10274 + case ~0UL:
10275 ClearPageUncached(pg);
10276 ClearPageWC(pg);
10277 break;
10278 diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
10279 index 0e63c9a..ab8d972 100644
10280 --- a/arch/x86/include/asm/calling.h
10281 +++ b/arch/x86/include/asm/calling.h
10282 @@ -52,32 +52,32 @@ For 32-bit we have the following conventions - kernel is built with
10283 * for assembly code:
10284 */
10285
10286 -#define R15 0
10287 -#define R14 8
10288 -#define R13 16
10289 -#define R12 24
10290 -#define RBP 32
10291 -#define RBX 40
10292 +#define R15 (0)
10293 +#define R14 (8)
10294 +#define R13 (16)
10295 +#define R12 (24)
10296 +#define RBP (32)
10297 +#define RBX (40)
10298
10299 /* arguments: interrupts/non tracing syscalls only save up to here: */
10300 -#define R11 48
10301 -#define R10 56
10302 -#define R9 64
10303 -#define R8 72
10304 -#define RAX 80
10305 -#define RCX 88
10306 -#define RDX 96
10307 -#define RSI 104
10308 -#define RDI 112
10309 -#define ORIG_RAX 120 /* + error_code */
10310 +#define R11 (48)
10311 +#define R10 (56)
10312 +#define R9 (64)
10313 +#define R8 (72)
10314 +#define RAX (80)
10315 +#define RCX (88)
10316 +#define RDX (96)
10317 +#define RSI (104)
10318 +#define RDI (112)
10319 +#define ORIG_RAX (120) /* + error_code */
10320 /* end of arguments */
10321
10322 /* cpu exception frame or undefined in case of fast syscall: */
10323 -#define RIP 128
10324 -#define CS 136
10325 -#define EFLAGS 144
10326 -#define RSP 152
10327 -#define SS 160
10328 +#define RIP (128)
10329 +#define CS (136)
10330 +#define EFLAGS (144)
10331 +#define RSP (152)
10332 +#define SS (160)
10333
10334 #define ARGOFFSET R11
10335 #define SWFRAME ORIG_RAX
10336 diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
10337 index 46fc474..b02b0f9 100644
10338 --- a/arch/x86/include/asm/checksum_32.h
10339 +++ b/arch/x86/include/asm/checksum_32.h
10340 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
10341 int len, __wsum sum,
10342 int *src_err_ptr, int *dst_err_ptr);
10343
10344 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
10345 + int len, __wsum sum,
10346 + int *src_err_ptr, int *dst_err_ptr);
10347 +
10348 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
10349 + int len, __wsum sum,
10350 + int *src_err_ptr, int *dst_err_ptr);
10351 +
10352 /*
10353 * Note: when you get a NULL pointer exception here this means someone
10354 * passed in an incorrect kernel address to one of these functions.
10355 @@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
10356 int *err_ptr)
10357 {
10358 might_sleep();
10359 - return csum_partial_copy_generic((__force void *)src, dst,
10360 + return csum_partial_copy_generic_from_user((__force void *)src, dst,
10361 len, sum, err_ptr, NULL);
10362 }
10363
10364 @@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
10365 {
10366 might_sleep();
10367 if (access_ok(VERIFY_WRITE, dst, len))
10368 - return csum_partial_copy_generic(src, (__force void *)dst,
10369 + return csum_partial_copy_generic_to_user(src, (__force void *)dst,
10370 len, sum, NULL, err_ptr);
10371
10372 if (len)
10373 diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
10374 index 617bd56..7b047a1 100644
10375 --- a/arch/x86/include/asm/desc.h
10376 +++ b/arch/x86/include/asm/desc.h
10377 @@ -4,6 +4,7 @@
10378 #include <asm/desc_defs.h>
10379 #include <asm/ldt.h>
10380 #include <asm/mmu.h>
10381 +#include <asm/pgtable.h>
10382 #include <linux/smp.h>
10383
10384 static inline void fill_ldt(struct desc_struct *desc,
10385 @@ -15,6 +16,7 @@ static inline void fill_ldt(struct desc_struct *desc,
10386 desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
10387 desc->type = (info->read_exec_only ^ 1) << 1;
10388 desc->type |= info->contents << 2;
10389 + desc->type |= info->seg_not_present ^ 1;
10390 desc->s = 1;
10391 desc->dpl = 0x3;
10392 desc->p = info->seg_not_present ^ 1;
10393 @@ -31,16 +33,12 @@ static inline void fill_ldt(struct desc_struct *desc,
10394 }
10395
10396 extern struct desc_ptr idt_descr;
10397 -extern gate_desc idt_table[];
10398 -
10399 -struct gdt_page {
10400 - struct desc_struct gdt[GDT_ENTRIES];
10401 -} __attribute__((aligned(PAGE_SIZE)));
10402 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
10403 +extern gate_desc idt_table[256];
10404
10405 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
10406 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
10407 {
10408 - return per_cpu(gdt_page, cpu).gdt;
10409 + return cpu_gdt_table[cpu];
10410 }
10411
10412 #ifdef CONFIG_X86_64
10413 @@ -65,9 +63,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
10414 unsigned long base, unsigned dpl, unsigned flags,
10415 unsigned short seg)
10416 {
10417 - gate->a = (seg << 16) | (base & 0xffff);
10418 - gate->b = (base & 0xffff0000) |
10419 - (((0x80 | type | (dpl << 5)) & 0xff) << 8);
10420 + gate->gate.offset_low = base;
10421 + gate->gate.seg = seg;
10422 + gate->gate.reserved = 0;
10423 + gate->gate.type = type;
10424 + gate->gate.s = 0;
10425 + gate->gate.dpl = dpl;
10426 + gate->gate.p = 1;
10427 + gate->gate.offset_high = base >> 16;
10428 }
10429
10430 #endif
10431 @@ -115,13 +118,17 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
10432 static inline void native_write_idt_entry(gate_desc *idt, int entry,
10433 const gate_desc *gate)
10434 {
10435 + pax_open_kernel();
10436 memcpy(&idt[entry], gate, sizeof(*gate));
10437 + pax_close_kernel();
10438 }
10439
10440 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry,
10441 const void *desc)
10442 {
10443 + pax_open_kernel();
10444 memcpy(&ldt[entry], desc, 8);
10445 + pax_close_kernel();
10446 }
10447
10448 static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
10449 @@ -139,7 +146,10 @@ static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
10450 size = sizeof(struct desc_struct);
10451 break;
10452 }
10453 +
10454 + pax_open_kernel();
10455 memcpy(&gdt[entry], desc, size);
10456 + pax_close_kernel();
10457 }
10458
10459 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
10460 @@ -211,7 +221,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
10461
10462 static inline void native_load_tr_desc(void)
10463 {
10464 + pax_open_kernel();
10465 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
10466 + pax_close_kernel();
10467 }
10468
10469 static inline void native_load_gdt(const struct desc_ptr *dtr)
10470 @@ -246,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
10471 unsigned int i;
10472 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
10473
10474 + pax_open_kernel();
10475 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
10476 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
10477 + pax_close_kernel();
10478 }
10479
10480 #define _LDT_empty(info) \
10481 @@ -309,7 +323,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
10482 desc->limit = (limit >> 16) & 0xf;
10483 }
10484
10485 -static inline void _set_gate(int gate, unsigned type, void *addr,
10486 +static inline void _set_gate(int gate, unsigned type, const void *addr,
10487 unsigned dpl, unsigned ist, unsigned seg)
10488 {
10489 gate_desc s;
10490 @@ -327,7 +341,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
10491 * Pentium F0 0F bugfix can have resulted in the mapped
10492 * IDT being write-protected.
10493 */
10494 -static inline void set_intr_gate(unsigned int n, void *addr)
10495 +static inline void set_intr_gate(unsigned int n, const void *addr)
10496 {
10497 BUG_ON((unsigned)n > 0xFF);
10498 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
10499 @@ -356,19 +370,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
10500 /*
10501 * This routine sets up an interrupt gate at directory privilege level 3.
10502 */
10503 -static inline void set_system_intr_gate(unsigned int n, void *addr)
10504 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
10505 {
10506 BUG_ON((unsigned)n > 0xFF);
10507 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
10508 }
10509
10510 -static inline void set_system_trap_gate(unsigned int n, void *addr)
10511 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
10512 {
10513 BUG_ON((unsigned)n > 0xFF);
10514 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
10515 }
10516
10517 -static inline void set_trap_gate(unsigned int n, void *addr)
10518 +static inline void set_trap_gate(unsigned int n, const void *addr)
10519 {
10520 BUG_ON((unsigned)n > 0xFF);
10521 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
10522 @@ -377,19 +391,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
10523 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
10524 {
10525 BUG_ON((unsigned)n > 0xFF);
10526 - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
10527 + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
10528 }
10529
10530 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
10531 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
10532 {
10533 BUG_ON((unsigned)n > 0xFF);
10534 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
10535 }
10536
10537 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
10538 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
10539 {
10540 BUG_ON((unsigned)n > 0xFF);
10541 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
10542 }
10543
10544 +#ifdef CONFIG_X86_32
10545 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
10546 +{
10547 + struct desc_struct d;
10548 +
10549 + if (likely(limit))
10550 + limit = (limit - 1UL) >> PAGE_SHIFT;
10551 + pack_descriptor(&d, base, limit, 0xFB, 0xC);
10552 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
10553 +}
10554 +#endif
10555 +
10556 #endif /* _ASM_X86_DESC_H */
10557 diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
10558 index 9d66848..6b4a691 100644
10559 --- a/arch/x86/include/asm/desc_defs.h
10560 +++ b/arch/x86/include/asm/desc_defs.h
10561 @@ -31,6 +31,12 @@ struct desc_struct {
10562 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
10563 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
10564 };
10565 + struct {
10566 + u16 offset_low;
10567 + u16 seg;
10568 + unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
10569 + unsigned offset_high: 16;
10570 + } gate;
10571 };
10572 } __attribute__((packed));
10573
10574 diff --git a/arch/x86/include/asm/device.h b/arch/x86/include/asm/device.h
10575 index cee34e9..a7c3fa2 100644
10576 --- a/arch/x86/include/asm/device.h
10577 +++ b/arch/x86/include/asm/device.h
10578 @@ -6,7 +6,7 @@ struct dev_archdata {
10579 void *acpi_handle;
10580 #endif
10581 #ifdef CONFIG_X86_64
10582 -struct dma_map_ops *dma_ops;
10583 + const struct dma_map_ops *dma_ops;
10584 #endif
10585 #ifdef CONFIG_DMAR
10586 void *iommu; /* hook for IOMMU specific extension */
10587 diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
10588 index 6a25d5d..786b202 100644
10589 --- a/arch/x86/include/asm/dma-mapping.h
10590 +++ b/arch/x86/include/asm/dma-mapping.h
10591 @@ -25,9 +25,9 @@ extern int iommu_merge;
10592 extern struct device x86_dma_fallback_dev;
10593 extern int panic_on_overflow;
10594
10595 -extern struct dma_map_ops *dma_ops;
10596 +extern const struct dma_map_ops *dma_ops;
10597
10598 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
10599 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
10600 {
10601 #ifdef CONFIG_X86_32
10602 return dma_ops;
10603 @@ -44,7 +44,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
10604 /* Make sure we keep the same behaviour */
10605 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
10606 {
10607 - struct dma_map_ops *ops = get_dma_ops(dev);
10608 + const struct dma_map_ops *ops = get_dma_ops(dev);
10609 if (ops->mapping_error)
10610 return ops->mapping_error(dev, dma_addr);
10611
10612 @@ -122,7 +122,7 @@ static inline void *
10613 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
10614 gfp_t gfp)
10615 {
10616 - struct dma_map_ops *ops = get_dma_ops(dev);
10617 + const struct dma_map_ops *ops = get_dma_ops(dev);
10618 void *memory;
10619
10620 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
10621 @@ -149,7 +149,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
10622 static inline void dma_free_coherent(struct device *dev, size_t size,
10623 void *vaddr, dma_addr_t bus)
10624 {
10625 - struct dma_map_ops *ops = get_dma_ops(dev);
10626 + const struct dma_map_ops *ops = get_dma_ops(dev);
10627
10628 WARN_ON(irqs_disabled()); /* for portability */
10629
10630 diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
10631 index 40b4e61..40d8133 100644
10632 --- a/arch/x86/include/asm/e820.h
10633 +++ b/arch/x86/include/asm/e820.h
10634 @@ -133,7 +133,7 @@ extern char *default_machine_specific_memory_setup(void);
10635 #define ISA_END_ADDRESS 0x100000
10636 #define is_ISA_range(s, e) ((s) >= ISA_START_ADDRESS && (e) < ISA_END_ADDRESS)
10637
10638 -#define BIOS_BEGIN 0x000a0000
10639 +#define BIOS_BEGIN 0x000c0000
10640 #define BIOS_END 0x00100000
10641
10642 #ifdef __KERNEL__
10643 diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
10644 index 8ac9d9a..0a6c96e 100644
10645 --- a/arch/x86/include/asm/elf.h
10646 +++ b/arch/x86/include/asm/elf.h
10647 @@ -257,7 +257,25 @@ extern int force_personality32;
10648 the loader. We need to make sure that it is out of the way of the program
10649 that it will "exec", and that there is sufficient room for the brk. */
10650
10651 +#ifdef CONFIG_PAX_SEGMEXEC
10652 +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
10653 +#else
10654 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
10655 +#endif
10656 +
10657 +#ifdef CONFIG_PAX_ASLR
10658 +#ifdef CONFIG_X86_32
10659 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL
10660 +
10661 +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
10662 +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
10663 +#else
10664 +#define PAX_ELF_ET_DYN_BASE 0x400000UL
10665 +
10666 +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
10667 +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
10668 +#endif
10669 +#endif
10670
10671 /* This yields a mask that user programs can use to figure out what
10672 instruction set this CPU supports. This could be done in user space,
10673 @@ -310,9 +328,7 @@ do { \
10674
10675 #define ARCH_DLINFO \
10676 do { \
10677 - if (vdso_enabled) \
10678 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
10679 - (unsigned long)current->mm->context.vdso); \
10680 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
10681 } while (0)
10682
10683 #define AT_SYSINFO 32
10684 @@ -323,7 +339,7 @@ do { \
10685
10686 #endif /* !CONFIG_X86_32 */
10687
10688 -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
10689 +#define VDSO_CURRENT_BASE (current->mm->context.vdso)
10690
10691 #define VDSO_ENTRY \
10692 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
10693 @@ -337,7 +353,4 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
10694 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
10695 #define compat_arch_setup_additional_pages syscall32_setup_pages
10696
10697 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
10698 -#define arch_randomize_brk arch_randomize_brk
10699 -
10700 #endif /* _ASM_X86_ELF_H */
10701 diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
10702 index cc70c1c..d96d011 100644
10703 --- a/arch/x86/include/asm/emergency-restart.h
10704 +++ b/arch/x86/include/asm/emergency-restart.h
10705 @@ -15,6 +15,6 @@ enum reboot_type {
10706
10707 extern enum reboot_type reboot_type;
10708
10709 -extern void machine_emergency_restart(void);
10710 +extern void machine_emergency_restart(void) __noreturn;
10711
10712 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
10713 diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
10714 index 1f11ce4..7caabd1 100644
10715 --- a/arch/x86/include/asm/futex.h
10716 +++ b/arch/x86/include/asm/futex.h
10717 @@ -12,16 +12,18 @@
10718 #include <asm/system.h>
10719
10720 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
10721 + typecheck(u32 __user *, uaddr); \
10722 asm volatile("1:\t" insn "\n" \
10723 "2:\t.section .fixup,\"ax\"\n" \
10724 "3:\tmov\t%3, %1\n" \
10725 "\tjmp\t2b\n" \
10726 "\t.previous\n" \
10727 _ASM_EXTABLE(1b, 3b) \
10728 - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
10729 + : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
10730 : "i" (-EFAULT), "0" (oparg), "1" (0))
10731
10732 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
10733 + typecheck(u32 __user *, uaddr); \
10734 asm volatile("1:\tmovl %2, %0\n" \
10735 "\tmovl\t%0, %3\n" \
10736 "\t" insn "\n" \
10737 @@ -34,10 +36,10 @@
10738 _ASM_EXTABLE(1b, 4b) \
10739 _ASM_EXTABLE(2b, 4b) \
10740 : "=&a" (oldval), "=&r" (ret), \
10741 - "+m" (*uaddr), "=&r" (tem) \
10742 + "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
10743 : "r" (oparg), "i" (-EFAULT), "1" (0))
10744
10745 -static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
10746 +static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
10747 {
10748 int op = (encoded_op >> 28) & 7;
10749 int cmp = (encoded_op >> 24) & 15;
10750 @@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
10751
10752 switch (op) {
10753 case FUTEX_OP_SET:
10754 - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
10755 + __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
10756 break;
10757 case FUTEX_OP_ADD:
10758 - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
10759 + __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
10760 uaddr, oparg);
10761 break;
10762 case FUTEX_OP_OR:
10763 @@ -109,7 +111,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
10764 return ret;
10765 }
10766
10767 -static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
10768 +static inline int futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval,
10769 int newval)
10770 {
10771
10772 @@ -119,16 +121,16 @@ static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
10773 return -ENOSYS;
10774 #endif
10775
10776 - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
10777 + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
10778 return -EFAULT;
10779
10780 - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
10781 + asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %1\n"
10782 "2:\t.section .fixup, \"ax\"\n"
10783 "3:\tmov %2, %0\n"
10784 "\tjmp 2b\n"
10785 "\t.previous\n"
10786 _ASM_EXTABLE(1b, 3b)
10787 - : "=a" (oldval), "+m" (*uaddr)
10788 + : "=a" (oldval), "+m" (*(u32 *)____m(uaddr))
10789 : "i" (-EFAULT), "r" (newval), "0" (oldval)
10790 : "memory"
10791 );
10792 diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
10793 index ba180d9..3bad351 100644
10794 --- a/arch/x86/include/asm/hw_irq.h
10795 +++ b/arch/x86/include/asm/hw_irq.h
10796 @@ -92,8 +92,8 @@ extern void setup_ioapic_dest(void);
10797 extern void enable_IO_APIC(void);
10798
10799 /* Statistics */
10800 -extern atomic_t irq_err_count;
10801 -extern atomic_t irq_mis_count;
10802 +extern atomic_unchecked_t irq_err_count;
10803 +extern atomic_unchecked_t irq_mis_count;
10804
10805 /* EISA */
10806 extern void eisa_set_level_irq(unsigned int irq);
10807 diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
10808 index 0b20bbb..4cb1396 100644
10809 --- a/arch/x86/include/asm/i387.h
10810 +++ b/arch/x86/include/asm/i387.h
10811 @@ -60,6 +60,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
10812 {
10813 int err;
10814
10815 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10816 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10817 + fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
10818 +#endif
10819 +
10820 asm volatile("1: rex64/fxrstor (%[fx])\n\t"
10821 "2:\n"
10822 ".section .fixup,\"ax\"\n"
10823 @@ -105,6 +110,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
10824 {
10825 int err;
10826
10827 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10828 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10829 + fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
10830 +#endif
10831 +
10832 asm volatile("1: rex64/fxsave (%[fx])\n\t"
10833 "2:\n"
10834 ".section .fixup,\"ax\"\n"
10835 @@ -195,13 +205,8 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
10836 }
10837
10838 /* We need a safe address that is cheap to find and that is already
10839 - in L1 during context switch. The best choices are unfortunately
10840 - different for UP and SMP */
10841 -#ifdef CONFIG_SMP
10842 -#define safe_address (__per_cpu_offset[0])
10843 -#else
10844 -#define safe_address (kstat_cpu(0).cpustat.user)
10845 -#endif
10846 + in L1 during context switch. */
10847 +#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
10848
10849 /*
10850 * These must be called with preempt disabled
10851 @@ -291,7 +296,7 @@ static inline void kernel_fpu_begin(void)
10852 struct thread_info *me = current_thread_info();
10853 preempt_disable();
10854 if (me->status & TS_USEDFPU)
10855 - __save_init_fpu(me->task);
10856 + __save_init_fpu(current);
10857 else
10858 clts();
10859 }
10860 diff --git a/arch/x86/include/asm/io_32.h b/arch/x86/include/asm/io_32.h
10861 index a299900..15c5410 100644
10862 --- a/arch/x86/include/asm/io_32.h
10863 +++ b/arch/x86/include/asm/io_32.h
10864 @@ -3,6 +3,7 @@
10865
10866 #include <linux/string.h>
10867 #include <linux/compiler.h>
10868 +#include <asm/processor.h>
10869
10870 /*
10871 * This file contains the definitions for the x86 IO instructions
10872 @@ -42,6 +43,17 @@
10873
10874 #ifdef __KERNEL__
10875
10876 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
10877 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
10878 +{
10879 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10880 +}
10881 +
10882 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
10883 +{
10884 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10885 +}
10886 +
10887 #include <asm-generic/iomap.h>
10888
10889 #include <linux/vmalloc.h>
10890 diff --git a/arch/x86/include/asm/io_64.h b/arch/x86/include/asm/io_64.h
10891 index 2440678..c158b88 100644
10892 --- a/arch/x86/include/asm/io_64.h
10893 +++ b/arch/x86/include/asm/io_64.h
10894 @@ -140,6 +140,17 @@ __OUTS(l)
10895
10896 #include <linux/vmalloc.h>
10897
10898 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
10899 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
10900 +{
10901 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10902 +}
10903 +
10904 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
10905 +{
10906 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10907 +}
10908 +
10909 #include <asm-generic/iomap.h>
10910
10911 void __memcpy_fromio(void *, unsigned long, unsigned);
10912 diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h
10913 index fd6d21b..8b13915 100644
10914 --- a/arch/x86/include/asm/iommu.h
10915 +++ b/arch/x86/include/asm/iommu.h
10916 @@ -3,7 +3,7 @@
10917
10918 extern void pci_iommu_shutdown(void);
10919 extern void no_iommu_init(void);
10920 -extern struct dma_map_ops nommu_dma_ops;
10921 +extern const struct dma_map_ops nommu_dma_ops;
10922 extern int force_iommu, no_iommu;
10923 extern int iommu_detected;
10924 extern int iommu_pass_through;
10925 diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
10926 index 9e2b952..557206e 100644
10927 --- a/arch/x86/include/asm/irqflags.h
10928 +++ b/arch/x86/include/asm/irqflags.h
10929 @@ -142,6 +142,11 @@ static inline unsigned long __raw_local_irq_save(void)
10930 sti; \
10931 sysexit
10932
10933 +#define GET_CR0_INTO_RDI mov %cr0, %rdi
10934 +#define SET_RDI_INTO_CR0 mov %rdi, %cr0
10935 +#define GET_CR3_INTO_RDI mov %cr3, %rdi
10936 +#define SET_RDI_INTO_CR3 mov %rdi, %cr3
10937 +
10938 #else
10939 #define INTERRUPT_RETURN iret
10940 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
10941 diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
10942 index 4fe681d..bb6d40c 100644
10943 --- a/arch/x86/include/asm/kprobes.h
10944 +++ b/arch/x86/include/asm/kprobes.h
10945 @@ -34,13 +34,8 @@ typedef u8 kprobe_opcode_t;
10946 #define BREAKPOINT_INSTRUCTION 0xcc
10947 #define RELATIVEJUMP_INSTRUCTION 0xe9
10948 #define MAX_INSN_SIZE 16
10949 -#define MAX_STACK_SIZE 64
10950 -#define MIN_STACK_SIZE(ADDR) \
10951 - (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
10952 - THREAD_SIZE - (unsigned long)(ADDR))) \
10953 - ? (MAX_STACK_SIZE) \
10954 - : (((unsigned long)current_thread_info()) + \
10955 - THREAD_SIZE - (unsigned long)(ADDR)))
10956 +#define MAX_STACK_SIZE 64UL
10957 +#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
10958
10959 #define flush_insn_slot(p) do { } while (0)
10960
10961 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
10962 index 08bc2ff..2e88d1f 100644
10963 --- a/arch/x86/include/asm/kvm_host.h
10964 +++ b/arch/x86/include/asm/kvm_host.h
10965 @@ -534,9 +534,9 @@ struct kvm_x86_ops {
10966 bool (*gb_page_enable)(void);
10967
10968 const struct trace_print_flags *exit_reasons_str;
10969 -};
10970 +} __do_const;
10971
10972 -extern struct kvm_x86_ops *kvm_x86_ops;
10973 +extern const struct kvm_x86_ops *kvm_x86_ops;
10974
10975 int kvm_mmu_module_init(void);
10976 void kvm_mmu_module_exit(void);
10977 diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
10978 index 47b9b6f..815aaa1 100644
10979 --- a/arch/x86/include/asm/local.h
10980 +++ b/arch/x86/include/asm/local.h
10981 @@ -18,26 +18,58 @@ typedef struct {
10982
10983 static inline void local_inc(local_t *l)
10984 {
10985 - asm volatile(_ASM_INC "%0"
10986 + asm volatile(_ASM_INC "%0\n"
10987 +
10988 +#ifdef CONFIG_PAX_REFCOUNT
10989 + "jno 0f\n"
10990 + _ASM_DEC "%0\n"
10991 + "int $4\n0:\n"
10992 + _ASM_EXTABLE(0b, 0b)
10993 +#endif
10994 +
10995 : "+m" (l->a.counter));
10996 }
10997
10998 static inline void local_dec(local_t *l)
10999 {
11000 - asm volatile(_ASM_DEC "%0"
11001 + asm volatile(_ASM_DEC "%0\n"
11002 +
11003 +#ifdef CONFIG_PAX_REFCOUNT
11004 + "jno 0f\n"
11005 + _ASM_INC "%0\n"
11006 + "int $4\n0:\n"
11007 + _ASM_EXTABLE(0b, 0b)
11008 +#endif
11009 +
11010 : "+m" (l->a.counter));
11011 }
11012
11013 static inline void local_add(long i, local_t *l)
11014 {
11015 - asm volatile(_ASM_ADD "%1,%0"
11016 + asm volatile(_ASM_ADD "%1,%0\n"
11017 +
11018 +#ifdef CONFIG_PAX_REFCOUNT
11019 + "jno 0f\n"
11020 + _ASM_SUB "%1,%0\n"
11021 + "int $4\n0:\n"
11022 + _ASM_EXTABLE(0b, 0b)
11023 +#endif
11024 +
11025 : "+m" (l->a.counter)
11026 : "ir" (i));
11027 }
11028
11029 static inline void local_sub(long i, local_t *l)
11030 {
11031 - asm volatile(_ASM_SUB "%1,%0"
11032 + asm volatile(_ASM_SUB "%1,%0\n"
11033 +
11034 +#ifdef CONFIG_PAX_REFCOUNT
11035 + "jno 0f\n"
11036 + _ASM_ADD "%1,%0\n"
11037 + "int $4\n0:\n"
11038 + _ASM_EXTABLE(0b, 0b)
11039 +#endif
11040 +
11041 : "+m" (l->a.counter)
11042 : "ir" (i));
11043 }
11044 @@ -55,7 +87,16 @@ static inline int local_sub_and_test(long i, local_t *l)
11045 {
11046 unsigned char c;
11047
11048 - asm volatile(_ASM_SUB "%2,%0; sete %1"
11049 + asm volatile(_ASM_SUB "%2,%0\n"
11050 +
11051 +#ifdef CONFIG_PAX_REFCOUNT
11052 + "jno 0f\n"
11053 + _ASM_ADD "%2,%0\n"
11054 + "int $4\n0:\n"
11055 + _ASM_EXTABLE(0b, 0b)
11056 +#endif
11057 +
11058 + "sete %1\n"
11059 : "+m" (l->a.counter), "=qm" (c)
11060 : "ir" (i) : "memory");
11061 return c;
11062 @@ -73,7 +114,16 @@ static inline int local_dec_and_test(local_t *l)
11063 {
11064 unsigned char c;
11065
11066 - asm volatile(_ASM_DEC "%0; sete %1"
11067 + asm volatile(_ASM_DEC "%0\n"
11068 +
11069 +#ifdef CONFIG_PAX_REFCOUNT
11070 + "jno 0f\n"
11071 + _ASM_INC "%0\n"
11072 + "int $4\n0:\n"
11073 + _ASM_EXTABLE(0b, 0b)
11074 +#endif
11075 +
11076 + "sete %1\n"
11077 : "+m" (l->a.counter), "=qm" (c)
11078 : : "memory");
11079 return c != 0;
11080 @@ -91,7 +141,16 @@ static inline int local_inc_and_test(local_t *l)
11081 {
11082 unsigned char c;
11083
11084 - asm volatile(_ASM_INC "%0; sete %1"
11085 + asm volatile(_ASM_INC "%0\n"
11086 +
11087 +#ifdef CONFIG_PAX_REFCOUNT
11088 + "jno 0f\n"
11089 + _ASM_DEC "%0\n"
11090 + "int $4\n0:\n"
11091 + _ASM_EXTABLE(0b, 0b)
11092 +#endif
11093 +
11094 + "sete %1\n"
11095 : "+m" (l->a.counter), "=qm" (c)
11096 : : "memory");
11097 return c != 0;
11098 @@ -110,7 +169,16 @@ static inline int local_add_negative(long i, local_t *l)
11099 {
11100 unsigned char c;
11101
11102 - asm volatile(_ASM_ADD "%2,%0; sets %1"
11103 + asm volatile(_ASM_ADD "%2,%0\n"
11104 +
11105 +#ifdef CONFIG_PAX_REFCOUNT
11106 + "jno 0f\n"
11107 + _ASM_SUB "%2,%0\n"
11108 + "int $4\n0:\n"
11109 + _ASM_EXTABLE(0b, 0b)
11110 +#endif
11111 +
11112 + "sets %1\n"
11113 : "+m" (l->a.counter), "=qm" (c)
11114 : "ir" (i) : "memory");
11115 return c;
11116 @@ -133,7 +201,15 @@ static inline long local_add_return(long i, local_t *l)
11117 #endif
11118 /* Modern 486+ processor */
11119 __i = i;
11120 - asm volatile(_ASM_XADD "%0, %1;"
11121 + asm volatile(_ASM_XADD "%0, %1\n"
11122 +
11123 +#ifdef CONFIG_PAX_REFCOUNT
11124 + "jno 0f\n"
11125 + _ASM_MOV "%0,%1\n"
11126 + "int $4\n0:\n"
11127 + _ASM_EXTABLE(0b, 0b)
11128 +#endif
11129 +
11130 : "+r" (i), "+m" (l->a.counter)
11131 : : "memory");
11132 return i + __i;
11133 diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
11134 index ef51b50..514ba37 100644
11135 --- a/arch/x86/include/asm/microcode.h
11136 +++ b/arch/x86/include/asm/microcode.h
11137 @@ -12,13 +12,13 @@ struct device;
11138 enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };
11139
11140 struct microcode_ops {
11141 - enum ucode_state (*request_microcode_user) (int cpu,
11142 + enum ucode_state (* const request_microcode_user) (int cpu,
11143 const void __user *buf, size_t size);
11144
11145 - enum ucode_state (*request_microcode_fw) (int cpu,
11146 + enum ucode_state (* const request_microcode_fw) (int cpu,
11147 struct device *device);
11148
11149 - void (*microcode_fini_cpu) (int cpu);
11150 + void (* const microcode_fini_cpu) (int cpu);
11151
11152 /*
11153 * The generic 'microcode_core' part guarantees that
11154 @@ -38,18 +38,18 @@ struct ucode_cpu_info {
11155 extern struct ucode_cpu_info ucode_cpu_info[];
11156
11157 #ifdef CONFIG_MICROCODE_INTEL
11158 -extern struct microcode_ops * __init init_intel_microcode(void);
11159 +extern const struct microcode_ops * __init init_intel_microcode(void);
11160 #else
11161 -static inline struct microcode_ops * __init init_intel_microcode(void)
11162 +static inline const struct microcode_ops * __init init_intel_microcode(void)
11163 {
11164 return NULL;
11165 }
11166 #endif /* CONFIG_MICROCODE_INTEL */
11167
11168 #ifdef CONFIG_MICROCODE_AMD
11169 -extern struct microcode_ops * __init init_amd_microcode(void);
11170 +extern const struct microcode_ops * __init init_amd_microcode(void);
11171 #else
11172 -static inline struct microcode_ops * __init init_amd_microcode(void)
11173 +static inline const struct microcode_ops * __init init_amd_microcode(void)
11174 {
11175 return NULL;
11176 }
11177 diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
11178 index 593e51d..fa69c9a 100644
11179 --- a/arch/x86/include/asm/mman.h
11180 +++ b/arch/x86/include/asm/mman.h
11181 @@ -5,4 +5,14 @@
11182
11183 #include <asm-generic/mman.h>
11184
11185 +#ifdef __KERNEL__
11186 +#ifndef __ASSEMBLY__
11187 +#ifdef CONFIG_X86_32
11188 +#define arch_mmap_check i386_mmap_check
11189 +int i386_mmap_check(unsigned long addr, unsigned long len,
11190 + unsigned long flags);
11191 +#endif
11192 +#endif
11193 +#endif
11194 +
11195 #endif /* _ASM_X86_MMAN_H */
11196 diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
11197 index 80a1dee..239c67d 100644
11198 --- a/arch/x86/include/asm/mmu.h
11199 +++ b/arch/x86/include/asm/mmu.h
11200 @@ -9,10 +9,23 @@
11201 * we put the segment information here.
11202 */
11203 typedef struct {
11204 - void *ldt;
11205 + struct desc_struct *ldt;
11206 int size;
11207 struct mutex lock;
11208 - void *vdso;
11209 + unsigned long vdso;
11210 +
11211 +#ifdef CONFIG_X86_32
11212 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
11213 + unsigned long user_cs_base;
11214 + unsigned long user_cs_limit;
11215 +
11216 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
11217 + cpumask_t cpu_user_cs_mask;
11218 +#endif
11219 +
11220 +#endif
11221 +#endif
11222 +
11223 } mm_context_t;
11224
11225 #ifdef CONFIG_SMP
11226 diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
11227 index 8b5393e..8143173 100644
11228 --- a/arch/x86/include/asm/mmu_context.h
11229 +++ b/arch/x86/include/asm/mmu_context.h
11230 @@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
11231
11232 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
11233 {
11234 +
11235 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11236 + unsigned int i;
11237 + pgd_t *pgd;
11238 +
11239 + pax_open_kernel();
11240 + pgd = get_cpu_pgd(smp_processor_id());
11241 + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
11242 + set_pgd_batched(pgd+i, native_make_pgd(0));
11243 + pax_close_kernel();
11244 +#endif
11245 +
11246 #ifdef CONFIG_SMP
11247 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
11248 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
11249 @@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
11250 struct task_struct *tsk)
11251 {
11252 unsigned cpu = smp_processor_id();
11253 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)) && defined(CONFIG_SMP)
11254 + int tlbstate = TLBSTATE_OK;
11255 +#endif
11256
11257 if (likely(prev != next)) {
11258 #ifdef CONFIG_SMP
11259 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
11260 + tlbstate = percpu_read(cpu_tlbstate.state);
11261 +#endif
11262 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
11263 percpu_write(cpu_tlbstate.active_mm, next);
11264 #endif
11265 cpumask_set_cpu(cpu, mm_cpumask(next));
11266
11267 /* Re-load page tables */
11268 +#ifdef CONFIG_PAX_PER_CPU_PGD
11269 + pax_open_kernel();
11270 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
11271 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
11272 + pax_close_kernel();
11273 + load_cr3(get_cpu_pgd(cpu));
11274 +#else
11275 load_cr3(next->pgd);
11276 +#endif
11277
11278 /* stop flush ipis for the previous mm */
11279 cpumask_clear_cpu(cpu, mm_cpumask(prev));
11280 @@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
11281 */
11282 if (unlikely(prev->context.ldt != next->context.ldt))
11283 load_LDT_nolock(&next->context);
11284 - }
11285 +
11286 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
11287 + if (!nx_enabled) {
11288 + smp_mb__before_clear_bit();
11289 + cpu_clear(cpu, prev->context.cpu_user_cs_mask);
11290 + smp_mb__after_clear_bit();
11291 + cpu_set(cpu, next->context.cpu_user_cs_mask);
11292 + }
11293 +#endif
11294 +
11295 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
11296 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
11297 + prev->context.user_cs_limit != next->context.user_cs_limit))
11298 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
11299 #ifdef CONFIG_SMP
11300 + else if (unlikely(tlbstate != TLBSTATE_OK))
11301 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
11302 +#endif
11303 +#endif
11304 +
11305 + }
11306 else {
11307 +
11308 +#ifdef CONFIG_PAX_PER_CPU_PGD
11309 + pax_open_kernel();
11310 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
11311 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
11312 + pax_close_kernel();
11313 + load_cr3(get_cpu_pgd(cpu));
11314 +#endif
11315 +
11316 +#ifdef CONFIG_SMP
11317 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
11318 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
11319
11320 @@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
11321 * tlb flush IPI delivery. We must reload CR3
11322 * to make sure to use no freed page tables.
11323 */
11324 +
11325 +#ifndef CONFIG_PAX_PER_CPU_PGD
11326 load_cr3(next->pgd);
11327 +#endif
11328 +
11329 load_LDT_nolock(&next->context);
11330 +
11331 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
11332 + if (!nx_enabled)
11333 + cpu_set(cpu, next->context.cpu_user_cs_mask);
11334 +#endif
11335 +
11336 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
11337 +#ifdef CONFIG_PAX_PAGEEXEC
11338 + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && nx_enabled))
11339 +#endif
11340 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
11341 +#endif
11342 +
11343 }
11344 +#endif
11345 }
11346 -#endif
11347 }
11348
11349 #define activate_mm(prev, next) \
11350 diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
11351 index 3e2ce58..caaf478 100644
11352 --- a/arch/x86/include/asm/module.h
11353 +++ b/arch/x86/include/asm/module.h
11354 @@ -5,6 +5,7 @@
11355
11356 #ifdef CONFIG_X86_64
11357 /* X86_64 does not define MODULE_PROC_FAMILY */
11358 +#define MODULE_PROC_FAMILY ""
11359 #elif defined CONFIG_M386
11360 #define MODULE_PROC_FAMILY "386 "
11361 #elif defined CONFIG_M486
11362 @@ -59,13 +60,26 @@
11363 #error unknown processor family
11364 #endif
11365
11366 -#ifdef CONFIG_X86_32
11367 -# ifdef CONFIG_4KSTACKS
11368 -# define MODULE_STACKSIZE "4KSTACKS "
11369 -# else
11370 -# define MODULE_STACKSIZE ""
11371 -# endif
11372 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE
11373 +#if defined(CONFIG_X86_32) && defined(CONFIG_4KSTACKS)
11374 +#define MODULE_STACKSIZE "4KSTACKS "
11375 +#else
11376 +#define MODULE_STACKSIZE ""
11377 #endif
11378
11379 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
11380 +#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
11381 +#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
11382 +#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
11383 +#else
11384 +#define MODULE_PAX_KERNEXEC ""
11385 +#endif
11386 +
11387 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11388 +#define MODULE_PAX_UDEREF "UDEREF "
11389 +#else
11390 +#define MODULE_PAX_UDEREF ""
11391 +#endif
11392 +
11393 +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
11394 +
11395 #endif /* _ASM_X86_MODULE_H */
11396 diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
11397 index 7639dbf..e08a58c 100644
11398 --- a/arch/x86/include/asm/page_64_types.h
11399 +++ b/arch/x86/include/asm/page_64_types.h
11400 @@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
11401
11402 /* duplicated to the one in bootmem.h */
11403 extern unsigned long max_pfn;
11404 -extern unsigned long phys_base;
11405 +extern const unsigned long phys_base;
11406
11407 extern unsigned long __phys_addr(unsigned long);
11408 #define __phys_reloc_hide(x) (x)
11409 diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
11410 index efb3899..ef30687 100644
11411 --- a/arch/x86/include/asm/paravirt.h
11412 +++ b/arch/x86/include/asm/paravirt.h
11413 @@ -648,6 +648,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
11414 val);
11415 }
11416
11417 +static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
11418 +{
11419 + pgdval_t val = native_pgd_val(pgd);
11420 +
11421 + if (sizeof(pgdval_t) > sizeof(long))
11422 + PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
11423 + val, (u64)val >> 32);
11424 + else
11425 + PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
11426 + val);
11427 +}
11428 +
11429 static inline void pgd_clear(pgd_t *pgdp)
11430 {
11431 set_pgd(pgdp, __pgd(0));
11432 @@ -729,6 +741,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
11433 pv_mmu_ops.set_fixmap(idx, phys, flags);
11434 }
11435
11436 +#ifdef CONFIG_PAX_KERNEXEC
11437 +static inline unsigned long pax_open_kernel(void)
11438 +{
11439 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
11440 +}
11441 +
11442 +static inline unsigned long pax_close_kernel(void)
11443 +{
11444 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
11445 +}
11446 +#else
11447 +static inline unsigned long pax_open_kernel(void) { return 0; }
11448 +static inline unsigned long pax_close_kernel(void) { return 0; }
11449 +#endif
11450 +
11451 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
11452
11453 static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
11454 @@ -945,7 +972,7 @@ extern void default_banner(void);
11455
11456 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
11457 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
11458 -#define PARA_INDIRECT(addr) *%cs:addr
11459 +#define PARA_INDIRECT(addr) *%ss:addr
11460 #endif
11461
11462 #define INTERRUPT_RETURN \
11463 @@ -1022,6 +1049,21 @@ extern void default_banner(void);
11464 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
11465 CLBR_NONE, \
11466 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
11467 +
11468 +#define GET_CR0_INTO_RDI \
11469 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
11470 + mov %rax,%rdi
11471 +
11472 +#define SET_RDI_INTO_CR0 \
11473 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
11474 +
11475 +#define GET_CR3_INTO_RDI \
11476 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
11477 + mov %rax,%rdi
11478 +
11479 +#define SET_RDI_INTO_CR3 \
11480 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
11481 +
11482 #endif /* CONFIG_X86_32 */
11483
11484 #endif /* __ASSEMBLY__ */
11485 diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
11486 index 9357473..aeb2de5 100644
11487 --- a/arch/x86/include/asm/paravirt_types.h
11488 +++ b/arch/x86/include/asm/paravirt_types.h
11489 @@ -78,19 +78,19 @@ struct pv_init_ops {
11490 */
11491 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
11492 unsigned long addr, unsigned len);
11493 -};
11494 +} __no_const;
11495
11496
11497 struct pv_lazy_ops {
11498 /* Set deferred update mode, used for batching operations. */
11499 void (*enter)(void);
11500 void (*leave)(void);
11501 -};
11502 +} __no_const;
11503
11504 struct pv_time_ops {
11505 unsigned long long (*sched_clock)(void);
11506 unsigned long (*get_tsc_khz)(void);
11507 -};
11508 +} __no_const;
11509
11510 struct pv_cpu_ops {
11511 /* hooks for various privileged instructions */
11512 @@ -186,7 +186,7 @@ struct pv_cpu_ops {
11513
11514 void (*start_context_switch)(struct task_struct *prev);
11515 void (*end_context_switch)(struct task_struct *next);
11516 -};
11517 +} __no_const;
11518
11519 struct pv_irq_ops {
11520 /*
11521 @@ -217,7 +217,7 @@ struct pv_apic_ops {
11522 unsigned long start_eip,
11523 unsigned long start_esp);
11524 #endif
11525 -};
11526 +} __no_const;
11527
11528 struct pv_mmu_ops {
11529 unsigned long (*read_cr2)(void);
11530 @@ -301,6 +301,7 @@ struct pv_mmu_ops {
11531 struct paravirt_callee_save make_pud;
11532
11533 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
11534 + void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
11535 #endif /* PAGETABLE_LEVELS == 4 */
11536 #endif /* PAGETABLE_LEVELS >= 3 */
11537
11538 @@ -316,6 +317,12 @@ struct pv_mmu_ops {
11539 an mfn. We can tell which is which from the index. */
11540 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
11541 phys_addr_t phys, pgprot_t flags);
11542 +
11543 +#ifdef CONFIG_PAX_KERNEXEC
11544 + unsigned long (*pax_open_kernel)(void);
11545 + unsigned long (*pax_close_kernel)(void);
11546 +#endif
11547 +
11548 };
11549
11550 struct raw_spinlock;
11551 @@ -326,7 +333,7 @@ struct pv_lock_ops {
11552 void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
11553 int (*spin_trylock)(struct raw_spinlock *lock);
11554 void (*spin_unlock)(struct raw_spinlock *lock);
11555 -};
11556 +} __no_const;
11557
11558 /* This contains all the paravirt structures: we get a convenient
11559 * number for each function using the offset which we use to indicate
11560 diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
11561 index b399988..3f47c38 100644
11562 --- a/arch/x86/include/asm/pci_x86.h
11563 +++ b/arch/x86/include/asm/pci_x86.h
11564 @@ -89,16 +89,16 @@ extern int (*pcibios_enable_irq)(struct pci_dev *dev);
11565 extern void (*pcibios_disable_irq)(struct pci_dev *dev);
11566
11567 struct pci_raw_ops {
11568 - int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
11569 + int (* const read)(unsigned int domain, unsigned int bus, unsigned int devfn,
11570 int reg, int len, u32 *val);
11571 - int (*write)(unsigned int domain, unsigned int bus, unsigned int devfn,
11572 + int (* const write)(unsigned int domain, unsigned int bus, unsigned int devfn,
11573 int reg, int len, u32 val);
11574 };
11575
11576 -extern struct pci_raw_ops *raw_pci_ops;
11577 -extern struct pci_raw_ops *raw_pci_ext_ops;
11578 +extern const struct pci_raw_ops *raw_pci_ops;
11579 +extern const struct pci_raw_ops *raw_pci_ext_ops;
11580
11581 -extern struct pci_raw_ops pci_direct_conf1;
11582 +extern const struct pci_raw_ops pci_direct_conf1;
11583 extern bool port_cf9_safe;
11584
11585 /* arch_initcall level */
11586 diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
11587 index b65a36d..50345a4 100644
11588 --- a/arch/x86/include/asm/percpu.h
11589 +++ b/arch/x86/include/asm/percpu.h
11590 @@ -78,6 +78,7 @@ do { \
11591 if (0) { \
11592 T__ tmp__; \
11593 tmp__ = (val); \
11594 + (void)tmp__; \
11595 } \
11596 switch (sizeof(var)) { \
11597 case 1: \
11598 diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
11599 index 271de94..ef944d6 100644
11600 --- a/arch/x86/include/asm/pgalloc.h
11601 +++ b/arch/x86/include/asm/pgalloc.h
11602 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
11603 pmd_t *pmd, pte_t *pte)
11604 {
11605 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
11606 + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
11607 +}
11608 +
11609 +static inline void pmd_populate_user(struct mm_struct *mm,
11610 + pmd_t *pmd, pte_t *pte)
11611 +{
11612 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
11613 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
11614 }
11615
11616 diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
11617 index 2334982..70bc412 100644
11618 --- a/arch/x86/include/asm/pgtable-2level.h
11619 +++ b/arch/x86/include/asm/pgtable-2level.h
11620 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
11621
11622 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11623 {
11624 + pax_open_kernel();
11625 *pmdp = pmd;
11626 + pax_close_kernel();
11627 }
11628
11629 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11630 diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
11631 index 33927d2..ccde329 100644
11632 --- a/arch/x86/include/asm/pgtable-3level.h
11633 +++ b/arch/x86/include/asm/pgtable-3level.h
11634 @@ -38,12 +38,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11635
11636 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11637 {
11638 + pax_open_kernel();
11639 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
11640 + pax_close_kernel();
11641 }
11642
11643 static inline void native_set_pud(pud_t *pudp, pud_t pud)
11644 {
11645 + pax_open_kernel();
11646 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
11647 + pax_close_kernel();
11648 }
11649
11650 /*
11651 diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
11652 index af6fd36..867ff74 100644
11653 --- a/arch/x86/include/asm/pgtable.h
11654 +++ b/arch/x86/include/asm/pgtable.h
11655 @@ -39,6 +39,7 @@ extern struct list_head pgd_list;
11656
11657 #ifndef __PAGETABLE_PUD_FOLDED
11658 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
11659 +#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
11660 #define pgd_clear(pgd) native_pgd_clear(pgd)
11661 #endif
11662
11663 @@ -74,12 +75,51 @@ extern struct list_head pgd_list;
11664
11665 #define arch_end_context_switch(prev) do {} while(0)
11666
11667 +#define pax_open_kernel() native_pax_open_kernel()
11668 +#define pax_close_kernel() native_pax_close_kernel()
11669 #endif /* CONFIG_PARAVIRT */
11670
11671 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
11672 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
11673 +
11674 +#ifdef CONFIG_PAX_KERNEXEC
11675 +static inline unsigned long native_pax_open_kernel(void)
11676 +{
11677 + unsigned long cr0;
11678 +
11679 + preempt_disable();
11680 + barrier();
11681 + cr0 = read_cr0() ^ X86_CR0_WP;
11682 + BUG_ON(unlikely(cr0 & X86_CR0_WP));
11683 + write_cr0(cr0);
11684 + return cr0 ^ X86_CR0_WP;
11685 +}
11686 +
11687 +static inline unsigned long native_pax_close_kernel(void)
11688 +{
11689 + unsigned long cr0;
11690 +
11691 + cr0 = read_cr0() ^ X86_CR0_WP;
11692 + BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
11693 + write_cr0(cr0);
11694 + barrier();
11695 + preempt_enable_no_resched();
11696 + return cr0 ^ X86_CR0_WP;
11697 +}
11698 +#else
11699 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
11700 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
11701 +#endif
11702 +
11703 /*
11704 * The following only work if pte_present() is true.
11705 * Undefined behaviour if not..
11706 */
11707 +static inline int pte_user(pte_t pte)
11708 +{
11709 + return pte_val(pte) & _PAGE_USER;
11710 +}
11711 +
11712 static inline int pte_dirty(pte_t pte)
11713 {
11714 return pte_flags(pte) & _PAGE_DIRTY;
11715 @@ -167,9 +207,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
11716 return pte_clear_flags(pte, _PAGE_RW);
11717 }
11718
11719 +static inline pte_t pte_mkread(pte_t pte)
11720 +{
11721 + return __pte(pte_val(pte) | _PAGE_USER);
11722 +}
11723 +
11724 static inline pte_t pte_mkexec(pte_t pte)
11725 {
11726 - return pte_clear_flags(pte, _PAGE_NX);
11727 +#ifdef CONFIG_X86_PAE
11728 + if (__supported_pte_mask & _PAGE_NX)
11729 + return pte_clear_flags(pte, _PAGE_NX);
11730 + else
11731 +#endif
11732 + return pte_set_flags(pte, _PAGE_USER);
11733 +}
11734 +
11735 +static inline pte_t pte_exprotect(pte_t pte)
11736 +{
11737 +#ifdef CONFIG_X86_PAE
11738 + if (__supported_pte_mask & _PAGE_NX)
11739 + return pte_set_flags(pte, _PAGE_NX);
11740 + else
11741 +#endif
11742 + return pte_clear_flags(pte, _PAGE_USER);
11743 }
11744
11745 static inline pte_t pte_mkdirty(pte_t pte)
11746 @@ -302,6 +362,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
11747 #endif
11748
11749 #ifndef __ASSEMBLY__
11750 +
11751 +#ifdef CONFIG_PAX_PER_CPU_PGD
11752 +extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
11753 +static inline pgd_t *get_cpu_pgd(unsigned int cpu)
11754 +{
11755 + return cpu_pgd[cpu];
11756 +}
11757 +#endif
11758 +
11759 #include <linux/mm_types.h>
11760
11761 static inline int pte_none(pte_t pte)
11762 @@ -472,7 +541,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
11763
11764 static inline int pgd_bad(pgd_t pgd)
11765 {
11766 - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
11767 + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
11768 }
11769
11770 static inline int pgd_none(pgd_t pgd)
11771 @@ -495,7 +564,12 @@ static inline int pgd_none(pgd_t pgd)
11772 * pgd_offset() returns a (pgd_t *)
11773 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
11774 */
11775 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
11776 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
11777 +
11778 +#ifdef CONFIG_PAX_PER_CPU_PGD
11779 +#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
11780 +#endif
11781 +
11782 /*
11783 * a shortcut which implies the use of the kernel's pgd, instead
11784 * of a process's
11785 @@ -506,6 +580,20 @@ static inline int pgd_none(pgd_t pgd)
11786 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
11787 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
11788
11789 +#ifdef CONFIG_X86_32
11790 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
11791 +#else
11792 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
11793 +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
11794 +
11795 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11796 +#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
11797 +#else
11798 +#define PAX_USER_SHADOW_BASE (_AC(0,UL))
11799 +#endif
11800 +
11801 +#endif
11802 +
11803 #ifndef __ASSEMBLY__
11804
11805 extern int direct_gbpages;
11806 @@ -611,11 +699,23 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm,
11807 * dst and src can be on the same page, but the range must not overlap,
11808 * and must not cross a page boundary.
11809 */
11810 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
11811 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
11812 {
11813 - memcpy(dst, src, count * sizeof(pgd_t));
11814 + pax_open_kernel();
11815 + while (count--)
11816 + *dst++ = *src++;
11817 + pax_close_kernel();
11818 }
11819
11820 +#ifdef CONFIG_PAX_PER_CPU_PGD
11821 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
11822 +#endif
11823 +
11824 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11825 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
11826 +#else
11827 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
11828 +#endif
11829
11830 #include <asm-generic/pgtable.h>
11831 #endif /* __ASSEMBLY__ */
11832 diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
11833 index 750f1bf..971e839 100644
11834 --- a/arch/x86/include/asm/pgtable_32.h
11835 +++ b/arch/x86/include/asm/pgtable_32.h
11836 @@ -26,9 +26,6 @@
11837 struct mm_struct;
11838 struct vm_area_struct;
11839
11840 -extern pgd_t swapper_pg_dir[1024];
11841 -extern pgd_t trampoline_pg_dir[1024];
11842 -
11843 static inline void pgtable_cache_init(void) { }
11844 static inline void check_pgt_cache(void) { }
11845 void paging_init(void);
11846 @@ -49,6 +46,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11847 # include <asm/pgtable-2level.h>
11848 #endif
11849
11850 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
11851 +extern pgd_t trampoline_pg_dir[PTRS_PER_PGD];
11852 +#ifdef CONFIG_X86_PAE
11853 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
11854 +#endif
11855 +
11856 #if defined(CONFIG_HIGHPTE)
11857 #define __KM_PTE \
11858 (in_nmi() ? KM_NMI_PTE : \
11859 @@ -73,7 +76,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11860 /* Clear a kernel PTE and flush it from the TLB */
11861 #define kpte_clear_flush(ptep, vaddr) \
11862 do { \
11863 + pax_open_kernel(); \
11864 pte_clear(&init_mm, (vaddr), (ptep)); \
11865 + pax_close_kernel(); \
11866 __flush_tlb_one((vaddr)); \
11867 } while (0)
11868
11869 @@ -85,6 +90,9 @@ do { \
11870
11871 #endif /* !__ASSEMBLY__ */
11872
11873 +#define HAVE_ARCH_UNMAPPED_AREA
11874 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
11875 +
11876 /*
11877 * kern_addr_valid() is (1) for FLATMEM and (0) for
11878 * SPARSEMEM and DISCONTIGMEM
11879 diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
11880 index 5e67c15..12d5c47 100644
11881 --- a/arch/x86/include/asm/pgtable_32_types.h
11882 +++ b/arch/x86/include/asm/pgtable_32_types.h
11883 @@ -8,7 +8,7 @@
11884 */
11885 #ifdef CONFIG_X86_PAE
11886 # include <asm/pgtable-3level_types.h>
11887 -# define PMD_SIZE (1UL << PMD_SHIFT)
11888 +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
11889 # define PMD_MASK (~(PMD_SIZE - 1))
11890 #else
11891 # include <asm/pgtable-2level_types.h>
11892 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
11893 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
11894 #endif
11895
11896 +#ifdef CONFIG_PAX_KERNEXEC
11897 +#ifndef __ASSEMBLY__
11898 +extern unsigned char MODULES_EXEC_VADDR[];
11899 +extern unsigned char MODULES_EXEC_END[];
11900 +#endif
11901 +#include <asm/boot.h>
11902 +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
11903 +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
11904 +#else
11905 +#define ktla_ktva(addr) (addr)
11906 +#define ktva_ktla(addr) (addr)
11907 +#endif
11908 +
11909 #define MODULES_VADDR VMALLOC_START
11910 #define MODULES_END VMALLOC_END
11911 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
11912 diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
11913 index c57a301..6b414ff 100644
11914 --- a/arch/x86/include/asm/pgtable_64.h
11915 +++ b/arch/x86/include/asm/pgtable_64.h
11916 @@ -16,10 +16,14 @@
11917
11918 extern pud_t level3_kernel_pgt[512];
11919 extern pud_t level3_ident_pgt[512];
11920 +extern pud_t level3_vmalloc_start_pgt[512];
11921 +extern pud_t level3_vmalloc_end_pgt[512];
11922 +extern pud_t level3_vmemmap_pgt[512];
11923 +extern pud_t level2_vmemmap_pgt[512];
11924 extern pmd_t level2_kernel_pgt[512];
11925 extern pmd_t level2_fixmap_pgt[512];
11926 -extern pmd_t level2_ident_pgt[512];
11927 -extern pgd_t init_level4_pgt[];
11928 +extern pmd_t level2_ident_pgt[512*2];
11929 +extern pgd_t init_level4_pgt[512];
11930
11931 #define swapper_pg_dir init_level4_pgt
11932
11933 @@ -74,7 +78,9 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
11934
11935 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11936 {
11937 + pax_open_kernel();
11938 *pmdp = pmd;
11939 + pax_close_kernel();
11940 }
11941
11942 static inline void native_pmd_clear(pmd_t *pmd)
11943 @@ -94,6 +100,13 @@ static inline void native_pud_clear(pud_t *pud)
11944
11945 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
11946 {
11947 + pax_open_kernel();
11948 + *pgdp = pgd;
11949 + pax_close_kernel();
11950 +}
11951 +
11952 +static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
11953 +{
11954 *pgdp = pgd;
11955 }
11956
11957 diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
11958 index 766ea16..5b96cb3 100644
11959 --- a/arch/x86/include/asm/pgtable_64_types.h
11960 +++ b/arch/x86/include/asm/pgtable_64_types.h
11961 @@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
11962 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
11963 #define MODULES_END _AC(0xffffffffff000000, UL)
11964 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
11965 +#define MODULES_EXEC_VADDR MODULES_VADDR
11966 +#define MODULES_EXEC_END MODULES_END
11967 +
11968 +#define ktla_ktva(addr) (addr)
11969 +#define ktva_ktla(addr) (addr)
11970
11971 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
11972 diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
11973 index d1f4a76..2f46ba1 100644
11974 --- a/arch/x86/include/asm/pgtable_types.h
11975 +++ b/arch/x86/include/asm/pgtable_types.h
11976 @@ -16,12 +16,11 @@
11977 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
11978 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
11979 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
11980 -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
11981 +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
11982 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
11983 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
11984 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
11985 -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
11986 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
11987 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
11988 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
11989
11990 /* If _PAGE_BIT_PRESENT is clear, we use these: */
11991 @@ -39,7 +38,6 @@
11992 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
11993 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
11994 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
11995 -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
11996 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
11997 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
11998 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
11999 @@ -55,8 +53,10 @@
12000
12001 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
12002 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
12003 -#else
12004 +#elif defined(CONFIG_KMEMCHECK)
12005 #define _PAGE_NX (_AT(pteval_t, 0))
12006 +#else
12007 +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
12008 #endif
12009
12010 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
12011 @@ -93,6 +93,9 @@
12012 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
12013 _PAGE_ACCESSED)
12014
12015 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
12016 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
12017 +
12018 #define __PAGE_KERNEL_EXEC \
12019 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
12020 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
12021 @@ -103,8 +106,8 @@
12022 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
12023 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
12024 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
12025 -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
12026 -#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
12027 +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
12028 +#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
12029 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
12030 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
12031 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
12032 @@ -163,8 +166,8 @@
12033 * bits are combined, this will alow user to access the high address mapped
12034 * VDSO in the presence of CONFIG_COMPAT_VDSO
12035 */
12036 -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
12037 -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
12038 +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
12039 +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
12040 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
12041 #endif
12042
12043 @@ -202,7 +205,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
12044 {
12045 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
12046 }
12047 +#endif
12048
12049 +#if PAGETABLE_LEVELS == 3
12050 +#include <asm-generic/pgtable-nopud.h>
12051 +#endif
12052 +
12053 +#if PAGETABLE_LEVELS == 2
12054 +#include <asm-generic/pgtable-nopmd.h>
12055 +#endif
12056 +
12057 +#ifndef __ASSEMBLY__
12058 #if PAGETABLE_LEVELS > 3
12059 typedef struct { pudval_t pud; } pud_t;
12060
12061 @@ -216,8 +229,6 @@ static inline pudval_t native_pud_val(pud_t pud)
12062 return pud.pud;
12063 }
12064 #else
12065 -#include <asm-generic/pgtable-nopud.h>
12066 -
12067 static inline pudval_t native_pud_val(pud_t pud)
12068 {
12069 return native_pgd_val(pud.pgd);
12070 @@ -237,8 +248,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
12071 return pmd.pmd;
12072 }
12073 #else
12074 -#include <asm-generic/pgtable-nopmd.h>
12075 -
12076 static inline pmdval_t native_pmd_val(pmd_t pmd)
12077 {
12078 return native_pgd_val(pmd.pud.pgd);
12079 @@ -278,7 +287,16 @@ typedef struct page *pgtable_t;
12080
12081 extern pteval_t __supported_pte_mask;
12082 extern void set_nx(void);
12083 +
12084 +#ifdef CONFIG_X86_32
12085 +#ifdef CONFIG_X86_PAE
12086 extern int nx_enabled;
12087 +#else
12088 +#define nx_enabled (0)
12089 +#endif
12090 +#else
12091 +#define nx_enabled (1)
12092 +#endif
12093
12094 #define pgprot_writecombine pgprot_writecombine
12095 extern pgprot_t pgprot_writecombine(pgprot_t prot);
12096 diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
12097 index fa04dea..5f823fc 100644
12098 --- a/arch/x86/include/asm/processor.h
12099 +++ b/arch/x86/include/asm/processor.h
12100 @@ -272,7 +272,7 @@ struct tss_struct {
12101
12102 } ____cacheline_aligned;
12103
12104 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
12105 +extern struct tss_struct init_tss[NR_CPUS];
12106
12107 /*
12108 * Save the original ist values for checking stack pointers during debugging
12109 @@ -911,11 +911,18 @@ static inline void spin_lock_prefetch(const void *x)
12110 */
12111 #define TASK_SIZE PAGE_OFFSET
12112 #define TASK_SIZE_MAX TASK_SIZE
12113 +
12114 +#ifdef CONFIG_PAX_SEGMEXEC
12115 +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
12116 +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
12117 +#else
12118 #define STACK_TOP TASK_SIZE
12119 -#define STACK_TOP_MAX STACK_TOP
12120 +#endif
12121 +
12122 +#define STACK_TOP_MAX TASK_SIZE
12123
12124 #define INIT_THREAD { \
12125 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
12126 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
12127 .vm86_info = NULL, \
12128 .sysenter_cs = __KERNEL_CS, \
12129 .io_bitmap_ptr = NULL, \
12130 @@ -929,7 +936,7 @@ static inline void spin_lock_prefetch(const void *x)
12131 */
12132 #define INIT_TSS { \
12133 .x86_tss = { \
12134 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
12135 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
12136 .ss0 = __KERNEL_DS, \
12137 .ss1 = __KERNEL_CS, \
12138 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
12139 @@ -940,11 +947,7 @@ static inline void spin_lock_prefetch(const void *x)
12140 extern unsigned long thread_saved_pc(struct task_struct *tsk);
12141
12142 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
12143 -#define KSTK_TOP(info) \
12144 -({ \
12145 - unsigned long *__ptr = (unsigned long *)(info); \
12146 - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
12147 -})
12148 +#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
12149
12150 /*
12151 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
12152 @@ -959,7 +962,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
12153 #define task_pt_regs(task) \
12154 ({ \
12155 struct pt_regs *__regs__; \
12156 - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
12157 + __regs__ = (struct pt_regs *)((task)->thread.sp0); \
12158 __regs__ - 1; \
12159 })
12160
12161 @@ -969,13 +972,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
12162 /*
12163 * User space process size. 47bits minus one guard page.
12164 */
12165 -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
12166 +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
12167
12168 /* This decides where the kernel will search for a free chunk of vm
12169 * space during mmap's.
12170 */
12171 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
12172 - 0xc0000000 : 0xFFFFe000)
12173 + 0xc0000000 : 0xFFFFf000)
12174
12175 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
12176 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
12177 @@ -986,11 +989,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
12178 #define STACK_TOP_MAX TASK_SIZE_MAX
12179
12180 #define INIT_THREAD { \
12181 - .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
12182 + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
12183 }
12184
12185 #define INIT_TSS { \
12186 - .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
12187 + .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
12188 }
12189
12190 /*
12191 @@ -1012,6 +1015,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
12192 */
12193 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
12194
12195 +#ifdef CONFIG_PAX_SEGMEXEC
12196 +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
12197 +#endif
12198 +
12199 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
12200
12201 /* Get/set a process' ability to use the timestamp counter instruction */
12202 diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
12203 index 0f0d908..f2e3da2 100644
12204 --- a/arch/x86/include/asm/ptrace.h
12205 +++ b/arch/x86/include/asm/ptrace.h
12206 @@ -151,28 +151,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
12207 }
12208
12209 /*
12210 - * user_mode_vm(regs) determines whether a register set came from user mode.
12211 + * user_mode(regs) determines whether a register set came from user mode.
12212 * This is true if V8086 mode was enabled OR if the register set was from
12213 * protected mode with RPL-3 CS value. This tricky test checks that with
12214 * one comparison. Many places in the kernel can bypass this full check
12215 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
12216 + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
12217 + * be used.
12218 */
12219 -static inline int user_mode(struct pt_regs *regs)
12220 +static inline int user_mode_novm(struct pt_regs *regs)
12221 {
12222 #ifdef CONFIG_X86_32
12223 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
12224 #else
12225 - return !!(regs->cs & 3);
12226 + return !!(regs->cs & SEGMENT_RPL_MASK);
12227 #endif
12228 }
12229
12230 -static inline int user_mode_vm(struct pt_regs *regs)
12231 +static inline int user_mode(struct pt_regs *regs)
12232 {
12233 #ifdef CONFIG_X86_32
12234 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
12235 USER_RPL;
12236 #else
12237 - return user_mode(regs);
12238 + return user_mode_novm(regs);
12239 #endif
12240 }
12241
12242 diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
12243 index 562d4fd..6e39df1 100644
12244 --- a/arch/x86/include/asm/reboot.h
12245 +++ b/arch/x86/include/asm/reboot.h
12246 @@ -6,19 +6,19 @@
12247 struct pt_regs;
12248
12249 struct machine_ops {
12250 - void (*restart)(char *cmd);
12251 - void (*halt)(void);
12252 - void (*power_off)(void);
12253 + void (* __noreturn restart)(char *cmd);
12254 + void (* __noreturn halt)(void);
12255 + void (* __noreturn power_off)(void);
12256 void (*shutdown)(void);
12257 void (*crash_shutdown)(struct pt_regs *);
12258 - void (*emergency_restart)(void);
12259 -};
12260 + void (* __noreturn emergency_restart)(void);
12261 +} __no_const;
12262
12263 extern struct machine_ops machine_ops;
12264
12265 void native_machine_crash_shutdown(struct pt_regs *regs);
12266 void native_machine_shutdown(void);
12267 -void machine_real_restart(const unsigned char *code, int length);
12268 +void machine_real_restart(const unsigned char *code, unsigned int length) __noreturn;
12269
12270 typedef void (*nmi_shootdown_cb)(int, struct die_args*);
12271 void nmi_shootdown_cpus(nmi_shootdown_cb callback);
12272 diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
12273 index 606ede1..dbfff37 100644
12274 --- a/arch/x86/include/asm/rwsem.h
12275 +++ b/arch/x86/include/asm/rwsem.h
12276 @@ -118,6 +118,14 @@ static inline void __down_read(struct rw_semaphore *sem)
12277 {
12278 asm volatile("# beginning down_read\n\t"
12279 LOCK_PREFIX _ASM_INC "(%1)\n\t"
12280 +
12281 +#ifdef CONFIG_PAX_REFCOUNT
12282 + "jno 0f\n"
12283 + LOCK_PREFIX _ASM_DEC "(%1)\n\t"
12284 + "int $4\n0:\n"
12285 + _ASM_EXTABLE(0b, 0b)
12286 +#endif
12287 +
12288 /* adds 0x00000001, returns the old value */
12289 " jns 1f\n"
12290 " call call_rwsem_down_read_failed\n"
12291 @@ -139,6 +147,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
12292 "1:\n\t"
12293 " mov %1,%2\n\t"
12294 " add %3,%2\n\t"
12295 +
12296 +#ifdef CONFIG_PAX_REFCOUNT
12297 + "jno 0f\n"
12298 + "sub %3,%2\n"
12299 + "int $4\n0:\n"
12300 + _ASM_EXTABLE(0b, 0b)
12301 +#endif
12302 +
12303 " jle 2f\n\t"
12304 LOCK_PREFIX " cmpxchg %2,%0\n\t"
12305 " jnz 1b\n\t"
12306 @@ -160,6 +176,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
12307 tmp = RWSEM_ACTIVE_WRITE_BIAS;
12308 asm volatile("# beginning down_write\n\t"
12309 LOCK_PREFIX " xadd %1,(%2)\n\t"
12310 +
12311 +#ifdef CONFIG_PAX_REFCOUNT
12312 + "jno 0f\n"
12313 + "mov %1,(%2)\n"
12314 + "int $4\n0:\n"
12315 + _ASM_EXTABLE(0b, 0b)
12316 +#endif
12317 +
12318 /* subtract 0x0000ffff, returns the old value */
12319 " test %1,%1\n\t"
12320 /* was the count 0 before? */
12321 @@ -198,6 +222,14 @@ static inline void __up_read(struct rw_semaphore *sem)
12322 rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS;
12323 asm volatile("# beginning __up_read\n\t"
12324 LOCK_PREFIX " xadd %1,(%2)\n\t"
12325 +
12326 +#ifdef CONFIG_PAX_REFCOUNT
12327 + "jno 0f\n"
12328 + "mov %1,(%2)\n"
12329 + "int $4\n0:\n"
12330 + _ASM_EXTABLE(0b, 0b)
12331 +#endif
12332 +
12333 /* subtracts 1, returns the old value */
12334 " jns 1f\n\t"
12335 " call call_rwsem_wake\n"
12336 @@ -216,6 +248,14 @@ static inline void __up_write(struct rw_semaphore *sem)
12337 rwsem_count_t tmp;
12338 asm volatile("# beginning __up_write\n\t"
12339 LOCK_PREFIX " xadd %1,(%2)\n\t"
12340 +
12341 +#ifdef CONFIG_PAX_REFCOUNT
12342 + "jno 0f\n"
12343 + "mov %1,(%2)\n"
12344 + "int $4\n0:\n"
12345 + _ASM_EXTABLE(0b, 0b)
12346 +#endif
12347 +
12348 /* tries to transition
12349 0xffff0001 -> 0x00000000 */
12350 " jz 1f\n"
12351 @@ -234,6 +274,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
12352 {
12353 asm volatile("# beginning __downgrade_write\n\t"
12354 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
12355 +
12356 +#ifdef CONFIG_PAX_REFCOUNT
12357 + "jno 0f\n"
12358 + LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
12359 + "int $4\n0:\n"
12360 + _ASM_EXTABLE(0b, 0b)
12361 +#endif
12362 +
12363 /*
12364 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
12365 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
12366 @@ -253,7 +301,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
12367 static inline void rwsem_atomic_add(rwsem_count_t delta,
12368 struct rw_semaphore *sem)
12369 {
12370 - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
12371 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
12372 +
12373 +#ifdef CONFIG_PAX_REFCOUNT
12374 + "jno 0f\n"
12375 + LOCK_PREFIX _ASM_SUB "%1,%0\n"
12376 + "int $4\n0:\n"
12377 + _ASM_EXTABLE(0b, 0b)
12378 +#endif
12379 +
12380 : "+m" (sem->count)
12381 : "er" (delta));
12382 }
12383 @@ -266,7 +322,15 @@ static inline rwsem_count_t rwsem_atomic_update(rwsem_count_t delta,
12384 {
12385 rwsem_count_t tmp = delta;
12386
12387 - asm volatile(LOCK_PREFIX "xadd %0,%1"
12388 + asm volatile(LOCK_PREFIX "xadd %0,%1\n"
12389 +
12390 +#ifdef CONFIG_PAX_REFCOUNT
12391 + "jno 0f\n"
12392 + "mov %0,%1\n"
12393 + "int $4\n0:\n"
12394 + _ASM_EXTABLE(0b, 0b)
12395 +#endif
12396 +
12397 : "+r" (tmp), "+m" (sem->count)
12398 : : "memory");
12399
12400 diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
12401 index 14e0ed8..7f7dd5e 100644
12402 --- a/arch/x86/include/asm/segment.h
12403 +++ b/arch/x86/include/asm/segment.h
12404 @@ -62,10 +62,15 @@
12405 * 26 - ESPFIX small SS
12406 * 27 - per-cpu [ offset to per-cpu data area ]
12407 * 28 - stack_canary-20 [ for stack protector ]
12408 - * 29 - unused
12409 - * 30 - unused
12410 + * 29 - PCI BIOS CS
12411 + * 30 - PCI BIOS DS
12412 * 31 - TSS for double fault handler
12413 */
12414 +#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
12415 +#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
12416 +#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
12417 +#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
12418 +
12419 #define GDT_ENTRY_TLS_MIN 6
12420 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
12421
12422 @@ -77,6 +82,8 @@
12423
12424 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0)
12425
12426 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
12427 +
12428 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1)
12429
12430 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4)
12431 @@ -88,7 +95,7 @@
12432 #define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14)
12433 #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
12434
12435 -#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
12436 +#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
12437 #ifdef CONFIG_SMP
12438 #define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8)
12439 #else
12440 @@ -102,6 +109,12 @@
12441 #define __KERNEL_STACK_CANARY 0
12442 #endif
12443
12444 +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE + 17)
12445 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
12446 +
12447 +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE + 18)
12448 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
12449 +
12450 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
12451
12452 /*
12453 @@ -139,7 +152,7 @@
12454 */
12455
12456 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
12457 -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
12458 +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
12459
12460
12461 #else
12462 @@ -163,6 +176,8 @@
12463 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
12464 #define __USER32_DS __USER_DS
12465
12466 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
12467 +
12468 #define GDT_ENTRY_TSS 8 /* needs two entries */
12469 #define GDT_ENTRY_LDT 10 /* needs two entries */
12470 #define GDT_ENTRY_TLS_MIN 12
12471 @@ -183,6 +198,7 @@
12472 #endif
12473
12474 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8)
12475 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS * 8)
12476 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8)
12477 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS* 8 + 3)
12478 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS* 8 + 3)
12479 diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
12480 index 4c2f63c..5685db2 100644
12481 --- a/arch/x86/include/asm/smp.h
12482 +++ b/arch/x86/include/asm/smp.h
12483 @@ -24,7 +24,7 @@ extern unsigned int num_processors;
12484 DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
12485 DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
12486 DECLARE_PER_CPU(u16, cpu_llc_id);
12487 -DECLARE_PER_CPU(int, cpu_number);
12488 +DECLARE_PER_CPU(unsigned int, cpu_number);
12489
12490 static inline struct cpumask *cpu_sibling_mask(int cpu)
12491 {
12492 @@ -40,10 +40,7 @@ DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
12493 DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
12494
12495 /* Static state in head.S used to set up a CPU */
12496 -extern struct {
12497 - void *sp;
12498 - unsigned short ss;
12499 -} stack_start;
12500 +extern unsigned long stack_start; /* Initial stack pointer address */
12501
12502 struct smp_ops {
12503 void (*smp_prepare_boot_cpu)(void);
12504 @@ -60,7 +57,7 @@ struct smp_ops {
12505
12506 void (*send_call_func_ipi)(const struct cpumask *mask);
12507 void (*send_call_func_single_ipi)(int cpu);
12508 -};
12509 +} __no_const;
12510
12511 /* Globals due to paravirt */
12512 extern void set_cpu_sibling_map(int cpu);
12513 @@ -175,14 +172,8 @@ extern unsigned disabled_cpus __cpuinitdata;
12514 extern int safe_smp_processor_id(void);
12515
12516 #elif defined(CONFIG_X86_64_SMP)
12517 -#define raw_smp_processor_id() (percpu_read(cpu_number))
12518 -
12519 -#define stack_smp_processor_id() \
12520 -({ \
12521 - struct thread_info *ti; \
12522 - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
12523 - ti->cpu; \
12524 -})
12525 +#define raw_smp_processor_id() (percpu_read(cpu_number))
12526 +#define stack_smp_processor_id() raw_smp_processor_id()
12527 #define safe_smp_processor_id() smp_processor_id()
12528
12529 #endif
12530 diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
12531 index 4e77853..4359783 100644
12532 --- a/arch/x86/include/asm/spinlock.h
12533 +++ b/arch/x86/include/asm/spinlock.h
12534 @@ -249,6 +249,14 @@ static inline int __raw_write_can_lock(raw_rwlock_t *lock)
12535 static inline void __raw_read_lock(raw_rwlock_t *rw)
12536 {
12537 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
12538 +
12539 +#ifdef CONFIG_PAX_REFCOUNT
12540 + "jno 0f\n"
12541 + LOCK_PREFIX " addl $1,(%0)\n"
12542 + "int $4\n0:\n"
12543 + _ASM_EXTABLE(0b, 0b)
12544 +#endif
12545 +
12546 "jns 1f\n"
12547 "call __read_lock_failed\n\t"
12548 "1:\n"
12549 @@ -258,6 +266,14 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
12550 static inline void __raw_write_lock(raw_rwlock_t *rw)
12551 {
12552 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
12553 +
12554 +#ifdef CONFIG_PAX_REFCOUNT
12555 + "jno 0f\n"
12556 + LOCK_PREFIX " addl %1,(%0)\n"
12557 + "int $4\n0:\n"
12558 + _ASM_EXTABLE(0b, 0b)
12559 +#endif
12560 +
12561 "jz 1f\n"
12562 "call __write_lock_failed\n\t"
12563 "1:\n"
12564 @@ -286,12 +302,29 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock)
12565
12566 static inline void __raw_read_unlock(raw_rwlock_t *rw)
12567 {
12568 - asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
12569 + asm volatile(LOCK_PREFIX "incl %0\n"
12570 +
12571 +#ifdef CONFIG_PAX_REFCOUNT
12572 + "jno 0f\n"
12573 + LOCK_PREFIX "decl %0\n"
12574 + "int $4\n0:\n"
12575 + _ASM_EXTABLE(0b, 0b)
12576 +#endif
12577 +
12578 + :"+m" (rw->lock) : : "memory");
12579 }
12580
12581 static inline void __raw_write_unlock(raw_rwlock_t *rw)
12582 {
12583 - asm volatile(LOCK_PREFIX "addl %1, %0"
12584 + asm volatile(LOCK_PREFIX "addl %1, %0\n"
12585 +
12586 +#ifdef CONFIG_PAX_REFCOUNT
12587 + "jno 0f\n"
12588 + LOCK_PREFIX "subl %1, %0\n"
12589 + "int $4\n0:\n"
12590 + _ASM_EXTABLE(0b, 0b)
12591 +#endif
12592 +
12593 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
12594 }
12595
12596 diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
12597 index 1575177..cb23f52 100644
12598 --- a/arch/x86/include/asm/stackprotector.h
12599 +++ b/arch/x86/include/asm/stackprotector.h
12600 @@ -48,7 +48,7 @@
12601 * head_32 for boot CPU and setup_per_cpu_areas() for others.
12602 */
12603 #define GDT_STACK_CANARY_INIT \
12604 - [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
12605 + [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
12606
12607 /*
12608 * Initialize the stackprotector canary value.
12609 @@ -113,7 +113,7 @@ static inline void setup_stack_canary_segment(int cpu)
12610
12611 static inline void load_stack_canary_segment(void)
12612 {
12613 -#ifdef CONFIG_X86_32
12614 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
12615 asm volatile ("mov %0, %%gs" : : "r" (0));
12616 #endif
12617 }
12618 diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
12619 index e0fbf29..858ef4a 100644
12620 --- a/arch/x86/include/asm/system.h
12621 +++ b/arch/x86/include/asm/system.h
12622 @@ -132,7 +132,7 @@ do { \
12623 "thread_return:\n\t" \
12624 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
12625 __switch_canary \
12626 - "movq %P[thread_info](%%rsi),%%r8\n\t" \
12627 + "movq "__percpu_arg([thread_info])",%%r8\n\t" \
12628 "movq %%rax,%%rdi\n\t" \
12629 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
12630 "jnz ret_from_fork\n\t" \
12631 @@ -143,7 +143,7 @@ do { \
12632 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
12633 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
12634 [_tif_fork] "i" (_TIF_FORK), \
12635 - [thread_info] "i" (offsetof(struct task_struct, stack)), \
12636 + [thread_info] "m" (per_cpu_var(current_tinfo)), \
12637 [current_task] "m" (per_cpu_var(current_task)) \
12638 __switch_canary_iparam \
12639 : "memory", "cc" __EXTRA_CLOBBER)
12640 @@ -200,7 +200,7 @@ static inline unsigned long get_limit(unsigned long segment)
12641 {
12642 unsigned long __limit;
12643 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
12644 - return __limit + 1;
12645 + return __limit;
12646 }
12647
12648 static inline void native_clts(void)
12649 @@ -340,12 +340,12 @@ void enable_hlt(void);
12650
12651 void cpu_idle_wait(void);
12652
12653 -extern unsigned long arch_align_stack(unsigned long sp);
12654 +#define arch_align_stack(x) ((x) & ~0xfUL)
12655 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
12656
12657 void default_idle(void);
12658
12659 -void stop_this_cpu(void *dummy);
12660 +void stop_this_cpu(void *dummy) __noreturn;
12661
12662 /*
12663 * Force strict CPU ordering.
12664 diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
12665 index 19c3ce4..8962535 100644
12666 --- a/arch/x86/include/asm/thread_info.h
12667 +++ b/arch/x86/include/asm/thread_info.h
12668 @@ -10,6 +10,7 @@
12669 #include <linux/compiler.h>
12670 #include <asm/page.h>
12671 #include <asm/types.h>
12672 +#include <asm/percpu.h>
12673
12674 /*
12675 * low level task data that entry.S needs immediate access to
12676 @@ -24,7 +25,6 @@ struct exec_domain;
12677 #include <asm/atomic.h>
12678
12679 struct thread_info {
12680 - struct task_struct *task; /* main task structure */
12681 struct exec_domain *exec_domain; /* execution domain */
12682 __u32 flags; /* low level flags */
12683 __u32 status; /* thread synchronous flags */
12684 @@ -34,18 +34,12 @@ struct thread_info {
12685 mm_segment_t addr_limit;
12686 struct restart_block restart_block;
12687 void __user *sysenter_return;
12688 -#ifdef CONFIG_X86_32
12689 - unsigned long previous_esp; /* ESP of the previous stack in
12690 - case of nested (IRQ) stacks
12691 - */
12692 - __u8 supervisor_stack[0];
12693 -#endif
12694 + unsigned long lowest_stack;
12695 int uaccess_err;
12696 };
12697
12698 -#define INIT_THREAD_INFO(tsk) \
12699 +#define INIT_THREAD_INFO \
12700 { \
12701 - .task = &tsk, \
12702 .exec_domain = &default_exec_domain, \
12703 .flags = 0, \
12704 .cpu = 0, \
12705 @@ -56,7 +50,7 @@ struct thread_info {
12706 }, \
12707 }
12708
12709 -#define init_thread_info (init_thread_union.thread_info)
12710 +#define init_thread_info (init_thread_union.stack)
12711 #define init_stack (init_thread_union.stack)
12712
12713 #else /* !__ASSEMBLY__ */
12714 @@ -163,45 +157,40 @@ struct thread_info {
12715 #define alloc_thread_info(tsk) \
12716 ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
12717
12718 -#ifdef CONFIG_X86_32
12719 -
12720 -#define STACK_WARN (THREAD_SIZE/8)
12721 -/*
12722 - * macros/functions for gaining access to the thread information structure
12723 - *
12724 - * preempt_count needs to be 1 initially, until the scheduler is functional.
12725 - */
12726 -#ifndef __ASSEMBLY__
12727 -
12728 -
12729 -/* how to get the current stack pointer from C */
12730 -register unsigned long current_stack_pointer asm("esp") __used;
12731 -
12732 -/* how to get the thread information struct from C */
12733 -static inline struct thread_info *current_thread_info(void)
12734 -{
12735 - return (struct thread_info *)
12736 - (current_stack_pointer & ~(THREAD_SIZE - 1));
12737 -}
12738 -
12739 -#else /* !__ASSEMBLY__ */
12740 -
12741 +#ifdef __ASSEMBLY__
12742 /* how to get the thread information struct from ASM */
12743 #define GET_THREAD_INFO(reg) \
12744 - movl $-THREAD_SIZE, reg; \
12745 - andl %esp, reg
12746 + mov PER_CPU_VAR(current_tinfo), reg
12747
12748 /* use this one if reg already contains %esp */
12749 -#define GET_THREAD_INFO_WITH_ESP(reg) \
12750 - andl $-THREAD_SIZE, reg
12751 +#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
12752 +#else
12753 +/* how to get the thread information struct from C */
12754 +DECLARE_PER_CPU(struct thread_info *, current_tinfo);
12755 +
12756 +static __always_inline struct thread_info *current_thread_info(void)
12757 +{
12758 + return percpu_read_stable(current_tinfo);
12759 +}
12760 +#endif
12761 +
12762 +#ifdef CONFIG_X86_32
12763 +
12764 +#define STACK_WARN (THREAD_SIZE/8)
12765 +/*
12766 + * macros/functions for gaining access to the thread information structure
12767 + *
12768 + * preempt_count needs to be 1 initially, until the scheduler is functional.
12769 + */
12770 +#ifndef __ASSEMBLY__
12771 +
12772 +/* how to get the current stack pointer from C */
12773 +register unsigned long current_stack_pointer asm("esp") __used;
12774
12775 #endif
12776
12777 #else /* X86_32 */
12778
12779 -#include <asm/percpu.h>
12780 -#define KERNEL_STACK_OFFSET (5*8)
12781 -
12782 /*
12783 * macros/functions for gaining access to the thread information structure
12784 * preempt_count needs to be 1 initially, until the scheduler is functional.
12785 @@ -209,21 +198,8 @@ static inline struct thread_info *current_thread_info(void)
12786 #ifndef __ASSEMBLY__
12787 DECLARE_PER_CPU(unsigned long, kernel_stack);
12788
12789 -static inline struct thread_info *current_thread_info(void)
12790 -{
12791 - struct thread_info *ti;
12792 - ti = (void *)(percpu_read_stable(kernel_stack) +
12793 - KERNEL_STACK_OFFSET - THREAD_SIZE);
12794 - return ti;
12795 -}
12796 -
12797 -#else /* !__ASSEMBLY__ */
12798 -
12799 -/* how to get the thread information struct from ASM */
12800 -#define GET_THREAD_INFO(reg) \
12801 - movq PER_CPU_VAR(kernel_stack),reg ; \
12802 - subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
12803 -
12804 +/* how to get the current stack pointer from C */
12805 +register unsigned long current_stack_pointer asm("rsp") __used;
12806 #endif
12807
12808 #endif /* !X86_32 */
12809 @@ -260,5 +236,16 @@ extern void arch_task_cache_init(void);
12810 extern void free_thread_info(struct thread_info *ti);
12811 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
12812 #define arch_task_cache_init arch_task_cache_init
12813 +
12814 +#define __HAVE_THREAD_FUNCTIONS
12815 +#define task_thread_info(task) (&(task)->tinfo)
12816 +#define task_stack_page(task) ((task)->stack)
12817 +#define setup_thread_stack(p, org) do {} while (0)
12818 +#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
12819 +
12820 +#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
12821 +extern struct task_struct *alloc_task_struct(void);
12822 +extern void free_task_struct(struct task_struct *);
12823 +
12824 #endif
12825 #endif /* _ASM_X86_THREAD_INFO_H */
12826 diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
12827 index 61c5874..8a046e9 100644
12828 --- a/arch/x86/include/asm/uaccess.h
12829 +++ b/arch/x86/include/asm/uaccess.h
12830 @@ -8,12 +8,15 @@
12831 #include <linux/thread_info.h>
12832 #include <linux/prefetch.h>
12833 #include <linux/string.h>
12834 +#include <linux/sched.h>
12835 #include <asm/asm.h>
12836 #include <asm/page.h>
12837
12838 #define VERIFY_READ 0
12839 #define VERIFY_WRITE 1
12840
12841 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
12842 +
12843 /*
12844 * The fs value determines whether argument validity checking should be
12845 * performed or not. If get_fs() == USER_DS, checking is performed, with
12846 @@ -29,7 +32,12 @@
12847
12848 #define get_ds() (KERNEL_DS)
12849 #define get_fs() (current_thread_info()->addr_limit)
12850 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12851 +void __set_fs(mm_segment_t x);
12852 +void set_fs(mm_segment_t x);
12853 +#else
12854 #define set_fs(x) (current_thread_info()->addr_limit = (x))
12855 +#endif
12856
12857 #define segment_eq(a, b) ((a).seg == (b).seg)
12858
12859 @@ -77,7 +85,33 @@
12860 * checks that the pointer is in the user space range - after calling
12861 * this function, memory access functions may still return -EFAULT.
12862 */
12863 -#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12864 +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12865 +#define access_ok(type, addr, size) \
12866 +({ \
12867 + long __size = size; \
12868 + unsigned long __addr = (unsigned long)addr; \
12869 + unsigned long __addr_ao = __addr & PAGE_MASK; \
12870 + unsigned long __end_ao = __addr + __size - 1; \
12871 + bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
12872 + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
12873 + while(__addr_ao <= __end_ao) { \
12874 + char __c_ao; \
12875 + __addr_ao += PAGE_SIZE; \
12876 + if (__size > PAGE_SIZE) \
12877 + cond_resched(); \
12878 + if (__get_user(__c_ao, (char __user *)__addr)) \
12879 + break; \
12880 + if (type != VERIFY_WRITE) { \
12881 + __addr = __addr_ao; \
12882 + continue; \
12883 + } \
12884 + if (__put_user(__c_ao, (char __user *)__addr)) \
12885 + break; \
12886 + __addr = __addr_ao; \
12887 + } \
12888 + } \
12889 + __ret_ao; \
12890 +})
12891
12892 /*
12893 * The exception table consists of pairs of addresses: the first is the
12894 @@ -183,12 +217,20 @@ extern int __get_user_bad(void);
12895 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
12896 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
12897
12898 -
12899 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12900 +#define __copyuser_seg "gs;"
12901 +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
12902 +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
12903 +#else
12904 +#define __copyuser_seg
12905 +#define __COPYUSER_SET_ES
12906 +#define __COPYUSER_RESTORE_ES
12907 +#endif
12908
12909 #ifdef CONFIG_X86_32
12910 #define __put_user_asm_u64(x, addr, err, errret) \
12911 - asm volatile("1: movl %%eax,0(%2)\n" \
12912 - "2: movl %%edx,4(%2)\n" \
12913 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
12914 + "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
12915 "3:\n" \
12916 ".section .fixup,\"ax\"\n" \
12917 "4: movl %3,%0\n" \
12918 @@ -200,8 +242,8 @@ extern int __get_user_bad(void);
12919 : "A" (x), "r" (addr), "i" (errret), "0" (err))
12920
12921 #define __put_user_asm_ex_u64(x, addr) \
12922 - asm volatile("1: movl %%eax,0(%1)\n" \
12923 - "2: movl %%edx,4(%1)\n" \
12924 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
12925 + "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
12926 "3:\n" \
12927 _ASM_EXTABLE(1b, 2b - 1b) \
12928 _ASM_EXTABLE(2b, 3b - 2b) \
12929 @@ -253,7 +295,7 @@ extern void __put_user_8(void);
12930 __typeof__(*(ptr)) __pu_val; \
12931 __chk_user_ptr(ptr); \
12932 might_fault(); \
12933 - __pu_val = x; \
12934 + __pu_val = (x); \
12935 switch (sizeof(*(ptr))) { \
12936 case 1: \
12937 __put_user_x(1, __pu_val, ptr, __ret_pu); \
12938 @@ -374,7 +416,7 @@ do { \
12939 } while (0)
12940
12941 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12942 - asm volatile("1: mov"itype" %2,%"rtype"1\n" \
12943 + asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
12944 "2:\n" \
12945 ".section .fixup,\"ax\"\n" \
12946 "3: mov %3,%0\n" \
12947 @@ -382,7 +424,7 @@ do { \
12948 " jmp 2b\n" \
12949 ".previous\n" \
12950 _ASM_EXTABLE(1b, 3b) \
12951 - : "=r" (err), ltype(x) \
12952 + : "=r" (err), ltype (x) \
12953 : "m" (__m(addr)), "i" (errret), "0" (err))
12954
12955 #define __get_user_size_ex(x, ptr, size) \
12956 @@ -407,7 +449,7 @@ do { \
12957 } while (0)
12958
12959 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
12960 - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
12961 + asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
12962 "2:\n" \
12963 _ASM_EXTABLE(1b, 2b - 1b) \
12964 : ltype(x) : "m" (__m(addr)))
12965 @@ -424,13 +466,24 @@ do { \
12966 int __gu_err; \
12967 unsigned long __gu_val; \
12968 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
12969 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
12970 + (x) = (__typeof__(*(ptr)))__gu_val; \
12971 __gu_err; \
12972 })
12973
12974 /* FIXME: this hack is definitely wrong -AK */
12975 struct __large_struct { unsigned long buf[100]; };
12976 -#define __m(x) (*(struct __large_struct __user *)(x))
12977 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12978 +#define ____m(x) \
12979 +({ \
12980 + unsigned long ____x = (unsigned long)(x); \
12981 + if (____x < PAX_USER_SHADOW_BASE) \
12982 + ____x += PAX_USER_SHADOW_BASE; \
12983 + (void __user *)____x; \
12984 +})
12985 +#else
12986 +#define ____m(x) (x)
12987 +#endif
12988 +#define __m(x) (*(struct __large_struct __user *)____m(x))
12989
12990 /*
12991 * Tell gcc we read from memory instead of writing: this is because
12992 @@ -438,7 +491,7 @@ struct __large_struct { unsigned long buf[100]; };
12993 * aliasing issues.
12994 */
12995 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12996 - asm volatile("1: mov"itype" %"rtype"1,%2\n" \
12997 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
12998 "2:\n" \
12999 ".section .fixup,\"ax\"\n" \
13000 "3: mov %3,%0\n" \
13001 @@ -446,10 +499,10 @@ struct __large_struct { unsigned long buf[100]; };
13002 ".previous\n" \
13003 _ASM_EXTABLE(1b, 3b) \
13004 : "=r"(err) \
13005 - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
13006 + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
13007
13008 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
13009 - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
13010 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
13011 "2:\n" \
13012 _ASM_EXTABLE(1b, 2b - 1b) \
13013 : : ltype(x), "m" (__m(addr)))
13014 @@ -488,8 +541,12 @@ struct __large_struct { unsigned long buf[100]; };
13015 * On error, the variable @x is set to zero.
13016 */
13017
13018 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13019 +#define __get_user(x, ptr) get_user((x), (ptr))
13020 +#else
13021 #define __get_user(x, ptr) \
13022 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
13023 +#endif
13024
13025 /**
13026 * __put_user: - Write a simple value into user space, with less checking.
13027 @@ -511,8 +568,12 @@ struct __large_struct { unsigned long buf[100]; };
13028 * Returns zero on success, or -EFAULT on error.
13029 */
13030
13031 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13032 +#define __put_user(x, ptr) put_user((x), (ptr))
13033 +#else
13034 #define __put_user(x, ptr) \
13035 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
13036 +#endif
13037
13038 #define __get_user_unaligned __get_user
13039 #define __put_user_unaligned __put_user
13040 @@ -530,7 +591,7 @@ struct __large_struct { unsigned long buf[100]; };
13041 #define get_user_ex(x, ptr) do { \
13042 unsigned long __gue_val; \
13043 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
13044 - (x) = (__force __typeof__(*(ptr)))__gue_val; \
13045 + (x) = (__typeof__(*(ptr)))__gue_val; \
13046 } while (0)
13047
13048 #ifdef CONFIG_X86_WP_WORKS_OK
13049 @@ -567,6 +628,7 @@ extern struct movsl_mask {
13050
13051 #define ARCH_HAS_NOCACHE_UACCESS 1
13052
13053 +#define ARCH_HAS_SORT_EXTABLE
13054 #ifdef CONFIG_X86_32
13055 # include "uaccess_32.h"
13056 #else
13057 diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
13058 index 632fb44..e30e334 100644
13059 --- a/arch/x86/include/asm/uaccess_32.h
13060 +++ b/arch/x86/include/asm/uaccess_32.h
13061 @@ -44,6 +44,11 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
13062 static __always_inline unsigned long __must_check
13063 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
13064 {
13065 + pax_track_stack();
13066 +
13067 + if ((long)n < 0)
13068 + return n;
13069 +
13070 if (__builtin_constant_p(n)) {
13071 unsigned long ret;
13072
13073 @@ -62,6 +67,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
13074 return ret;
13075 }
13076 }
13077 + if (!__builtin_constant_p(n))
13078 + check_object_size(from, n, true);
13079 return __copy_to_user_ll(to, from, n);
13080 }
13081
13082 @@ -83,12 +90,16 @@ static __always_inline unsigned long __must_check
13083 __copy_to_user(void __user *to, const void *from, unsigned long n)
13084 {
13085 might_fault();
13086 +
13087 return __copy_to_user_inatomic(to, from, n);
13088 }
13089
13090 static __always_inline unsigned long
13091 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
13092 {
13093 + if ((long)n < 0)
13094 + return n;
13095 +
13096 /* Avoid zeroing the tail if the copy fails..
13097 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
13098 * but as the zeroing behaviour is only significant when n is not
13099 @@ -138,6 +149,12 @@ static __always_inline unsigned long
13100 __copy_from_user(void *to, const void __user *from, unsigned long n)
13101 {
13102 might_fault();
13103 +
13104 + pax_track_stack();
13105 +
13106 + if ((long)n < 0)
13107 + return n;
13108 +
13109 if (__builtin_constant_p(n)) {
13110 unsigned long ret;
13111
13112 @@ -153,6 +170,8 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
13113 return ret;
13114 }
13115 }
13116 + if (!__builtin_constant_p(n))
13117 + check_object_size(to, n, false);
13118 return __copy_from_user_ll(to, from, n);
13119 }
13120
13121 @@ -160,6 +179,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
13122 const void __user *from, unsigned long n)
13123 {
13124 might_fault();
13125 +
13126 + if ((long)n < 0)
13127 + return n;
13128 +
13129 if (__builtin_constant_p(n)) {
13130 unsigned long ret;
13131
13132 @@ -182,14 +205,62 @@ static __always_inline unsigned long
13133 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
13134 unsigned long n)
13135 {
13136 - return __copy_from_user_ll_nocache_nozero(to, from, n);
13137 + if ((long)n < 0)
13138 + return n;
13139 +
13140 + return __copy_from_user_ll_nocache_nozero(to, from, n);
13141 +}
13142 +
13143 +/**
13144 + * copy_to_user: - Copy a block of data into user space.
13145 + * @to: Destination address, in user space.
13146 + * @from: Source address, in kernel space.
13147 + * @n: Number of bytes to copy.
13148 + *
13149 + * Context: User context only. This function may sleep.
13150 + *
13151 + * Copy data from kernel space to user space.
13152 + *
13153 + * Returns number of bytes that could not be copied.
13154 + * On success, this will be zero.
13155 + */
13156 +static __always_inline unsigned long __must_check
13157 +copy_to_user(void __user *to, const void *from, unsigned long n)
13158 +{
13159 + if (access_ok(VERIFY_WRITE, to, n))
13160 + n = __copy_to_user(to, from, n);
13161 + return n;
13162 +}
13163 +
13164 +/**
13165 + * copy_from_user: - Copy a block of data from user space.
13166 + * @to: Destination address, in kernel space.
13167 + * @from: Source address, in user space.
13168 + * @n: Number of bytes to copy.
13169 + *
13170 + * Context: User context only. This function may sleep.
13171 + *
13172 + * Copy data from user space to kernel space.
13173 + *
13174 + * Returns number of bytes that could not be copied.
13175 + * On success, this will be zero.
13176 + *
13177 + * If some data could not be copied, this function will pad the copied
13178 + * data to the requested size using zero bytes.
13179 + */
13180 +static __always_inline unsigned long __must_check
13181 +copy_from_user(void *to, const void __user *from, unsigned long n)
13182 +{
13183 + if (access_ok(VERIFY_READ, from, n))
13184 + n = __copy_from_user(to, from, n);
13185 + else if ((long)n > 0) {
13186 + if (!__builtin_constant_p(n))
13187 + check_object_size(to, n, false);
13188 + memset(to, 0, n);
13189 + }
13190 + return n;
13191 }
13192
13193 -unsigned long __must_check copy_to_user(void __user *to,
13194 - const void *from, unsigned long n);
13195 -unsigned long __must_check copy_from_user(void *to,
13196 - const void __user *from,
13197 - unsigned long n);
13198 long __must_check strncpy_from_user(char *dst, const char __user *src,
13199 long count);
13200 long __must_check __strncpy_from_user(char *dst,
13201 diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
13202 index db24b21..f595ae7 100644
13203 --- a/arch/x86/include/asm/uaccess_64.h
13204 +++ b/arch/x86/include/asm/uaccess_64.h
13205 @@ -9,6 +9,9 @@
13206 #include <linux/prefetch.h>
13207 #include <linux/lockdep.h>
13208 #include <asm/page.h>
13209 +#include <asm/pgtable.h>
13210 +
13211 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
13212
13213 /*
13214 * Copy To/From Userspace
13215 @@ -16,116 +19,205 @@
13216
13217 /* Handles exceptions in both to and from, but doesn't do access_ok */
13218 __must_check unsigned long
13219 -copy_user_generic(void *to, const void *from, unsigned len);
13220 +copy_user_generic(void *to, const void *from, unsigned long len);
13221
13222 __must_check unsigned long
13223 -copy_to_user(void __user *to, const void *from, unsigned len);
13224 -__must_check unsigned long
13225 -copy_from_user(void *to, const void __user *from, unsigned len);
13226 -__must_check unsigned long
13227 -copy_in_user(void __user *to, const void __user *from, unsigned len);
13228 +copy_in_user(void __user *to, const void __user *from, unsigned long len);
13229
13230 static __always_inline __must_check
13231 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
13232 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
13233 {
13234 - int ret = 0;
13235 + unsigned ret = 0;
13236
13237 might_fault();
13238 - if (!__builtin_constant_p(size))
13239 - return copy_user_generic(dst, (__force void *)src, size);
13240 +
13241 + if (size > INT_MAX)
13242 + return size;
13243 +
13244 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13245 + if (!__access_ok(VERIFY_READ, src, size))
13246 + return size;
13247 +#endif
13248 +
13249 + if (!__builtin_constant_p(size)) {
13250 + check_object_size(dst, size, false);
13251 +
13252 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13253 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13254 + src += PAX_USER_SHADOW_BASE;
13255 +#endif
13256 +
13257 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
13258 + }
13259 switch (size) {
13260 - case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
13261 + case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
13262 ret, "b", "b", "=q", 1);
13263 return ret;
13264 - case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
13265 + case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
13266 ret, "w", "w", "=r", 2);
13267 return ret;
13268 - case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
13269 + case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
13270 ret, "l", "k", "=r", 4);
13271 return ret;
13272 - case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
13273 + case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
13274 ret, "q", "", "=r", 8);
13275 return ret;
13276 case 10:
13277 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
13278 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
13279 ret, "q", "", "=r", 10);
13280 if (unlikely(ret))
13281 return ret;
13282 __get_user_asm(*(u16 *)(8 + (char *)dst),
13283 - (u16 __user *)(8 + (char __user *)src),
13284 + (const u16 __user *)(8 + (const char __user *)src),
13285 ret, "w", "w", "=r", 2);
13286 return ret;
13287 case 16:
13288 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
13289 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
13290 ret, "q", "", "=r", 16);
13291 if (unlikely(ret))
13292 return ret;
13293 __get_user_asm(*(u64 *)(8 + (char *)dst),
13294 - (u64 __user *)(8 + (char __user *)src),
13295 + (const u64 __user *)(8 + (const char __user *)src),
13296 ret, "q", "", "=r", 8);
13297 return ret;
13298 default:
13299 - return copy_user_generic(dst, (__force void *)src, size);
13300 +
13301 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13302 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13303 + src += PAX_USER_SHADOW_BASE;
13304 +#endif
13305 +
13306 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
13307 }
13308 }
13309
13310 static __always_inline __must_check
13311 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
13312 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
13313 {
13314 - int ret = 0;
13315 + unsigned ret = 0;
13316
13317 might_fault();
13318 - if (!__builtin_constant_p(size))
13319 - return copy_user_generic((__force void *)dst, src, size);
13320 +
13321 + pax_track_stack();
13322 +
13323 + if (size > INT_MAX)
13324 + return size;
13325 +
13326 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13327 + if (!__access_ok(VERIFY_WRITE, dst, size))
13328 + return size;
13329 +#endif
13330 +
13331 + if (!__builtin_constant_p(size)) {
13332 + check_object_size(src, size, true);
13333 +
13334 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13335 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13336 + dst += PAX_USER_SHADOW_BASE;
13337 +#endif
13338 +
13339 + return copy_user_generic((__force_kernel void *)dst, src, size);
13340 + }
13341 switch (size) {
13342 - case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
13343 + case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
13344 ret, "b", "b", "iq", 1);
13345 return ret;
13346 - case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
13347 + case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
13348 ret, "w", "w", "ir", 2);
13349 return ret;
13350 - case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
13351 + case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
13352 ret, "l", "k", "ir", 4);
13353 return ret;
13354 - case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
13355 + case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
13356 ret, "q", "", "er", 8);
13357 return ret;
13358 case 10:
13359 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
13360 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
13361 ret, "q", "", "er", 10);
13362 if (unlikely(ret))
13363 return ret;
13364 asm("":::"memory");
13365 - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
13366 + __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
13367 ret, "w", "w", "ir", 2);
13368 return ret;
13369 case 16:
13370 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
13371 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
13372 ret, "q", "", "er", 16);
13373 if (unlikely(ret))
13374 return ret;
13375 asm("":::"memory");
13376 - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
13377 + __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
13378 ret, "q", "", "er", 8);
13379 return ret;
13380 default:
13381 - return copy_user_generic((__force void *)dst, src, size);
13382 +
13383 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13384 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13385 + dst += PAX_USER_SHADOW_BASE;
13386 +#endif
13387 +
13388 + return copy_user_generic((__force_kernel void *)dst, src, size);
13389 + }
13390 +}
13391 +
13392 +static __always_inline __must_check
13393 +unsigned long copy_to_user(void __user *to, const void *from, unsigned long len)
13394 +{
13395 + if (access_ok(VERIFY_WRITE, to, len))
13396 + len = __copy_to_user(to, from, len);
13397 + return len;
13398 +}
13399 +
13400 +static __always_inline __must_check
13401 +unsigned long copy_from_user(void *to, const void __user *from, unsigned long len)
13402 +{
13403 + might_fault();
13404 +
13405 + if (access_ok(VERIFY_READ, from, len))
13406 + len = __copy_from_user(to, from, len);
13407 + else if (len < INT_MAX) {
13408 + if (!__builtin_constant_p(len))
13409 + check_object_size(to, len, false);
13410 + memset(to, 0, len);
13411 }
13412 + return len;
13413 }
13414
13415 static __always_inline __must_check
13416 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13417 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
13418 {
13419 - int ret = 0;
13420 + unsigned ret = 0;
13421
13422 might_fault();
13423 - if (!__builtin_constant_p(size))
13424 - return copy_user_generic((__force void *)dst,
13425 - (__force void *)src, size);
13426 +
13427 + pax_track_stack();
13428 +
13429 + if (size > INT_MAX)
13430 + return size;
13431 +
13432 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13433 + if (!__access_ok(VERIFY_READ, src, size))
13434 + return size;
13435 + if (!__access_ok(VERIFY_WRITE, dst, size))
13436 + return size;
13437 +#endif
13438 +
13439 + if (!__builtin_constant_p(size)) {
13440 +
13441 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13442 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13443 + src += PAX_USER_SHADOW_BASE;
13444 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13445 + dst += PAX_USER_SHADOW_BASE;
13446 +#endif
13447 +
13448 + return copy_user_generic((__force_kernel void *)dst,
13449 + (__force_kernel const void *)src, size);
13450 + }
13451 switch (size) {
13452 case 1: {
13453 u8 tmp;
13454 - __get_user_asm(tmp, (u8 __user *)src,
13455 + __get_user_asm(tmp, (const u8 __user *)src,
13456 ret, "b", "b", "=q", 1);
13457 if (likely(!ret))
13458 __put_user_asm(tmp, (u8 __user *)dst,
13459 @@ -134,7 +226,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13460 }
13461 case 2: {
13462 u16 tmp;
13463 - __get_user_asm(tmp, (u16 __user *)src,
13464 + __get_user_asm(tmp, (const u16 __user *)src,
13465 ret, "w", "w", "=r", 2);
13466 if (likely(!ret))
13467 __put_user_asm(tmp, (u16 __user *)dst,
13468 @@ -144,7 +236,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13469
13470 case 4: {
13471 u32 tmp;
13472 - __get_user_asm(tmp, (u32 __user *)src,
13473 + __get_user_asm(tmp, (const u32 __user *)src,
13474 ret, "l", "k", "=r", 4);
13475 if (likely(!ret))
13476 __put_user_asm(tmp, (u32 __user *)dst,
13477 @@ -153,7 +245,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13478 }
13479 case 8: {
13480 u64 tmp;
13481 - __get_user_asm(tmp, (u64 __user *)src,
13482 + __get_user_asm(tmp, (const u64 __user *)src,
13483 ret, "q", "", "=r", 8);
13484 if (likely(!ret))
13485 __put_user_asm(tmp, (u64 __user *)dst,
13486 @@ -161,8 +253,16 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13487 return ret;
13488 }
13489 default:
13490 - return copy_user_generic((__force void *)dst,
13491 - (__force void *)src, size);
13492 +
13493 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13494 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13495 + src += PAX_USER_SHADOW_BASE;
13496 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13497 + dst += PAX_USER_SHADOW_BASE;
13498 +#endif
13499 +
13500 + return copy_user_generic((__force_kernel void *)dst,
13501 + (__force_kernel const void *)src, size);
13502 }
13503 }
13504
13505 @@ -176,33 +276,75 @@ __must_check long strlen_user(const char __user *str);
13506 __must_check unsigned long clear_user(void __user *mem, unsigned long len);
13507 __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
13508
13509 -__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
13510 - unsigned size);
13511 +static __must_check __always_inline unsigned long
13512 +__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
13513 +{
13514 + pax_track_stack();
13515 +
13516 + if (size > INT_MAX)
13517 + return size;
13518 +
13519 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13520 + if (!__access_ok(VERIFY_READ, src, size))
13521 + return size;
13522
13523 -static __must_check __always_inline int
13524 -__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
13525 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13526 + src += PAX_USER_SHADOW_BASE;
13527 +#endif
13528 +
13529 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
13530 +}
13531 +
13532 +static __must_check __always_inline unsigned long
13533 +__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
13534 {
13535 - return copy_user_generic((__force void *)dst, src, size);
13536 + if (size > INT_MAX)
13537 + return size;
13538 +
13539 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13540 + if (!__access_ok(VERIFY_WRITE, dst, size))
13541 + return size;
13542 +
13543 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13544 + dst += PAX_USER_SHADOW_BASE;
13545 +#endif
13546 +
13547 + return copy_user_generic((__force_kernel void *)dst, src, size);
13548 }
13549
13550 -extern long __copy_user_nocache(void *dst, const void __user *src,
13551 - unsigned size, int zerorest);
13552 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
13553 + unsigned long size, int zerorest);
13554
13555 -static inline int
13556 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
13557 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
13558 {
13559 might_sleep();
13560 +
13561 + if (size > INT_MAX)
13562 + return size;
13563 +
13564 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13565 + if (!__access_ok(VERIFY_READ, src, size))
13566 + return size;
13567 +#endif
13568 +
13569 return __copy_user_nocache(dst, src, size, 1);
13570 }
13571
13572 -static inline int
13573 -__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
13574 - unsigned size)
13575 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
13576 + unsigned long size)
13577 {
13578 + if (size > INT_MAX)
13579 + return size;
13580 +
13581 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13582 + if (!__access_ok(VERIFY_READ, src, size))
13583 + return size;
13584 +#endif
13585 +
13586 return __copy_user_nocache(dst, src, size, 0);
13587 }
13588
13589 -unsigned long
13590 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
13591 +extern unsigned long
13592 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest);
13593
13594 #endif /* _ASM_X86_UACCESS_64_H */
13595 diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
13596 index 9064052..786cfbc 100644
13597 --- a/arch/x86/include/asm/vdso.h
13598 +++ b/arch/x86/include/asm/vdso.h
13599 @@ -25,7 +25,7 @@ extern const char VDSO32_PRELINK[];
13600 #define VDSO32_SYMBOL(base, name) \
13601 ({ \
13602 extern const char VDSO32_##name[]; \
13603 - (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
13604 + (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
13605 })
13606 #endif
13607
13608 diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
13609 index 3d61e20..9507180 100644
13610 --- a/arch/x86/include/asm/vgtod.h
13611 +++ b/arch/x86/include/asm/vgtod.h
13612 @@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
13613 int sysctl_enabled;
13614 struct timezone sys_tz;
13615 struct { /* extract of a clocksource struct */
13616 + char name[8];
13617 cycle_t (*vread)(void);
13618 cycle_t cycle_last;
13619 cycle_t mask;
13620 diff --git a/arch/x86/include/asm/vmi.h b/arch/x86/include/asm/vmi.h
13621 index 61e08c0..b0da582 100644
13622 --- a/arch/x86/include/asm/vmi.h
13623 +++ b/arch/x86/include/asm/vmi.h
13624 @@ -191,6 +191,7 @@ struct vrom_header {
13625 u8 reserved[96]; /* Reserved for headers */
13626 char vmi_init[8]; /* VMI_Init jump point */
13627 char get_reloc[8]; /* VMI_GetRelocationInfo jump point */
13628 + char rom_data[8048]; /* rest of the option ROM */
13629 } __attribute__((packed));
13630
13631 struct pnp_header {
13632 diff --git a/arch/x86/include/asm/vmi_time.h b/arch/x86/include/asm/vmi_time.h
13633 index c6e0bee..fcb9f74 100644
13634 --- a/arch/x86/include/asm/vmi_time.h
13635 +++ b/arch/x86/include/asm/vmi_time.h
13636 @@ -43,7 +43,7 @@ extern struct vmi_timer_ops {
13637 int (*wallclock_updated)(void);
13638 void (*set_alarm)(u32 flags, u64 expiry, u64 period);
13639 void (*cancel_alarm)(u32 flags);
13640 -} vmi_timer_ops;
13641 +} __no_const vmi_timer_ops;
13642
13643 /* Prototypes */
13644 extern void __init vmi_time_init(void);
13645 diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
13646 index d0983d2..1f7c9e9 100644
13647 --- a/arch/x86/include/asm/vsyscall.h
13648 +++ b/arch/x86/include/asm/vsyscall.h
13649 @@ -15,9 +15,10 @@ enum vsyscall_num {
13650
13651 #ifdef __KERNEL__
13652 #include <linux/seqlock.h>
13653 +#include <linux/getcpu.h>
13654 +#include <linux/time.h>
13655
13656 #define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
13657 -#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
13658
13659 /* Definitions for CONFIG_GENERIC_TIME definitions */
13660 #define __section_vsyscall_gtod_data __attribute__ \
13661 @@ -31,7 +32,6 @@ enum vsyscall_num {
13662 #define VGETCPU_LSL 2
13663
13664 extern int __vgetcpu_mode;
13665 -extern volatile unsigned long __jiffies;
13666
13667 /* kernel space (writeable) */
13668 extern int vgetcpu_mode;
13669 @@ -39,6 +39,9 @@ extern struct timezone sys_tz;
13670
13671 extern void map_vsyscall(void);
13672
13673 +extern int vgettimeofday(struct timeval * tv, struct timezone * tz);
13674 +extern time_t vtime(time_t *t);
13675 +extern long vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache);
13676 #endif /* __KERNEL__ */
13677
13678 #endif /* _ASM_X86_VSYSCALL_H */
13679 diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
13680 index 2c756fd..3377e37 100644
13681 --- a/arch/x86/include/asm/x86_init.h
13682 +++ b/arch/x86/include/asm/x86_init.h
13683 @@ -28,7 +28,7 @@ struct x86_init_mpparse {
13684 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
13685 void (*find_smp_config)(unsigned int reserve);
13686 void (*get_smp_config)(unsigned int early);
13687 -};
13688 +} __no_const;
13689
13690 /**
13691 * struct x86_init_resources - platform specific resource related ops
13692 @@ -42,7 +42,7 @@ struct x86_init_resources {
13693 void (*probe_roms)(void);
13694 void (*reserve_resources)(void);
13695 char *(*memory_setup)(void);
13696 -};
13697 +} __no_const;
13698
13699 /**
13700 * struct x86_init_irqs - platform specific interrupt setup
13701 @@ -55,7 +55,7 @@ struct x86_init_irqs {
13702 void (*pre_vector_init)(void);
13703 void (*intr_init)(void);
13704 void (*trap_init)(void);
13705 -};
13706 +} __no_const;
13707
13708 /**
13709 * struct x86_init_oem - oem platform specific customizing functions
13710 @@ -65,7 +65,7 @@ struct x86_init_irqs {
13711 struct x86_init_oem {
13712 void (*arch_setup)(void);
13713 void (*banner)(void);
13714 -};
13715 +} __no_const;
13716
13717 /**
13718 * struct x86_init_paging - platform specific paging functions
13719 @@ -75,7 +75,7 @@ struct x86_init_oem {
13720 struct x86_init_paging {
13721 void (*pagetable_setup_start)(pgd_t *base);
13722 void (*pagetable_setup_done)(pgd_t *base);
13723 -};
13724 +} __no_const;
13725
13726 /**
13727 * struct x86_init_timers - platform specific timer setup
13728 @@ -88,7 +88,7 @@ struct x86_init_timers {
13729 void (*setup_percpu_clockev)(void);
13730 void (*tsc_pre_init)(void);
13731 void (*timer_init)(void);
13732 -};
13733 +} __no_const;
13734
13735 /**
13736 * struct x86_init_ops - functions for platform specific setup
13737 @@ -101,7 +101,7 @@ struct x86_init_ops {
13738 struct x86_init_oem oem;
13739 struct x86_init_paging paging;
13740 struct x86_init_timers timers;
13741 -};
13742 +} __no_const;
13743
13744 /**
13745 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
13746 @@ -109,7 +109,7 @@ struct x86_init_ops {
13747 */
13748 struct x86_cpuinit_ops {
13749 void (*setup_percpu_clockev)(void);
13750 -};
13751 +} __no_const;
13752
13753 /**
13754 * struct x86_platform_ops - platform specific runtime functions
13755 @@ -121,7 +121,7 @@ struct x86_platform_ops {
13756 unsigned long (*calibrate_tsc)(void);
13757 unsigned long (*get_wallclock)(void);
13758 int (*set_wallclock)(unsigned long nowtime);
13759 -};
13760 +} __no_const;
13761
13762 extern struct x86_init_ops x86_init;
13763 extern struct x86_cpuinit_ops x86_cpuinit;
13764 diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
13765 index 727acc1..554f3eb 100644
13766 --- a/arch/x86/include/asm/xsave.h
13767 +++ b/arch/x86/include/asm/xsave.h
13768 @@ -56,6 +56,12 @@ static inline int xrstor_checking(struct xsave_struct *fx)
13769 static inline int xsave_user(struct xsave_struct __user *buf)
13770 {
13771 int err;
13772 +
13773 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13774 + if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
13775 + buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
13776 +#endif
13777 +
13778 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
13779 "2:\n"
13780 ".section .fixup,\"ax\"\n"
13781 @@ -78,10 +84,15 @@ static inline int xsave_user(struct xsave_struct __user *buf)
13782 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
13783 {
13784 int err;
13785 - struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
13786 + struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
13787 u32 lmask = mask;
13788 u32 hmask = mask >> 32;
13789
13790 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13791 + if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
13792 + xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
13793 +#endif
13794 +
13795 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
13796 "2:\n"
13797 ".section .fixup,\"ax\"\n"
13798 diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
13799 index 6a564ac..9b1340c 100644
13800 --- a/arch/x86/kernel/acpi/realmode/Makefile
13801 +++ b/arch/x86/kernel/acpi/realmode/Makefile
13802 @@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
13803 $(call cc-option, -fno-stack-protector) \
13804 $(call cc-option, -mpreferred-stack-boundary=2)
13805 KBUILD_CFLAGS += $(call cc-option, -m32)
13806 +ifdef CONSTIFY_PLUGIN
13807 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
13808 +endif
13809 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
13810 GCOV_PROFILE := n
13811
13812 diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
13813 index 580b4e2..d4129e4 100644
13814 --- a/arch/x86/kernel/acpi/realmode/wakeup.S
13815 +++ b/arch/x86/kernel/acpi/realmode/wakeup.S
13816 @@ -91,6 +91,9 @@ _start:
13817 /* Do any other stuff... */
13818
13819 #ifndef CONFIG_64BIT
13820 + /* Recheck NX bit overrides (64bit path does this in trampoline) */
13821 + call verify_cpu
13822 +
13823 /* This could also be done in C code... */
13824 movl pmode_cr3, %eax
13825 movl %eax, %cr3
13826 @@ -104,7 +107,7 @@ _start:
13827 movl %eax, %ecx
13828 orl %edx, %ecx
13829 jz 1f
13830 - movl $0xc0000080, %ecx
13831 + mov $MSR_EFER, %ecx
13832 wrmsr
13833 1:
13834
13835 @@ -114,6 +117,7 @@ _start:
13836 movl pmode_cr0, %eax
13837 movl %eax, %cr0
13838 jmp pmode_return
13839 +# include "../../verify_cpu.S"
13840 #else
13841 pushw $0
13842 pushw trampoline_segment
13843 diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
13844 index ca93638..7042f24 100644
13845 --- a/arch/x86/kernel/acpi/sleep.c
13846 +++ b/arch/x86/kernel/acpi/sleep.c
13847 @@ -11,11 +11,12 @@
13848 #include <linux/cpumask.h>
13849 #include <asm/segment.h>
13850 #include <asm/desc.h>
13851 +#include <asm/e820.h>
13852
13853 #include "realmode/wakeup.h"
13854 #include "sleep.h"
13855
13856 -unsigned long acpi_wakeup_address;
13857 +unsigned long acpi_wakeup_address = 0x2000;
13858 unsigned long acpi_realmode_flags;
13859
13860 /* address in low memory of the wakeup routine. */
13861 @@ -98,9 +99,13 @@ int acpi_save_state_mem(void)
13862 #else /* CONFIG_64BIT */
13863 header->trampoline_segment = setup_trampoline() >> 4;
13864 #ifdef CONFIG_SMP
13865 - stack_start.sp = temp_stack + sizeof(temp_stack);
13866 + stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
13867 +
13868 + pax_open_kernel();
13869 early_gdt_descr.address =
13870 (unsigned long)get_cpu_gdt_table(smp_processor_id());
13871 + pax_close_kernel();
13872 +
13873 initial_gs = per_cpu_offset(smp_processor_id());
13874 #endif
13875 initial_code = (unsigned long)wakeup_long64;
13876 @@ -134,14 +139,8 @@ void __init acpi_reserve_bootmem(void)
13877 return;
13878 }
13879
13880 - acpi_realmode = (unsigned long)alloc_bootmem_low(WAKEUP_SIZE);
13881 -
13882 - if (!acpi_realmode) {
13883 - printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
13884 - return;
13885 - }
13886 -
13887 - acpi_wakeup_address = virt_to_phys((void *)acpi_realmode);
13888 + reserve_early(acpi_wakeup_address, acpi_wakeup_address + WAKEUP_SIZE, "ACPI Wakeup Code");
13889 + acpi_realmode = (unsigned long)__va(acpi_wakeup_address);;
13890 }
13891
13892
13893 diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
13894 index 8ded418..079961e 100644
13895 --- a/arch/x86/kernel/acpi/wakeup_32.S
13896 +++ b/arch/x86/kernel/acpi/wakeup_32.S
13897 @@ -30,13 +30,11 @@ wakeup_pmode_return:
13898 # and restore the stack ... but you need gdt for this to work
13899 movl saved_context_esp, %esp
13900
13901 - movl %cs:saved_magic, %eax
13902 - cmpl $0x12345678, %eax
13903 + cmpl $0x12345678, saved_magic
13904 jne bogus_magic
13905
13906 # jump to place where we left off
13907 - movl saved_eip, %eax
13908 - jmp *%eax
13909 + jmp *(saved_eip)
13910
13911 bogus_magic:
13912 jmp bogus_magic
13913 diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
13914 index de7353c..075da5f 100644
13915 --- a/arch/x86/kernel/alternative.c
13916 +++ b/arch/x86/kernel/alternative.c
13917 @@ -407,7 +407,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
13918
13919 BUG_ON(p->len > MAX_PATCH_LEN);
13920 /* prep the buffer with the original instructions */
13921 - memcpy(insnbuf, p->instr, p->len);
13922 + memcpy(insnbuf, ktla_ktva(p->instr), p->len);
13923 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
13924 (unsigned long)p->instr, p->len);
13925
13926 @@ -475,7 +475,7 @@ void __init alternative_instructions(void)
13927 if (smp_alt_once)
13928 free_init_pages("SMP alternatives",
13929 (unsigned long)__smp_locks,
13930 - (unsigned long)__smp_locks_end);
13931 + PAGE_ALIGN((unsigned long)__smp_locks_end));
13932
13933 restart_nmi();
13934 }
13935 @@ -492,13 +492,17 @@ void __init alternative_instructions(void)
13936 * instructions. And on the local CPU you need to be protected again NMI or MCE
13937 * handlers seeing an inconsistent instruction while you patch.
13938 */
13939 -static void *__init_or_module text_poke_early(void *addr, const void *opcode,
13940 +static void *__kprobes text_poke_early(void *addr, const void *opcode,
13941 size_t len)
13942 {
13943 unsigned long flags;
13944 local_irq_save(flags);
13945 - memcpy(addr, opcode, len);
13946 +
13947 + pax_open_kernel();
13948 + memcpy(ktla_ktva(addr), opcode, len);
13949 sync_core();
13950 + pax_close_kernel();
13951 +
13952 local_irq_restore(flags);
13953 /* Could also do a CLFLUSH here to speed up CPU recovery; but
13954 that causes hangs on some VIA CPUs. */
13955 @@ -520,35 +524,21 @@ static void *__init_or_module text_poke_early(void *addr, const void *opcode,
13956 */
13957 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
13958 {
13959 - unsigned long flags;
13960 - char *vaddr;
13961 + unsigned char *vaddr = ktla_ktva(addr);
13962 struct page *pages[2];
13963 - int i;
13964 + size_t i;
13965
13966 if (!core_kernel_text((unsigned long)addr)) {
13967 - pages[0] = vmalloc_to_page(addr);
13968 - pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
13969 + pages[0] = vmalloc_to_page(vaddr);
13970 + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
13971 } else {
13972 - pages[0] = virt_to_page(addr);
13973 + pages[0] = virt_to_page(vaddr);
13974 WARN_ON(!PageReserved(pages[0]));
13975 - pages[1] = virt_to_page(addr + PAGE_SIZE);
13976 + pages[1] = virt_to_page(vaddr + PAGE_SIZE);
13977 }
13978 BUG_ON(!pages[0]);
13979 - local_irq_save(flags);
13980 - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
13981 - if (pages[1])
13982 - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
13983 - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
13984 - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
13985 - clear_fixmap(FIX_TEXT_POKE0);
13986 - if (pages[1])
13987 - clear_fixmap(FIX_TEXT_POKE1);
13988 - local_flush_tlb();
13989 - sync_core();
13990 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
13991 - that causes hangs on some VIA CPUs. */
13992 + text_poke_early(addr, opcode, len);
13993 for (i = 0; i < len; i++)
13994 - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
13995 - local_irq_restore(flags);
13996 + BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
13997 return addr;
13998 }
13999 diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
14000 index 3a44b75..1601800 100644
14001 --- a/arch/x86/kernel/amd_iommu.c
14002 +++ b/arch/x86/kernel/amd_iommu.c
14003 @@ -2076,7 +2076,7 @@ static void prealloc_protection_domains(void)
14004 }
14005 }
14006
14007 -static struct dma_map_ops amd_iommu_dma_ops = {
14008 +static const struct dma_map_ops amd_iommu_dma_ops = {
14009 .alloc_coherent = alloc_coherent,
14010 .free_coherent = free_coherent,
14011 .map_page = map_page,
14012 diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
14013 index 1d2d670..8e3f477 100644
14014 --- a/arch/x86/kernel/apic/apic.c
14015 +++ b/arch/x86/kernel/apic/apic.c
14016 @@ -170,7 +170,7 @@ int first_system_vector = 0xfe;
14017 /*
14018 * Debug level, exported for io_apic.c
14019 */
14020 -unsigned int apic_verbosity;
14021 +int apic_verbosity;
14022
14023 int pic_mode;
14024
14025 @@ -1794,7 +1794,7 @@ void smp_error_interrupt(struct pt_regs *regs)
14026 apic_write(APIC_ESR, 0);
14027 v1 = apic_read(APIC_ESR);
14028 ack_APIC_irq();
14029 - atomic_inc(&irq_err_count);
14030 + atomic_inc_unchecked(&irq_err_count);
14031
14032 /*
14033 * Here is what the APIC error bits mean:
14034 @@ -2184,6 +2184,8 @@ static int __cpuinit apic_cluster_num(void)
14035 u16 *bios_cpu_apicid;
14036 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
14037
14038 + pax_track_stack();
14039 +
14040 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
14041 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
14042
14043 diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
14044 index 8928d97..f799cea 100644
14045 --- a/arch/x86/kernel/apic/io_apic.c
14046 +++ b/arch/x86/kernel/apic/io_apic.c
14047 @@ -716,7 +716,7 @@ struct IO_APIC_route_entry **alloc_ioapic_entries(void)
14048 ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
14049 GFP_ATOMIC);
14050 if (!ioapic_entries)
14051 - return 0;
14052 + return NULL;
14053
14054 for (apic = 0; apic < nr_ioapics; apic++) {
14055 ioapic_entries[apic] =
14056 @@ -733,7 +733,7 @@ nomem:
14057 kfree(ioapic_entries[apic]);
14058 kfree(ioapic_entries);
14059
14060 - return 0;
14061 + return NULL;
14062 }
14063
14064 /*
14065 @@ -1150,7 +1150,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
14066 }
14067 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
14068
14069 -void lock_vector_lock(void)
14070 +void lock_vector_lock(void) __acquires(vector_lock)
14071 {
14072 /* Used to the online set of cpus does not change
14073 * during assign_irq_vector.
14074 @@ -1158,7 +1158,7 @@ void lock_vector_lock(void)
14075 spin_lock(&vector_lock);
14076 }
14077
14078 -void unlock_vector_lock(void)
14079 +void unlock_vector_lock(void) __releases(vector_lock)
14080 {
14081 spin_unlock(&vector_lock);
14082 }
14083 @@ -2542,7 +2542,7 @@ static void ack_apic_edge(unsigned int irq)
14084 ack_APIC_irq();
14085 }
14086
14087 -atomic_t irq_mis_count;
14088 +atomic_unchecked_t irq_mis_count;
14089
14090 static void ack_apic_level(unsigned int irq)
14091 {
14092 @@ -2626,7 +2626,7 @@ static void ack_apic_level(unsigned int irq)
14093
14094 /* Tail end of version 0x11 I/O APIC bug workaround */
14095 if (!(v & (1 << (i & 0x1f)))) {
14096 - atomic_inc(&irq_mis_count);
14097 + atomic_inc_unchecked(&irq_mis_count);
14098 spin_lock(&ioapic_lock);
14099 __mask_and_edge_IO_APIC_irq(cfg);
14100 __unmask_and_level_IO_APIC_irq(cfg);
14101 diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
14102 index 151ace6..f317474 100644
14103 --- a/arch/x86/kernel/apm_32.c
14104 +++ b/arch/x86/kernel/apm_32.c
14105 @@ -410,7 +410,7 @@ static DEFINE_SPINLOCK(user_list_lock);
14106 * This is for buggy BIOS's that refer to (real mode) segment 0x40
14107 * even though they are called in protected mode.
14108 */
14109 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
14110 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
14111 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
14112
14113 static const char driver_version[] = "1.16ac"; /* no spaces */
14114 @@ -588,7 +588,10 @@ static long __apm_bios_call(void *_call)
14115 BUG_ON(cpu != 0);
14116 gdt = get_cpu_gdt_table(cpu);
14117 save_desc_40 = gdt[0x40 / 8];
14118 +
14119 + pax_open_kernel();
14120 gdt[0x40 / 8] = bad_bios_desc;
14121 + pax_close_kernel();
14122
14123 apm_irq_save(flags);
14124 APM_DO_SAVE_SEGS;
14125 @@ -597,7 +600,11 @@ static long __apm_bios_call(void *_call)
14126 &call->esi);
14127 APM_DO_RESTORE_SEGS;
14128 apm_irq_restore(flags);
14129 +
14130 + pax_open_kernel();
14131 gdt[0x40 / 8] = save_desc_40;
14132 + pax_close_kernel();
14133 +
14134 put_cpu();
14135
14136 return call->eax & 0xff;
14137 @@ -664,7 +671,10 @@ static long __apm_bios_call_simple(void *_call)
14138 BUG_ON(cpu != 0);
14139 gdt = get_cpu_gdt_table(cpu);
14140 save_desc_40 = gdt[0x40 / 8];
14141 +
14142 + pax_open_kernel();
14143 gdt[0x40 / 8] = bad_bios_desc;
14144 + pax_close_kernel();
14145
14146 apm_irq_save(flags);
14147 APM_DO_SAVE_SEGS;
14148 @@ -672,7 +682,11 @@ static long __apm_bios_call_simple(void *_call)
14149 &call->eax);
14150 APM_DO_RESTORE_SEGS;
14151 apm_irq_restore(flags);
14152 +
14153 + pax_open_kernel();
14154 gdt[0x40 / 8] = save_desc_40;
14155 + pax_close_kernel();
14156 +
14157 put_cpu();
14158 return error;
14159 }
14160 @@ -975,7 +989,7 @@ recalc:
14161
14162 static void apm_power_off(void)
14163 {
14164 - unsigned char po_bios_call[] = {
14165 + const unsigned char po_bios_call[] = {
14166 0xb8, 0x00, 0x10, /* movw $0x1000,ax */
14167 0x8e, 0xd0, /* movw ax,ss */
14168 0xbc, 0x00, 0xf0, /* movw $0xf000,sp */
14169 @@ -2357,12 +2371,15 @@ static int __init apm_init(void)
14170 * code to that CPU.
14171 */
14172 gdt = get_cpu_gdt_table(0);
14173 +
14174 + pax_open_kernel();
14175 set_desc_base(&gdt[APM_CS >> 3],
14176 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
14177 set_desc_base(&gdt[APM_CS_16 >> 3],
14178 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
14179 set_desc_base(&gdt[APM_DS >> 3],
14180 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
14181 + pax_close_kernel();
14182
14183 proc_create("apm", 0, NULL, &apm_file_ops);
14184
14185 diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
14186 index dfdbf64..9b2b6ce 100644
14187 --- a/arch/x86/kernel/asm-offsets_32.c
14188 +++ b/arch/x86/kernel/asm-offsets_32.c
14189 @@ -51,7 +51,6 @@ void foo(void)
14190 OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
14191 BLANK();
14192
14193 - OFFSET(TI_task, thread_info, task);
14194 OFFSET(TI_exec_domain, thread_info, exec_domain);
14195 OFFSET(TI_flags, thread_info, flags);
14196 OFFSET(TI_status, thread_info, status);
14197 @@ -60,6 +59,8 @@ void foo(void)
14198 OFFSET(TI_restart_block, thread_info, restart_block);
14199 OFFSET(TI_sysenter_return, thread_info, sysenter_return);
14200 OFFSET(TI_cpu, thread_info, cpu);
14201 + OFFSET(TI_lowest_stack, thread_info, lowest_stack);
14202 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
14203 BLANK();
14204
14205 OFFSET(GDS_size, desc_ptr, size);
14206 @@ -99,6 +100,7 @@ void foo(void)
14207
14208 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
14209 DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
14210 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
14211 DEFINE(PTRS_PER_PTE, PTRS_PER_PTE);
14212 DEFINE(PTRS_PER_PMD, PTRS_PER_PMD);
14213 DEFINE(PTRS_PER_PGD, PTRS_PER_PGD);
14214 @@ -115,6 +117,11 @@ void foo(void)
14215 OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
14216 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
14217 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
14218 +
14219 +#ifdef CONFIG_PAX_KERNEXEC
14220 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
14221 +#endif
14222 +
14223 #endif
14224
14225 #ifdef CONFIG_XEN
14226 diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
14227 index 4a6aeed..371de20 100644
14228 --- a/arch/x86/kernel/asm-offsets_64.c
14229 +++ b/arch/x86/kernel/asm-offsets_64.c
14230 @@ -44,6 +44,8 @@ int main(void)
14231 ENTRY(addr_limit);
14232 ENTRY(preempt_count);
14233 ENTRY(status);
14234 + ENTRY(lowest_stack);
14235 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
14236 #ifdef CONFIG_IA32_EMULATION
14237 ENTRY(sysenter_return);
14238 #endif
14239 @@ -63,6 +65,18 @@ int main(void)
14240 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
14241 OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
14242 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
14243 +
14244 +#ifdef CONFIG_PAX_KERNEXEC
14245 + OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
14246 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
14247 +#endif
14248 +
14249 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14250 + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
14251 + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
14252 + OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
14253 +#endif
14254 +
14255 #endif
14256
14257
14258 @@ -115,6 +129,7 @@ int main(void)
14259 ENTRY(cr8);
14260 BLANK();
14261 #undef ENTRY
14262 + DEFINE(TSS_size, sizeof(struct tss_struct));
14263 DEFINE(TSS_ist, offsetof(struct tss_struct, x86_tss.ist));
14264 BLANK();
14265 DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
14266 @@ -130,6 +145,7 @@ int main(void)
14267
14268 BLANK();
14269 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
14270 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
14271 #ifdef CONFIG_XEN
14272 BLANK();
14273 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
14274 diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
14275 index ff502cc..dc5133e 100644
14276 --- a/arch/x86/kernel/cpu/Makefile
14277 +++ b/arch/x86/kernel/cpu/Makefile
14278 @@ -7,10 +7,6 @@ ifdef CONFIG_FUNCTION_TRACER
14279 CFLAGS_REMOVE_common.o = -pg
14280 endif
14281
14282 -# Make sure load_percpu_segment has no stackprotector
14283 -nostackp := $(call cc-option, -fno-stack-protector)
14284 -CFLAGS_common.o := $(nostackp)
14285 -
14286 obj-y := intel_cacheinfo.o addon_cpuid_features.o
14287 obj-y += proc.o capflags.o powerflags.o common.o
14288 obj-y += vmware.o hypervisor.o sched.o
14289 diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
14290 index 6e082dc..a0b5f36 100644
14291 --- a/arch/x86/kernel/cpu/amd.c
14292 +++ b/arch/x86/kernel/cpu/amd.c
14293 @@ -602,7 +602,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
14294 unsigned int size)
14295 {
14296 /* AMD errata T13 (order #21922) */
14297 - if ((c->x86 == 6)) {
14298 + if (c->x86 == 6) {
14299 /* Duron Rev A0 */
14300 if (c->x86_model == 3 && c->x86_mask == 0)
14301 size = 64;
14302 diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
14303 index 4e34d10..ba6bc97 100644
14304 --- a/arch/x86/kernel/cpu/common.c
14305 +++ b/arch/x86/kernel/cpu/common.c
14306 @@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
14307
14308 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
14309
14310 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
14311 -#ifdef CONFIG_X86_64
14312 - /*
14313 - * We need valid kernel segments for data and code in long mode too
14314 - * IRET will check the segment types kkeil 2000/10/28
14315 - * Also sysret mandates a special GDT layout
14316 - *
14317 - * TLS descriptors are currently at a different place compared to i386.
14318 - * Hopefully nobody expects them at a fixed place (Wine?)
14319 - */
14320 - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
14321 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
14322 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
14323 - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
14324 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
14325 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
14326 -#else
14327 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
14328 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
14329 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
14330 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
14331 - /*
14332 - * Segments used for calling PnP BIOS have byte granularity.
14333 - * They code segments and data segments have fixed 64k limits,
14334 - * the transfer segment sizes are set at run time.
14335 - */
14336 - /* 32-bit code */
14337 - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
14338 - /* 16-bit code */
14339 - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
14340 - /* 16-bit data */
14341 - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
14342 - /* 16-bit data */
14343 - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
14344 - /* 16-bit data */
14345 - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
14346 - /*
14347 - * The APM segments have byte granularity and their bases
14348 - * are set at run time. All have 64k limits.
14349 - */
14350 - /* 32-bit code */
14351 - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
14352 - /* 16-bit code */
14353 - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
14354 - /* data */
14355 - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
14356 -
14357 - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
14358 - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
14359 - GDT_STACK_CANARY_INIT
14360 -#endif
14361 -} };
14362 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
14363 -
14364 static int __init x86_xsave_setup(char *s)
14365 {
14366 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
14367 @@ -344,7 +290,7 @@ void switch_to_new_gdt(int cpu)
14368 {
14369 struct desc_ptr gdt_descr;
14370
14371 - gdt_descr.address = (long)get_cpu_gdt_table(cpu);
14372 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
14373 gdt_descr.size = GDT_SIZE - 1;
14374 load_gdt(&gdt_descr);
14375 /* Reload the per-cpu base */
14376 @@ -798,6 +744,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
14377 /* Filter out anything that depends on CPUID levels we don't have */
14378 filter_cpuid_features(c, true);
14379
14380 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14381 + setup_clear_cpu_cap(X86_FEATURE_SEP);
14382 +#endif
14383 +
14384 /* If the model name is still unset, do table lookup. */
14385 if (!c->x86_model_id[0]) {
14386 const char *p;
14387 @@ -980,6 +930,9 @@ static __init int setup_disablecpuid(char *arg)
14388 }
14389 __setup("clearcpuid=", setup_disablecpuid);
14390
14391 +DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
14392 +EXPORT_PER_CPU_SYMBOL(current_tinfo);
14393 +
14394 #ifdef CONFIG_X86_64
14395 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
14396
14397 @@ -995,7 +948,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
14398 EXPORT_PER_CPU_SYMBOL(current_task);
14399
14400 DEFINE_PER_CPU(unsigned long, kernel_stack) =
14401 - (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
14402 + (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
14403 EXPORT_PER_CPU_SYMBOL(kernel_stack);
14404
14405 DEFINE_PER_CPU(char *, irq_stack_ptr) =
14406 @@ -1060,7 +1013,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
14407 {
14408 memset(regs, 0, sizeof(struct pt_regs));
14409 regs->fs = __KERNEL_PERCPU;
14410 - regs->gs = __KERNEL_STACK_CANARY;
14411 + savesegment(gs, regs->gs);
14412
14413 return regs;
14414 }
14415 @@ -1101,7 +1054,7 @@ void __cpuinit cpu_init(void)
14416 int i;
14417
14418 cpu = stack_smp_processor_id();
14419 - t = &per_cpu(init_tss, cpu);
14420 + t = init_tss + cpu;
14421 orig_ist = &per_cpu(orig_ist, cpu);
14422
14423 #ifdef CONFIG_NUMA
14424 @@ -1127,7 +1080,7 @@ void __cpuinit cpu_init(void)
14425 switch_to_new_gdt(cpu);
14426 loadsegment(fs, 0);
14427
14428 - load_idt((const struct desc_ptr *)&idt_descr);
14429 + load_idt(&idt_descr);
14430
14431 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
14432 syscall_init();
14433 @@ -1136,7 +1089,6 @@ void __cpuinit cpu_init(void)
14434 wrmsrl(MSR_KERNEL_GS_BASE, 0);
14435 barrier();
14436
14437 - check_efer();
14438 if (cpu != 0)
14439 enable_x2apic();
14440
14441 @@ -1199,7 +1151,7 @@ void __cpuinit cpu_init(void)
14442 {
14443 int cpu = smp_processor_id();
14444 struct task_struct *curr = current;
14445 - struct tss_struct *t = &per_cpu(init_tss, cpu);
14446 + struct tss_struct *t = init_tss + cpu;
14447 struct thread_struct *thread = &curr->thread;
14448
14449 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
14450 diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
14451 index 6a77cca..4f4fca0 100644
14452 --- a/arch/x86/kernel/cpu/intel.c
14453 +++ b/arch/x86/kernel/cpu/intel.c
14454 @@ -162,7 +162,7 @@ static void __cpuinit trap_init_f00f_bug(void)
14455 * Update the IDT descriptor and reload the IDT so that
14456 * it uses the read-only mapped virtual address.
14457 */
14458 - idt_descr.address = fix_to_virt(FIX_F00F_IDT);
14459 + idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
14460 load_idt(&idt_descr);
14461 }
14462 #endif
14463 diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
14464 index 417990f..96dc36b 100644
14465 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c
14466 +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
14467 @@ -921,7 +921,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
14468 return ret;
14469 }
14470
14471 -static struct sysfs_ops sysfs_ops = {
14472 +static const struct sysfs_ops sysfs_ops = {
14473 .show = show,
14474 .store = store,
14475 };
14476 diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
14477 index 472763d..9831e11 100644
14478 --- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
14479 +++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
14480 @@ -211,7 +211,9 @@ static ssize_t mce_write(struct file *filp, const char __user *ubuf,
14481 static int inject_init(void)
14482 {
14483 printk(KERN_INFO "Machine check injector initialized\n");
14484 - mce_chrdev_ops.write = mce_write;
14485 + pax_open_kernel();
14486 + *(void **)&mce_chrdev_ops.write = mce_write;
14487 + pax_close_kernel();
14488 register_die_notifier(&mce_raise_nb);
14489 return 0;
14490 }
14491 diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
14492 index 0f16a2b..21740f5 100644
14493 --- a/arch/x86/kernel/cpu/mcheck/mce.c
14494 +++ b/arch/x86/kernel/cpu/mcheck/mce.c
14495 @@ -43,6 +43,7 @@
14496 #include <asm/ipi.h>
14497 #include <asm/mce.h>
14498 #include <asm/msr.h>
14499 +#include <asm/local.h>
14500
14501 #include "mce-internal.h"
14502
14503 @@ -187,7 +188,7 @@ static void print_mce(struct mce *m)
14504 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
14505 m->cs, m->ip);
14506
14507 - if (m->cs == __KERNEL_CS)
14508 + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
14509 print_symbol("{%s}", m->ip);
14510 pr_cont("\n");
14511 }
14512 @@ -221,10 +222,10 @@ static void print_mce_tail(void)
14513
14514 #define PANIC_TIMEOUT 5 /* 5 seconds */
14515
14516 -static atomic_t mce_paniced;
14517 +static atomic_unchecked_t mce_paniced;
14518
14519 static int fake_panic;
14520 -static atomic_t mce_fake_paniced;
14521 +static atomic_unchecked_t mce_fake_paniced;
14522
14523 /* Panic in progress. Enable interrupts and wait for final IPI */
14524 static void wait_for_panic(void)
14525 @@ -248,7 +249,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
14526 /*
14527 * Make sure only one CPU runs in machine check panic
14528 */
14529 - if (atomic_inc_return(&mce_paniced) > 1)
14530 + if (atomic_inc_return_unchecked(&mce_paniced) > 1)
14531 wait_for_panic();
14532 barrier();
14533
14534 @@ -256,7 +257,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
14535 console_verbose();
14536 } else {
14537 /* Don't log too much for fake panic */
14538 - if (atomic_inc_return(&mce_fake_paniced) > 1)
14539 + if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
14540 return;
14541 }
14542 print_mce_head();
14543 @@ -616,7 +617,7 @@ static int mce_timed_out(u64 *t)
14544 * might have been modified by someone else.
14545 */
14546 rmb();
14547 - if (atomic_read(&mce_paniced))
14548 + if (atomic_read_unchecked(&mce_paniced))
14549 wait_for_panic();
14550 if (!monarch_timeout)
14551 goto out;
14552 @@ -1394,7 +1395,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
14553 }
14554
14555 /* Call the installed machine check handler for this CPU setup. */
14556 -void (*machine_check_vector)(struct pt_regs *, long error_code) =
14557 +void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
14558 unexpected_machine_check;
14559
14560 /*
14561 @@ -1416,7 +1417,9 @@ void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
14562 return;
14563 }
14564
14565 + pax_open_kernel();
14566 machine_check_vector = do_machine_check;
14567 + pax_close_kernel();
14568
14569 mce_init();
14570 mce_cpu_features(c);
14571 @@ -1429,14 +1432,14 @@ void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
14572 */
14573
14574 static DEFINE_SPINLOCK(mce_state_lock);
14575 -static int open_count; /* #times opened */
14576 +static local_t open_count; /* #times opened */
14577 static int open_exclu; /* already open exclusive? */
14578
14579 static int mce_open(struct inode *inode, struct file *file)
14580 {
14581 spin_lock(&mce_state_lock);
14582
14583 - if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
14584 + if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
14585 spin_unlock(&mce_state_lock);
14586
14587 return -EBUSY;
14588 @@ -1444,7 +1447,7 @@ static int mce_open(struct inode *inode, struct file *file)
14589
14590 if (file->f_flags & O_EXCL)
14591 open_exclu = 1;
14592 - open_count++;
14593 + local_inc(&open_count);
14594
14595 spin_unlock(&mce_state_lock);
14596
14597 @@ -1455,7 +1458,7 @@ static int mce_release(struct inode *inode, struct file *file)
14598 {
14599 spin_lock(&mce_state_lock);
14600
14601 - open_count--;
14602 + local_dec(&open_count);
14603 open_exclu = 0;
14604
14605 spin_unlock(&mce_state_lock);
14606 @@ -2082,7 +2085,7 @@ struct dentry *mce_get_debugfs_dir(void)
14607 static void mce_reset(void)
14608 {
14609 cpu_missing = 0;
14610 - atomic_set(&mce_fake_paniced, 0);
14611 + atomic_set_unchecked(&mce_fake_paniced, 0);
14612 atomic_set(&mce_executing, 0);
14613 atomic_set(&mce_callin, 0);
14614 atomic_set(&global_nwo, 0);
14615 diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
14616 index ef3cd31..9d2f6ab 100644
14617 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
14618 +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
14619 @@ -385,7 +385,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
14620 return ret;
14621 }
14622
14623 -static struct sysfs_ops threshold_ops = {
14624 +static const struct sysfs_ops threshold_ops = {
14625 .show = show,
14626 .store = store,
14627 };
14628 diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
14629 index 5c0e653..0882b0a 100644
14630 --- a/arch/x86/kernel/cpu/mcheck/p5.c
14631 +++ b/arch/x86/kernel/cpu/mcheck/p5.c
14632 @@ -12,6 +12,7 @@
14633 #include <asm/system.h>
14634 #include <asm/mce.h>
14635 #include <asm/msr.h>
14636 +#include <asm/pgtable.h>
14637
14638 /* By default disabled */
14639 int mce_p5_enabled __read_mostly;
14640 @@ -50,7 +51,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
14641 if (!cpu_has(c, X86_FEATURE_MCE))
14642 return;
14643
14644 + pax_open_kernel();
14645 machine_check_vector = pentium_machine_check;
14646 + pax_close_kernel();
14647 /* Make sure the vector pointer is visible before we enable MCEs: */
14648 wmb();
14649
14650 diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
14651 index 54060f5..c1a7577 100644
14652 --- a/arch/x86/kernel/cpu/mcheck/winchip.c
14653 +++ b/arch/x86/kernel/cpu/mcheck/winchip.c
14654 @@ -11,6 +11,7 @@
14655 #include <asm/system.h>
14656 #include <asm/mce.h>
14657 #include <asm/msr.h>
14658 +#include <asm/pgtable.h>
14659
14660 /* Machine check handler for WinChip C6: */
14661 static void winchip_machine_check(struct pt_regs *regs, long error_code)
14662 @@ -24,7 +25,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
14663 {
14664 u32 lo, hi;
14665
14666 + pax_open_kernel();
14667 machine_check_vector = winchip_machine_check;
14668 + pax_close_kernel();
14669 /* Make sure the vector pointer is visible before we enable MCEs: */
14670 wmb();
14671
14672 diff --git a/arch/x86/kernel/cpu/mtrr/amd.c b/arch/x86/kernel/cpu/mtrr/amd.c
14673 index 33af141..92ba9cd 100644
14674 --- a/arch/x86/kernel/cpu/mtrr/amd.c
14675 +++ b/arch/x86/kernel/cpu/mtrr/amd.c
14676 @@ -108,7 +108,7 @@ amd_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
14677 return 0;
14678 }
14679
14680 -static struct mtrr_ops amd_mtrr_ops = {
14681 +static const struct mtrr_ops amd_mtrr_ops = {
14682 .vendor = X86_VENDOR_AMD,
14683 .set = amd_set_mtrr,
14684 .get = amd_get_mtrr,
14685 diff --git a/arch/x86/kernel/cpu/mtrr/centaur.c b/arch/x86/kernel/cpu/mtrr/centaur.c
14686 index de89f14..316fe3e 100644
14687 --- a/arch/x86/kernel/cpu/mtrr/centaur.c
14688 +++ b/arch/x86/kernel/cpu/mtrr/centaur.c
14689 @@ -110,7 +110,7 @@ centaur_validate_add_page(unsigned long base, unsigned long size, unsigned int t
14690 return 0;
14691 }
14692
14693 -static struct mtrr_ops centaur_mtrr_ops = {
14694 +static const struct mtrr_ops centaur_mtrr_ops = {
14695 .vendor = X86_VENDOR_CENTAUR,
14696 .set = centaur_set_mcr,
14697 .get = centaur_get_mcr,
14698 diff --git a/arch/x86/kernel/cpu/mtrr/cyrix.c b/arch/x86/kernel/cpu/mtrr/cyrix.c
14699 index 228d982..68a3343 100644
14700 --- a/arch/x86/kernel/cpu/mtrr/cyrix.c
14701 +++ b/arch/x86/kernel/cpu/mtrr/cyrix.c
14702 @@ -265,7 +265,7 @@ static void cyrix_set_all(void)
14703 post_set();
14704 }
14705
14706 -static struct mtrr_ops cyrix_mtrr_ops = {
14707 +static const struct mtrr_ops cyrix_mtrr_ops = {
14708 .vendor = X86_VENDOR_CYRIX,
14709 .set_all = cyrix_set_all,
14710 .set = cyrix_set_arr,
14711 diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
14712 index 55da0c5..4d75584 100644
14713 --- a/arch/x86/kernel/cpu/mtrr/generic.c
14714 +++ b/arch/x86/kernel/cpu/mtrr/generic.c
14715 @@ -752,7 +752,7 @@ int positive_have_wrcomb(void)
14716 /*
14717 * Generic structure...
14718 */
14719 -struct mtrr_ops generic_mtrr_ops = {
14720 +const struct mtrr_ops generic_mtrr_ops = {
14721 .use_intel_if = 1,
14722 .set_all = generic_set_all,
14723 .get = generic_get_mtrr,
14724 diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
14725 index fd60f09..c94ef52 100644
14726 --- a/arch/x86/kernel/cpu/mtrr/main.c
14727 +++ b/arch/x86/kernel/cpu/mtrr/main.c
14728 @@ -60,14 +60,14 @@ static DEFINE_MUTEX(mtrr_mutex);
14729 u64 size_or_mask, size_and_mask;
14730 static bool mtrr_aps_delayed_init;
14731
14732 -static struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
14733 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
14734
14735 -struct mtrr_ops *mtrr_if;
14736 +const struct mtrr_ops *mtrr_if;
14737
14738 static void set_mtrr(unsigned int reg, unsigned long base,
14739 unsigned long size, mtrr_type type);
14740
14741 -void set_mtrr_ops(struct mtrr_ops *ops)
14742 +void set_mtrr_ops(const struct mtrr_ops *ops)
14743 {
14744 if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
14745 mtrr_ops[ops->vendor] = ops;
14746 diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
14747 index a501dee..816c719 100644
14748 --- a/arch/x86/kernel/cpu/mtrr/mtrr.h
14749 +++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
14750 @@ -25,14 +25,14 @@ struct mtrr_ops {
14751 int (*validate_add_page)(unsigned long base, unsigned long size,
14752 unsigned int type);
14753 int (*have_wrcomb)(void);
14754 -};
14755 +} __do_const;
14756
14757 extern int generic_get_free_region(unsigned long base, unsigned long size,
14758 int replace_reg);
14759 extern int generic_validate_add_page(unsigned long base, unsigned long size,
14760 unsigned int type);
14761
14762 -extern struct mtrr_ops generic_mtrr_ops;
14763 +extern const struct mtrr_ops generic_mtrr_ops;
14764
14765 extern int positive_have_wrcomb(void);
14766
14767 @@ -53,10 +53,10 @@ void fill_mtrr_var_range(unsigned int index,
14768 u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi);
14769 void get_mtrr_state(void);
14770
14771 -extern void set_mtrr_ops(struct mtrr_ops *ops);
14772 +extern void set_mtrr_ops(const struct mtrr_ops *ops);
14773
14774 extern u64 size_or_mask, size_and_mask;
14775 -extern struct mtrr_ops *mtrr_if;
14776 +extern const struct mtrr_ops *mtrr_if;
14777
14778 #define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
14779 #define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1)
14780 diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
14781 index 0ff02ca..fc49a60 100644
14782 --- a/arch/x86/kernel/cpu/perf_event.c
14783 +++ b/arch/x86/kernel/cpu/perf_event.c
14784 @@ -723,10 +723,10 @@ x86_perf_event_update(struct perf_event *event,
14785 * count to the generic event atomically:
14786 */
14787 again:
14788 - prev_raw_count = atomic64_read(&hwc->prev_count);
14789 + prev_raw_count = atomic64_read_unchecked(&hwc->prev_count);
14790 rdmsrl(hwc->event_base + idx, new_raw_count);
14791
14792 - if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
14793 + if (atomic64_cmpxchg_unchecked(&hwc->prev_count, prev_raw_count,
14794 new_raw_count) != prev_raw_count)
14795 goto again;
14796
14797 @@ -741,7 +741,7 @@ again:
14798 delta = (new_raw_count << shift) - (prev_raw_count << shift);
14799 delta >>= shift;
14800
14801 - atomic64_add(delta, &event->count);
14802 + atomic64_add_unchecked(delta, &event->count);
14803 atomic64_sub(delta, &hwc->period_left);
14804
14805 return new_raw_count;
14806 @@ -1353,7 +1353,7 @@ x86_perf_event_set_period(struct perf_event *event,
14807 * The hw event starts counting from this event offset,
14808 * mark it to be able to extra future deltas:
14809 */
14810 - atomic64_set(&hwc->prev_count, (u64)-left);
14811 + atomic64_set_unchecked(&hwc->prev_count, (u64)-left);
14812
14813 err = checking_wrmsrl(hwc->event_base + idx,
14814 (u64)(-left) & x86_pmu.event_mask);
14815 @@ -2357,7 +2357,7 @@ perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
14816 break;
14817
14818 callchain_store(entry, frame.return_address);
14819 - fp = frame.next_frame;
14820 + fp = (__force const void __user *)frame.next_frame;
14821 }
14822 }
14823
14824 diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
14825 index 898df97..9e82503 100644
14826 --- a/arch/x86/kernel/cpu/perfctr-watchdog.c
14827 +++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
14828 @@ -30,11 +30,11 @@ struct nmi_watchdog_ctlblk {
14829
14830 /* Interface defining a CPU specific perfctr watchdog */
14831 struct wd_ops {
14832 - int (*reserve)(void);
14833 - void (*unreserve)(void);
14834 - int (*setup)(unsigned nmi_hz);
14835 - void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
14836 - void (*stop)(void);
14837 + int (* const reserve)(void);
14838 + void (* const unreserve)(void);
14839 + int (* const setup)(unsigned nmi_hz);
14840 + void (* const rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
14841 + void (* const stop)(void);
14842 unsigned perfctr;
14843 unsigned evntsel;
14844 u64 checkbit;
14845 @@ -645,6 +645,7 @@ static const struct wd_ops p4_wd_ops = {
14846 #define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
14847 #define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
14848
14849 +/* cannot be const */
14850 static struct wd_ops intel_arch_wd_ops;
14851
14852 static int setup_intel_arch_watchdog(unsigned nmi_hz)
14853 @@ -697,6 +698,7 @@ static int setup_intel_arch_watchdog(unsigned nmi_hz)
14854 return 1;
14855 }
14856
14857 +/* cannot be const */
14858 static struct wd_ops intel_arch_wd_ops __read_mostly = {
14859 .reserve = single_msr_reserve,
14860 .unreserve = single_msr_unreserve,
14861 diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
14862 index ff95824..2ffdcb5 100644
14863 --- a/arch/x86/kernel/crash.c
14864 +++ b/arch/x86/kernel/crash.c
14865 @@ -41,7 +41,7 @@ static void kdump_nmi_callback(int cpu, struct die_args *args)
14866 regs = args->regs;
14867
14868 #ifdef CONFIG_X86_32
14869 - if (!user_mode_vm(regs)) {
14870 + if (!user_mode(regs)) {
14871 crash_fixup_ss_esp(&fixed_regs, regs);
14872 regs = &fixed_regs;
14873 }
14874 diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
14875 index 37250fe..bf2ec74 100644
14876 --- a/arch/x86/kernel/doublefault_32.c
14877 +++ b/arch/x86/kernel/doublefault_32.c
14878 @@ -11,7 +11,7 @@
14879
14880 #define DOUBLEFAULT_STACKSIZE (1024)
14881 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
14882 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
14883 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
14884
14885 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
14886
14887 @@ -21,7 +21,7 @@ static void doublefault_fn(void)
14888 unsigned long gdt, tss;
14889
14890 store_gdt(&gdt_desc);
14891 - gdt = gdt_desc.address;
14892 + gdt = (unsigned long)gdt_desc.address;
14893
14894 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
14895
14896 @@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
14897 /* 0x2 bit is always set */
14898 .flags = X86_EFLAGS_SF | 0x2,
14899 .sp = STACK_START,
14900 - .es = __USER_DS,
14901 + .es = __KERNEL_DS,
14902 .cs = __KERNEL_CS,
14903 .ss = __KERNEL_DS,
14904 - .ds = __USER_DS,
14905 + .ds = __KERNEL_DS,
14906 .fs = __KERNEL_PERCPU,
14907
14908 .__cr3 = __pa_nodebug(swapper_pg_dir),
14909 diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
14910 index 2d8a371..4fa6ae6 100644
14911 --- a/arch/x86/kernel/dumpstack.c
14912 +++ b/arch/x86/kernel/dumpstack.c
14913 @@ -2,6 +2,9 @@
14914 * Copyright (C) 1991, 1992 Linus Torvalds
14915 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
14916 */
14917 +#ifdef CONFIG_GRKERNSEC_HIDESYM
14918 +#define __INCLUDED_BY_HIDESYM 1
14919 +#endif
14920 #include <linux/kallsyms.h>
14921 #include <linux/kprobes.h>
14922 #include <linux/uaccess.h>
14923 @@ -28,7 +31,7 @@ static int die_counter;
14924
14925 void printk_address(unsigned long address, int reliable)
14926 {
14927 - printk(" [<%p>] %s%pS\n", (void *) address,
14928 + printk(" [<%p>] %s%pA\n", (void *) address,
14929 reliable ? "" : "? ", (void *) address);
14930 }
14931
14932 @@ -36,9 +39,8 @@ void printk_address(unsigned long address, int reliable)
14933 static void
14934 print_ftrace_graph_addr(unsigned long addr, void *data,
14935 const struct stacktrace_ops *ops,
14936 - struct thread_info *tinfo, int *graph)
14937 + struct task_struct *task, int *graph)
14938 {
14939 - struct task_struct *task = tinfo->task;
14940 unsigned long ret_addr;
14941 int index = task->curr_ret_stack;
14942
14943 @@ -59,7 +61,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
14944 static inline void
14945 print_ftrace_graph_addr(unsigned long addr, void *data,
14946 const struct stacktrace_ops *ops,
14947 - struct thread_info *tinfo, int *graph)
14948 + struct task_struct *task, int *graph)
14949 { }
14950 #endif
14951
14952 @@ -70,10 +72,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
14953 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
14954 */
14955
14956 -static inline int valid_stack_ptr(struct thread_info *tinfo,
14957 - void *p, unsigned int size, void *end)
14958 +static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
14959 {
14960 - void *t = tinfo;
14961 if (end) {
14962 if (p < end && p >= (end-THREAD_SIZE))
14963 return 1;
14964 @@ -84,14 +84,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
14965 }
14966
14967 unsigned long
14968 -print_context_stack(struct thread_info *tinfo,
14969 +print_context_stack(struct task_struct *task, void *stack_start,
14970 unsigned long *stack, unsigned long bp,
14971 const struct stacktrace_ops *ops, void *data,
14972 unsigned long *end, int *graph)
14973 {
14974 struct stack_frame *frame = (struct stack_frame *)bp;
14975
14976 - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
14977 + while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
14978 unsigned long addr;
14979
14980 addr = *stack;
14981 @@ -103,7 +103,7 @@ print_context_stack(struct thread_info *tinfo,
14982 } else {
14983 ops->address(data, addr, 0);
14984 }
14985 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
14986 + print_ftrace_graph_addr(addr, data, ops, task, graph);
14987 }
14988 stack++;
14989 }
14990 @@ -180,7 +180,7 @@ void dump_stack(void)
14991 #endif
14992
14993 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
14994 - current->pid, current->comm, print_tainted(),
14995 + task_pid_nr(current), current->comm, print_tainted(),
14996 init_utsname()->release,
14997 (int)strcspn(init_utsname()->version, " "),
14998 init_utsname()->version);
14999 @@ -220,6 +220,8 @@ unsigned __kprobes long oops_begin(void)
15000 return flags;
15001 }
15002
15003 +extern void gr_handle_kernel_exploit(void);
15004 +
15005 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
15006 {
15007 if (regs && kexec_should_crash(current))
15008 @@ -241,7 +243,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
15009 panic("Fatal exception in interrupt");
15010 if (panic_on_oops)
15011 panic("Fatal exception");
15012 - do_exit(signr);
15013 +
15014 + gr_handle_kernel_exploit();
15015 +
15016 + do_group_exit(signr);
15017 }
15018
15019 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
15020 @@ -295,7 +300,7 @@ void die(const char *str, struct pt_regs *regs, long err)
15021 unsigned long flags = oops_begin();
15022 int sig = SIGSEGV;
15023
15024 - if (!user_mode_vm(regs))
15025 + if (!user_mode(regs))
15026 report_bug(regs->ip, regs);
15027
15028 if (__die(str, regs, err))
15029 diff --git a/arch/x86/kernel/dumpstack.h b/arch/x86/kernel/dumpstack.h
15030 index 81086c2..13e8b17 100644
15031 --- a/arch/x86/kernel/dumpstack.h
15032 +++ b/arch/x86/kernel/dumpstack.h
15033 @@ -15,7 +15,7 @@
15034 #endif
15035
15036 extern unsigned long
15037 -print_context_stack(struct thread_info *tinfo,
15038 +print_context_stack(struct task_struct *task, void *stack_start,
15039 unsigned long *stack, unsigned long bp,
15040 const struct stacktrace_ops *ops, void *data,
15041 unsigned long *end, int *graph);
15042 diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
15043 index f7dd2a7..504f53b 100644
15044 --- a/arch/x86/kernel/dumpstack_32.c
15045 +++ b/arch/x86/kernel/dumpstack_32.c
15046 @@ -53,16 +53,12 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
15047 #endif
15048
15049 for (;;) {
15050 - struct thread_info *context;
15051 + void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
15052 + bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
15053
15054 - context = (struct thread_info *)
15055 - ((unsigned long)stack & (~(THREAD_SIZE - 1)));
15056 - bp = print_context_stack(context, stack, bp, ops,
15057 - data, NULL, &graph);
15058 -
15059 - stack = (unsigned long *)context->previous_esp;
15060 - if (!stack)
15061 + if (stack_start == task_stack_page(task))
15062 break;
15063 + stack = *(unsigned long **)stack_start;
15064 if (ops->stack(data, "IRQ") < 0)
15065 break;
15066 touch_nmi_watchdog();
15067 @@ -112,11 +108,12 @@ void show_registers(struct pt_regs *regs)
15068 * When in-kernel, we also print out the stack and code at the
15069 * time of the fault..
15070 */
15071 - if (!user_mode_vm(regs)) {
15072 + if (!user_mode(regs)) {
15073 unsigned int code_prologue = code_bytes * 43 / 64;
15074 unsigned int code_len = code_bytes;
15075 unsigned char c;
15076 u8 *ip;
15077 + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
15078
15079 printk(KERN_EMERG "Stack:\n");
15080 show_stack_log_lvl(NULL, regs, &regs->sp,
15081 @@ -124,10 +121,10 @@ void show_registers(struct pt_regs *regs)
15082
15083 printk(KERN_EMERG "Code: ");
15084
15085 - ip = (u8 *)regs->ip - code_prologue;
15086 + ip = (u8 *)regs->ip - code_prologue + cs_base;
15087 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
15088 /* try starting at IP */
15089 - ip = (u8 *)regs->ip;
15090 + ip = (u8 *)regs->ip + cs_base;
15091 code_len = code_len - code_prologue + 1;
15092 }
15093 for (i = 0; i < code_len; i++, ip++) {
15094 @@ -136,7 +133,7 @@ void show_registers(struct pt_regs *regs)
15095 printk(" Bad EIP value.");
15096 break;
15097 }
15098 - if (ip == (u8 *)regs->ip)
15099 + if (ip == (u8 *)regs->ip + cs_base)
15100 printk("<%02x> ", c);
15101 else
15102 printk("%02x ", c);
15103 @@ -145,10 +142,23 @@ void show_registers(struct pt_regs *regs)
15104 printk("\n");
15105 }
15106
15107 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15108 +void pax_check_alloca(unsigned long size)
15109 +{
15110 + unsigned long sp = (unsigned long)&sp, stack_left;
15111 +
15112 + /* all kernel stacks are of the same size */
15113 + stack_left = sp & (THREAD_SIZE - 1);
15114 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
15115 +}
15116 +EXPORT_SYMBOL(pax_check_alloca);
15117 +#endif
15118 +
15119 int is_valid_bugaddr(unsigned long ip)
15120 {
15121 unsigned short ud2;
15122
15123 + ip = ktla_ktva(ip);
15124 if (ip < PAGE_OFFSET)
15125 return 0;
15126 if (probe_kernel_address((unsigned short *)ip, ud2))
15127 diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
15128 index a071e6b..36cd585 100644
15129 --- a/arch/x86/kernel/dumpstack_64.c
15130 +++ b/arch/x86/kernel/dumpstack_64.c
15131 @@ -116,8 +116,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
15132 unsigned long *irq_stack_end =
15133 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
15134 unsigned used = 0;
15135 - struct thread_info *tinfo;
15136 int graph = 0;
15137 + void *stack_start;
15138
15139 if (!task)
15140 task = current;
15141 @@ -146,10 +146,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
15142 * current stack address. If the stacks consist of nested
15143 * exceptions
15144 */
15145 - tinfo = task_thread_info(task);
15146 for (;;) {
15147 char *id;
15148 unsigned long *estack_end;
15149 +
15150 estack_end = in_exception_stack(cpu, (unsigned long)stack,
15151 &used, &id);
15152
15153 @@ -157,7 +157,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
15154 if (ops->stack(data, id) < 0)
15155 break;
15156
15157 - bp = print_context_stack(tinfo, stack, bp, ops,
15158 + bp = print_context_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
15159 data, estack_end, &graph);
15160 ops->stack(data, "<EOE>");
15161 /*
15162 @@ -176,7 +176,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
15163 if (stack >= irq_stack && stack < irq_stack_end) {
15164 if (ops->stack(data, "IRQ") < 0)
15165 break;
15166 - bp = print_context_stack(tinfo, stack, bp,
15167 + bp = print_context_stack(task, irq_stack, stack, bp,
15168 ops, data, irq_stack_end, &graph);
15169 /*
15170 * We link to the next stack (which would be
15171 @@ -195,7 +195,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
15172 /*
15173 * This handles the process stack:
15174 */
15175 - bp = print_context_stack(tinfo, stack, bp, ops, data, NULL, &graph);
15176 + stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
15177 + bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
15178 put_cpu();
15179 }
15180 EXPORT_SYMBOL(dump_trace);
15181 @@ -304,3 +305,50 @@ int is_valid_bugaddr(unsigned long ip)
15182 return ud2 == 0x0b0f;
15183 }
15184
15185 +
15186 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15187 +void pax_check_alloca(unsigned long size)
15188 +{
15189 + unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
15190 + unsigned cpu, used;
15191 + char *id;
15192 +
15193 + /* check the process stack first */
15194 + stack_start = (unsigned long)task_stack_page(current);
15195 + stack_end = stack_start + THREAD_SIZE;
15196 + if (likely(stack_start <= sp && sp < stack_end)) {
15197 + unsigned long stack_left = sp & (THREAD_SIZE - 1);
15198 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
15199 + return;
15200 + }
15201 +
15202 + cpu = get_cpu();
15203 +
15204 + /* check the irq stacks */
15205 + stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
15206 + stack_start = stack_end - IRQ_STACK_SIZE;
15207 + if (stack_start <= sp && sp < stack_end) {
15208 + unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
15209 + put_cpu();
15210 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
15211 + return;
15212 + }
15213 +
15214 + /* check the exception stacks */
15215 + used = 0;
15216 + stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
15217 + stack_start = stack_end - EXCEPTION_STKSZ;
15218 + if (stack_end && stack_start <= sp && sp < stack_end) {
15219 + unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
15220 + put_cpu();
15221 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
15222 + return;
15223 + }
15224 +
15225 + put_cpu();
15226 +
15227 + /* unknown stack */
15228 + BUG();
15229 +}
15230 +EXPORT_SYMBOL(pax_check_alloca);
15231 +#endif
15232 diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
15233 index a89739a..95e0c48 100644
15234 --- a/arch/x86/kernel/e820.c
15235 +++ b/arch/x86/kernel/e820.c
15236 @@ -733,7 +733,7 @@ struct early_res {
15237 };
15238 static struct early_res early_res[MAX_EARLY_RES] __initdata = {
15239 { 0, PAGE_SIZE, "BIOS data page" }, /* BIOS data page */
15240 - {}
15241 + { 0, 0, {0}, 0 }
15242 };
15243
15244 static int __init find_overlapped_early(u64 start, u64 end)
15245 diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
15246 index b9c830c..1e41a96 100644
15247 --- a/arch/x86/kernel/early_printk.c
15248 +++ b/arch/x86/kernel/early_printk.c
15249 @@ -7,6 +7,7 @@
15250 #include <linux/pci_regs.h>
15251 #include <linux/pci_ids.h>
15252 #include <linux/errno.h>
15253 +#include <linux/sched.h>
15254 #include <asm/io.h>
15255 #include <asm/processor.h>
15256 #include <asm/fcntl.h>
15257 @@ -170,6 +171,8 @@ asmlinkage void early_printk(const char *fmt, ...)
15258 int n;
15259 va_list ap;
15260
15261 + pax_track_stack();
15262 +
15263 va_start(ap, fmt);
15264 n = vscnprintf(buf, sizeof(buf), fmt, ap);
15265 early_console->write(early_console, buf, n);
15266 diff --git a/arch/x86/kernel/efi_32.c b/arch/x86/kernel/efi_32.c
15267 index 5cab48e..b025f9b 100644
15268 --- a/arch/x86/kernel/efi_32.c
15269 +++ b/arch/x86/kernel/efi_32.c
15270 @@ -38,70 +38,56 @@
15271 */
15272
15273 static unsigned long efi_rt_eflags;
15274 -static pgd_t efi_bak_pg_dir_pointer[2];
15275 +static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
15276
15277 -void efi_call_phys_prelog(void)
15278 +void __init efi_call_phys_prelog(void)
15279 {
15280 - unsigned long cr4;
15281 - unsigned long temp;
15282 struct desc_ptr gdt_descr;
15283
15284 +#ifdef CONFIG_PAX_KERNEXEC
15285 + struct desc_struct d;
15286 +#endif
15287 +
15288 local_irq_save(efi_rt_eflags);
15289
15290 - /*
15291 - * If I don't have PAE, I should just duplicate two entries in page
15292 - * directory. If I have PAE, I just need to duplicate one entry in
15293 - * page directory.
15294 - */
15295 - cr4 = read_cr4_safe();
15296 -
15297 - if (cr4 & X86_CR4_PAE) {
15298 - efi_bak_pg_dir_pointer[0].pgd =
15299 - swapper_pg_dir[pgd_index(0)].pgd;
15300 - swapper_pg_dir[0].pgd =
15301 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
15302 - } else {
15303 - efi_bak_pg_dir_pointer[0].pgd =
15304 - swapper_pg_dir[pgd_index(0)].pgd;
15305 - efi_bak_pg_dir_pointer[1].pgd =
15306 - swapper_pg_dir[pgd_index(0x400000)].pgd;
15307 - swapper_pg_dir[pgd_index(0)].pgd =
15308 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
15309 - temp = PAGE_OFFSET + 0x400000;
15310 - swapper_pg_dir[pgd_index(0x400000)].pgd =
15311 - swapper_pg_dir[pgd_index(temp)].pgd;
15312 - }
15313 + clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
15314 + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
15315 + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
15316
15317 /*
15318 * After the lock is released, the original page table is restored.
15319 */
15320 __flush_tlb_all();
15321
15322 +#ifdef CONFIG_PAX_KERNEXEC
15323 + pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
15324 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
15325 + pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
15326 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
15327 +#endif
15328 +
15329 gdt_descr.address = __pa(get_cpu_gdt_table(0));
15330 gdt_descr.size = GDT_SIZE - 1;
15331 load_gdt(&gdt_descr);
15332 }
15333
15334 -void efi_call_phys_epilog(void)
15335 +void __init efi_call_phys_epilog(void)
15336 {
15337 - unsigned long cr4;
15338 struct desc_ptr gdt_descr;
15339
15340 +#ifdef CONFIG_PAX_KERNEXEC
15341 + struct desc_struct d;
15342 +
15343 + memset(&d, 0, sizeof d);
15344 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
15345 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
15346 +#endif
15347 +
15348 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
15349 gdt_descr.size = GDT_SIZE - 1;
15350 load_gdt(&gdt_descr);
15351
15352 - cr4 = read_cr4_safe();
15353 -
15354 - if (cr4 & X86_CR4_PAE) {
15355 - swapper_pg_dir[pgd_index(0)].pgd =
15356 - efi_bak_pg_dir_pointer[0].pgd;
15357 - } else {
15358 - swapper_pg_dir[pgd_index(0)].pgd =
15359 - efi_bak_pg_dir_pointer[0].pgd;
15360 - swapper_pg_dir[pgd_index(0x400000)].pgd =
15361 - efi_bak_pg_dir_pointer[1].pgd;
15362 - }
15363 + clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
15364
15365 /*
15366 * After the lock is released, the original page table is restored.
15367 diff --git a/arch/x86/kernel/efi_stub_32.S b/arch/x86/kernel/efi_stub_32.S
15368 index fbe66e6..c5c0dd2 100644
15369 --- a/arch/x86/kernel/efi_stub_32.S
15370 +++ b/arch/x86/kernel/efi_stub_32.S
15371 @@ -6,7 +6,9 @@
15372 */
15373
15374 #include <linux/linkage.h>
15375 +#include <linux/init.h>
15376 #include <asm/page_types.h>
15377 +#include <asm/segment.h>
15378
15379 /*
15380 * efi_call_phys(void *, ...) is a function with variable parameters.
15381 @@ -20,7 +22,7 @@
15382 * service functions will comply with gcc calling convention, too.
15383 */
15384
15385 -.text
15386 +__INIT
15387 ENTRY(efi_call_phys)
15388 /*
15389 * 0. The function can only be called in Linux kernel. So CS has been
15390 @@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
15391 * The mapping of lower virtual memory has been created in prelog and
15392 * epilog.
15393 */
15394 - movl $1f, %edx
15395 - subl $__PAGE_OFFSET, %edx
15396 - jmp *%edx
15397 + movl $(__KERNEXEC_EFI_DS), %edx
15398 + mov %edx, %ds
15399 + mov %edx, %es
15400 + mov %edx, %ss
15401 + ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
15402 1:
15403
15404 /*
15405 @@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
15406 * parameter 2, ..., param n. To make things easy, we save the return
15407 * address of efi_call_phys in a global variable.
15408 */
15409 - popl %edx
15410 - movl %edx, saved_return_addr
15411 - /* get the function pointer into ECX*/
15412 - popl %ecx
15413 - movl %ecx, efi_rt_function_ptr
15414 - movl $2f, %edx
15415 - subl $__PAGE_OFFSET, %edx
15416 - pushl %edx
15417 + popl (saved_return_addr)
15418 + popl (efi_rt_function_ptr)
15419
15420 /*
15421 * 3. Clear PG bit in %CR0.
15422 @@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
15423 /*
15424 * 5. Call the physical function.
15425 */
15426 - jmp *%ecx
15427 + call *(efi_rt_function_ptr-__PAGE_OFFSET)
15428
15429 -2:
15430 /*
15431 * 6. After EFI runtime service returns, control will return to
15432 * following instruction. We'd better readjust stack pointer first.
15433 @@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
15434 movl %cr0, %edx
15435 orl $0x80000000, %edx
15436 movl %edx, %cr0
15437 - jmp 1f
15438 -1:
15439 +
15440 /*
15441 * 8. Now restore the virtual mode from flat mode by
15442 * adding EIP with PAGE_OFFSET.
15443 */
15444 - movl $1f, %edx
15445 - jmp *%edx
15446 + ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
15447 1:
15448 + movl $(__KERNEL_DS), %edx
15449 + mov %edx, %ds
15450 + mov %edx, %es
15451 + mov %edx, %ss
15452
15453 /*
15454 * 9. Balance the stack. And because EAX contain the return value,
15455 * we'd better not clobber it.
15456 */
15457 - leal efi_rt_function_ptr, %edx
15458 - movl (%edx), %ecx
15459 - pushl %ecx
15460 + pushl (efi_rt_function_ptr)
15461
15462 /*
15463 - * 10. Push the saved return address onto the stack and return.
15464 + * 10. Return to the saved return address.
15465 */
15466 - leal saved_return_addr, %edx
15467 - movl (%edx), %ecx
15468 - pushl %ecx
15469 - ret
15470 + jmpl *(saved_return_addr)
15471 ENDPROC(efi_call_phys)
15472 .previous
15473
15474 -.data
15475 +__INITDATA
15476 saved_return_addr:
15477 .long 0
15478 efi_rt_function_ptr:
15479 diff --git a/arch/x86/kernel/efi_stub_64.S b/arch/x86/kernel/efi_stub_64.S
15480 index 4c07cca..2c8427d 100644
15481 --- a/arch/x86/kernel/efi_stub_64.S
15482 +++ b/arch/x86/kernel/efi_stub_64.S
15483 @@ -7,6 +7,7 @@
15484 */
15485
15486 #include <linux/linkage.h>
15487 +#include <asm/alternative-asm.h>
15488
15489 #define SAVE_XMM \
15490 mov %rsp, %rax; \
15491 @@ -40,6 +41,7 @@ ENTRY(efi_call0)
15492 call *%rdi
15493 addq $32, %rsp
15494 RESTORE_XMM
15495 + pax_force_retaddr 0, 1
15496 ret
15497 ENDPROC(efi_call0)
15498
15499 @@ -50,6 +52,7 @@ ENTRY(efi_call1)
15500 call *%rdi
15501 addq $32, %rsp
15502 RESTORE_XMM
15503 + pax_force_retaddr 0, 1
15504 ret
15505 ENDPROC(efi_call1)
15506
15507 @@ -60,6 +63,7 @@ ENTRY(efi_call2)
15508 call *%rdi
15509 addq $32, %rsp
15510 RESTORE_XMM
15511 + pax_force_retaddr 0, 1
15512 ret
15513 ENDPROC(efi_call2)
15514
15515 @@ -71,6 +75,7 @@ ENTRY(efi_call3)
15516 call *%rdi
15517 addq $32, %rsp
15518 RESTORE_XMM
15519 + pax_force_retaddr 0, 1
15520 ret
15521 ENDPROC(efi_call3)
15522
15523 @@ -83,6 +88,7 @@ ENTRY(efi_call4)
15524 call *%rdi
15525 addq $32, %rsp
15526 RESTORE_XMM
15527 + pax_force_retaddr 0, 1
15528 ret
15529 ENDPROC(efi_call4)
15530
15531 @@ -96,6 +102,7 @@ ENTRY(efi_call5)
15532 call *%rdi
15533 addq $48, %rsp
15534 RESTORE_XMM
15535 + pax_force_retaddr 0, 1
15536 ret
15537 ENDPROC(efi_call5)
15538
15539 @@ -112,5 +119,6 @@ ENTRY(efi_call6)
15540 call *%rdi
15541 addq $48, %rsp
15542 RESTORE_XMM
15543 + pax_force_retaddr 0, 1
15544 ret
15545 ENDPROC(efi_call6)
15546 diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
15547 index c097e7d..c689cf4 100644
15548 --- a/arch/x86/kernel/entry_32.S
15549 +++ b/arch/x86/kernel/entry_32.S
15550 @@ -185,13 +185,146 @@
15551 /*CFI_REL_OFFSET gs, PT_GS*/
15552 .endm
15553 .macro SET_KERNEL_GS reg
15554 +
15555 +#ifdef CONFIG_CC_STACKPROTECTOR
15556 movl $(__KERNEL_STACK_CANARY), \reg
15557 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
15558 + movl $(__USER_DS), \reg
15559 +#else
15560 + xorl \reg, \reg
15561 +#endif
15562 +
15563 movl \reg, %gs
15564 .endm
15565
15566 #endif /* CONFIG_X86_32_LAZY_GS */
15567
15568 -.macro SAVE_ALL
15569 +.macro pax_enter_kernel
15570 +#ifdef CONFIG_PAX_KERNEXEC
15571 + call pax_enter_kernel
15572 +#endif
15573 +.endm
15574 +
15575 +.macro pax_exit_kernel
15576 +#ifdef CONFIG_PAX_KERNEXEC
15577 + call pax_exit_kernel
15578 +#endif
15579 +.endm
15580 +
15581 +#ifdef CONFIG_PAX_KERNEXEC
15582 +ENTRY(pax_enter_kernel)
15583 +#ifdef CONFIG_PARAVIRT
15584 + pushl %eax
15585 + pushl %ecx
15586 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
15587 + mov %eax, %esi
15588 +#else
15589 + mov %cr0, %esi
15590 +#endif
15591 + bts $16, %esi
15592 + jnc 1f
15593 + mov %cs, %esi
15594 + cmp $__KERNEL_CS, %esi
15595 + jz 3f
15596 + ljmp $__KERNEL_CS, $3f
15597 +1: ljmp $__KERNEXEC_KERNEL_CS, $2f
15598 +2:
15599 +#ifdef CONFIG_PARAVIRT
15600 + mov %esi, %eax
15601 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
15602 +#else
15603 + mov %esi, %cr0
15604 +#endif
15605 +3:
15606 +#ifdef CONFIG_PARAVIRT
15607 + popl %ecx
15608 + popl %eax
15609 +#endif
15610 + ret
15611 +ENDPROC(pax_enter_kernel)
15612 +
15613 +ENTRY(pax_exit_kernel)
15614 +#ifdef CONFIG_PARAVIRT
15615 + pushl %eax
15616 + pushl %ecx
15617 +#endif
15618 + mov %cs, %esi
15619 + cmp $__KERNEXEC_KERNEL_CS, %esi
15620 + jnz 2f
15621 +#ifdef CONFIG_PARAVIRT
15622 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
15623 + mov %eax, %esi
15624 +#else
15625 + mov %cr0, %esi
15626 +#endif
15627 + btr $16, %esi
15628 + ljmp $__KERNEL_CS, $1f
15629 +1:
15630 +#ifdef CONFIG_PARAVIRT
15631 + mov %esi, %eax
15632 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
15633 +#else
15634 + mov %esi, %cr0
15635 +#endif
15636 +2:
15637 +#ifdef CONFIG_PARAVIRT
15638 + popl %ecx
15639 + popl %eax
15640 +#endif
15641 + ret
15642 +ENDPROC(pax_exit_kernel)
15643 +#endif
15644 +
15645 +.macro pax_erase_kstack
15646 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15647 + call pax_erase_kstack
15648 +#endif
15649 +.endm
15650 +
15651 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15652 +/*
15653 + * ebp: thread_info
15654 + * ecx, edx: can be clobbered
15655 + */
15656 +ENTRY(pax_erase_kstack)
15657 + pushl %edi
15658 + pushl %eax
15659 +
15660 + mov TI_lowest_stack(%ebp), %edi
15661 + mov $-0xBEEF, %eax
15662 + std
15663 +
15664 +1: mov %edi, %ecx
15665 + and $THREAD_SIZE_asm - 1, %ecx
15666 + shr $2, %ecx
15667 + repne scasl
15668 + jecxz 2f
15669 +
15670 + cmp $2*16, %ecx
15671 + jc 2f
15672 +
15673 + mov $2*16, %ecx
15674 + repe scasl
15675 + jecxz 2f
15676 + jne 1b
15677 +
15678 +2: cld
15679 + mov %esp, %ecx
15680 + sub %edi, %ecx
15681 + shr $2, %ecx
15682 + rep stosl
15683 +
15684 + mov TI_task_thread_sp0(%ebp), %edi
15685 + sub $128, %edi
15686 + mov %edi, TI_lowest_stack(%ebp)
15687 +
15688 + popl %eax
15689 + popl %edi
15690 + ret
15691 +ENDPROC(pax_erase_kstack)
15692 +#endif
15693 +
15694 +.macro __SAVE_ALL _DS
15695 cld
15696 PUSH_GS
15697 pushl %fs
15698 @@ -224,7 +357,7 @@
15699 pushl %ebx
15700 CFI_ADJUST_CFA_OFFSET 4
15701 CFI_REL_OFFSET ebx, 0
15702 - movl $(__USER_DS), %edx
15703 + movl $\_DS, %edx
15704 movl %edx, %ds
15705 movl %edx, %es
15706 movl $(__KERNEL_PERCPU), %edx
15707 @@ -232,6 +365,15 @@
15708 SET_KERNEL_GS %edx
15709 .endm
15710
15711 +.macro SAVE_ALL
15712 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
15713 + __SAVE_ALL __KERNEL_DS
15714 + pax_enter_kernel
15715 +#else
15716 + __SAVE_ALL __USER_DS
15717 +#endif
15718 +.endm
15719 +
15720 .macro RESTORE_INT_REGS
15721 popl %ebx
15722 CFI_ADJUST_CFA_OFFSET -4
15723 @@ -331,7 +473,7 @@ ENTRY(ret_from_fork)
15724 CFI_ADJUST_CFA_OFFSET -4
15725 jmp syscall_exit
15726 CFI_ENDPROC
15727 -END(ret_from_fork)
15728 +ENDPROC(ret_from_fork)
15729
15730 /*
15731 * Return to user mode is not as complex as all this looks,
15732 @@ -352,7 +494,15 @@ check_userspace:
15733 movb PT_CS(%esp), %al
15734 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
15735 cmpl $USER_RPL, %eax
15736 +
15737 +#ifdef CONFIG_PAX_KERNEXEC
15738 + jae resume_userspace
15739 +
15740 + PAX_EXIT_KERNEL
15741 + jmp resume_kernel
15742 +#else
15743 jb resume_kernel # not returning to v8086 or userspace
15744 +#endif
15745
15746 ENTRY(resume_userspace)
15747 LOCKDEP_SYS_EXIT
15748 @@ -364,8 +514,8 @@ ENTRY(resume_userspace)
15749 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
15750 # int/exception return?
15751 jne work_pending
15752 - jmp restore_all
15753 -END(ret_from_exception)
15754 + jmp restore_all_pax
15755 +ENDPROC(ret_from_exception)
15756
15757 #ifdef CONFIG_PREEMPT
15758 ENTRY(resume_kernel)
15759 @@ -380,7 +530,7 @@ need_resched:
15760 jz restore_all
15761 call preempt_schedule_irq
15762 jmp need_resched
15763 -END(resume_kernel)
15764 +ENDPROC(resume_kernel)
15765 #endif
15766 CFI_ENDPROC
15767
15768 @@ -414,25 +564,36 @@ sysenter_past_esp:
15769 /*CFI_REL_OFFSET cs, 0*/
15770 /*
15771 * Push current_thread_info()->sysenter_return to the stack.
15772 - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
15773 - * pushed above; +8 corresponds to copy_thread's esp0 setting.
15774 */
15775 - pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
15776 + pushl $0
15777 CFI_ADJUST_CFA_OFFSET 4
15778 CFI_REL_OFFSET eip, 0
15779
15780 pushl %eax
15781 CFI_ADJUST_CFA_OFFSET 4
15782 SAVE_ALL
15783 + GET_THREAD_INFO(%ebp)
15784 + movl TI_sysenter_return(%ebp),%ebp
15785 + movl %ebp,PT_EIP(%esp)
15786 ENABLE_INTERRUPTS(CLBR_NONE)
15787
15788 /*
15789 * Load the potential sixth argument from user stack.
15790 * Careful about security.
15791 */
15792 + movl PT_OLDESP(%esp),%ebp
15793 +
15794 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15795 + mov PT_OLDSS(%esp),%ds
15796 +1: movl %ds:(%ebp),%ebp
15797 + push %ss
15798 + pop %ds
15799 +#else
15800 cmpl $__PAGE_OFFSET-3,%ebp
15801 jae syscall_fault
15802 1: movl (%ebp),%ebp
15803 +#endif
15804 +
15805 movl %ebp,PT_EBP(%esp)
15806 .section __ex_table,"a"
15807 .align 4
15808 @@ -455,12 +616,24 @@ sysenter_do_call:
15809 testl $_TIF_ALLWORK_MASK, %ecx
15810 jne sysexit_audit
15811 sysenter_exit:
15812 +
15813 +#ifdef CONFIG_PAX_RANDKSTACK
15814 + pushl_cfi %eax
15815 + movl %esp, %eax
15816 + call pax_randomize_kstack
15817 + popl_cfi %eax
15818 +#endif
15819 +
15820 + pax_erase_kstack
15821 +
15822 /* if something modifies registers it must also disable sysexit */
15823 movl PT_EIP(%esp), %edx
15824 movl PT_OLDESP(%esp), %ecx
15825 xorl %ebp,%ebp
15826 TRACE_IRQS_ON
15827 1: mov PT_FS(%esp), %fs
15828 +2: mov PT_DS(%esp), %ds
15829 +3: mov PT_ES(%esp), %es
15830 PTGS_TO_GS
15831 ENABLE_INTERRUPTS_SYSEXIT
15832
15833 @@ -477,6 +650,9 @@ sysenter_audit:
15834 movl %eax,%edx /* 2nd arg: syscall number */
15835 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
15836 call audit_syscall_entry
15837 +
15838 + pax_erase_kstack
15839 +
15840 pushl %ebx
15841 CFI_ADJUST_CFA_OFFSET 4
15842 movl PT_EAX(%esp),%eax /* reload syscall number */
15843 @@ -504,11 +680,17 @@ sysexit_audit:
15844
15845 CFI_ENDPROC
15846 .pushsection .fixup,"ax"
15847 -2: movl $0,PT_FS(%esp)
15848 +4: movl $0,PT_FS(%esp)
15849 + jmp 1b
15850 +5: movl $0,PT_DS(%esp)
15851 + jmp 1b
15852 +6: movl $0,PT_ES(%esp)
15853 jmp 1b
15854 .section __ex_table,"a"
15855 .align 4
15856 - .long 1b,2b
15857 + .long 1b,4b
15858 + .long 2b,5b
15859 + .long 3b,6b
15860 .popsection
15861 PTGS_TO_GS_EX
15862 ENDPROC(ia32_sysenter_target)
15863 @@ -538,6 +720,15 @@ syscall_exit:
15864 testl $_TIF_ALLWORK_MASK, %ecx # current->work
15865 jne syscall_exit_work
15866
15867 +restore_all_pax:
15868 +
15869 +#ifdef CONFIG_PAX_RANDKSTACK
15870 + movl %esp, %eax
15871 + call pax_randomize_kstack
15872 +#endif
15873 +
15874 + pax_erase_kstack
15875 +
15876 restore_all:
15877 TRACE_IRQS_IRET
15878 restore_all_notrace:
15879 @@ -602,10 +793,29 @@ ldt_ss:
15880 mov PT_OLDESP(%esp), %eax /* load userspace esp */
15881 mov %dx, %ax /* eax: new kernel esp */
15882 sub %eax, %edx /* offset (low word is 0) */
15883 - PER_CPU(gdt_page, %ebx)
15884 +#ifdef CONFIG_SMP
15885 + movl PER_CPU_VAR(cpu_number), %ebx
15886 + shll $PAGE_SHIFT_asm, %ebx
15887 + addl $cpu_gdt_table, %ebx
15888 +#else
15889 + movl $cpu_gdt_table, %ebx
15890 +#endif
15891 shr $16, %edx
15892 +
15893 +#ifdef CONFIG_PAX_KERNEXEC
15894 + mov %cr0, %esi
15895 + btr $16, %esi
15896 + mov %esi, %cr0
15897 +#endif
15898 +
15899 mov %dl, GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx) /* bits 16..23 */
15900 mov %dh, GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx) /* bits 24..31 */
15901 +
15902 +#ifdef CONFIG_PAX_KERNEXEC
15903 + bts $16, %esi
15904 + mov %esi, %cr0
15905 +#endif
15906 +
15907 pushl $__ESPFIX_SS
15908 CFI_ADJUST_CFA_OFFSET 4
15909 push %eax /* new kernel esp */
15910 @@ -636,36 +846,30 @@ work_resched:
15911 movl TI_flags(%ebp), %ecx
15912 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
15913 # than syscall tracing?
15914 - jz restore_all
15915 + jz restore_all_pax
15916 testb $_TIF_NEED_RESCHED, %cl
15917 jnz work_resched
15918
15919 work_notifysig: # deal with pending signals and
15920 # notify-resume requests
15921 + movl %esp, %eax
15922 #ifdef CONFIG_VM86
15923 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
15924 - movl %esp, %eax
15925 - jne work_notifysig_v86 # returning to kernel-space or
15926 + jz 1f # returning to kernel-space or
15927 # vm86-space
15928 - xorl %edx, %edx
15929 - call do_notify_resume
15930 - jmp resume_userspace_sig
15931
15932 - ALIGN
15933 -work_notifysig_v86:
15934 pushl %ecx # save ti_flags for do_notify_resume
15935 CFI_ADJUST_CFA_OFFSET 4
15936 call save_v86_state # %eax contains pt_regs pointer
15937 popl %ecx
15938 CFI_ADJUST_CFA_OFFSET -4
15939 movl %eax, %esp
15940 -#else
15941 - movl %esp, %eax
15942 +1:
15943 #endif
15944 xorl %edx, %edx
15945 call do_notify_resume
15946 jmp resume_userspace_sig
15947 -END(work_pending)
15948 +ENDPROC(work_pending)
15949
15950 # perform syscall exit tracing
15951 ALIGN
15952 @@ -673,11 +877,14 @@ syscall_trace_entry:
15953 movl $-ENOSYS,PT_EAX(%esp)
15954 movl %esp, %eax
15955 call syscall_trace_enter
15956 +
15957 + pax_erase_kstack
15958 +
15959 /* What it returned is what we'll actually use. */
15960 cmpl $(nr_syscalls), %eax
15961 jnae syscall_call
15962 jmp syscall_exit
15963 -END(syscall_trace_entry)
15964 +ENDPROC(syscall_trace_entry)
15965
15966 # perform syscall exit tracing
15967 ALIGN
15968 @@ -690,20 +897,24 @@ syscall_exit_work:
15969 movl %esp, %eax
15970 call syscall_trace_leave
15971 jmp resume_userspace
15972 -END(syscall_exit_work)
15973 +ENDPROC(syscall_exit_work)
15974 CFI_ENDPROC
15975
15976 RING0_INT_FRAME # can't unwind into user space anyway
15977 syscall_fault:
15978 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15979 + push %ss
15980 + pop %ds
15981 +#endif
15982 GET_THREAD_INFO(%ebp)
15983 movl $-EFAULT,PT_EAX(%esp)
15984 jmp resume_userspace
15985 -END(syscall_fault)
15986 +ENDPROC(syscall_fault)
15987
15988 syscall_badsys:
15989 movl $-ENOSYS,PT_EAX(%esp)
15990 jmp resume_userspace
15991 -END(syscall_badsys)
15992 +ENDPROC(syscall_badsys)
15993 CFI_ENDPROC
15994
15995 /*
15996 @@ -726,6 +937,33 @@ PTREGSCALL(rt_sigreturn)
15997 PTREGSCALL(vm86)
15998 PTREGSCALL(vm86old)
15999
16000 + ALIGN;
16001 +ENTRY(kernel_execve)
16002 + push %ebp
16003 + sub $PT_OLDSS+4,%esp
16004 + push %edi
16005 + push %ecx
16006 + push %eax
16007 + lea 3*4(%esp),%edi
16008 + mov $PT_OLDSS/4+1,%ecx
16009 + xorl %eax,%eax
16010 + rep stosl
16011 + pop %eax
16012 + pop %ecx
16013 + pop %edi
16014 + movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
16015 + mov %eax,PT_EBX(%esp)
16016 + mov %edx,PT_ECX(%esp)
16017 + mov %ecx,PT_EDX(%esp)
16018 + mov %esp,%eax
16019 + call sys_execve
16020 + GET_THREAD_INFO(%ebp)
16021 + test %eax,%eax
16022 + jz syscall_exit
16023 + add $PT_OLDSS+4,%esp
16024 + pop %ebp
16025 + ret
16026 +
16027 .macro FIXUP_ESPFIX_STACK
16028 /*
16029 * Switch back for ESPFIX stack to the normal zerobased stack
16030 @@ -735,7 +973,13 @@ PTREGSCALL(vm86old)
16031 * normal stack and adjusts ESP with the matching offset.
16032 */
16033 /* fixup the stack */
16034 - PER_CPU(gdt_page, %ebx)
16035 +#ifdef CONFIG_SMP
16036 + movl PER_CPU_VAR(cpu_number), %ebx
16037 + shll $PAGE_SHIFT_asm, %ebx
16038 + addl $cpu_gdt_table, %ebx
16039 +#else
16040 + movl $cpu_gdt_table, %ebx
16041 +#endif
16042 mov GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx), %al /* bits 16..23 */
16043 mov GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx), %ah /* bits 24..31 */
16044 shl $16, %eax
16045 @@ -793,7 +1037,7 @@ vector=vector+1
16046 .endr
16047 2: jmp common_interrupt
16048 .endr
16049 -END(irq_entries_start)
16050 +ENDPROC(irq_entries_start)
16051
16052 .previous
16053 END(interrupt)
16054 @@ -840,7 +1084,7 @@ ENTRY(coprocessor_error)
16055 CFI_ADJUST_CFA_OFFSET 4
16056 jmp error_code
16057 CFI_ENDPROC
16058 -END(coprocessor_error)
16059 +ENDPROC(coprocessor_error)
16060
16061 ENTRY(simd_coprocessor_error)
16062 RING0_INT_FRAME
16063 @@ -850,7 +1094,7 @@ ENTRY(simd_coprocessor_error)
16064 CFI_ADJUST_CFA_OFFSET 4
16065 jmp error_code
16066 CFI_ENDPROC
16067 -END(simd_coprocessor_error)
16068 +ENDPROC(simd_coprocessor_error)
16069
16070 ENTRY(device_not_available)
16071 RING0_INT_FRAME
16072 @@ -860,7 +1104,7 @@ ENTRY(device_not_available)
16073 CFI_ADJUST_CFA_OFFSET 4
16074 jmp error_code
16075 CFI_ENDPROC
16076 -END(device_not_available)
16077 +ENDPROC(device_not_available)
16078
16079 #ifdef CONFIG_PARAVIRT
16080 ENTRY(native_iret)
16081 @@ -869,12 +1113,12 @@ ENTRY(native_iret)
16082 .align 4
16083 .long native_iret, iret_exc
16084 .previous
16085 -END(native_iret)
16086 +ENDPROC(native_iret)
16087
16088 ENTRY(native_irq_enable_sysexit)
16089 sti
16090 sysexit
16091 -END(native_irq_enable_sysexit)
16092 +ENDPROC(native_irq_enable_sysexit)
16093 #endif
16094
16095 ENTRY(overflow)
16096 @@ -885,7 +1129,7 @@ ENTRY(overflow)
16097 CFI_ADJUST_CFA_OFFSET 4
16098 jmp error_code
16099 CFI_ENDPROC
16100 -END(overflow)
16101 +ENDPROC(overflow)
16102
16103 ENTRY(bounds)
16104 RING0_INT_FRAME
16105 @@ -895,7 +1139,7 @@ ENTRY(bounds)
16106 CFI_ADJUST_CFA_OFFSET 4
16107 jmp error_code
16108 CFI_ENDPROC
16109 -END(bounds)
16110 +ENDPROC(bounds)
16111
16112 ENTRY(invalid_op)
16113 RING0_INT_FRAME
16114 @@ -905,7 +1149,7 @@ ENTRY(invalid_op)
16115 CFI_ADJUST_CFA_OFFSET 4
16116 jmp error_code
16117 CFI_ENDPROC
16118 -END(invalid_op)
16119 +ENDPROC(invalid_op)
16120
16121 ENTRY(coprocessor_segment_overrun)
16122 RING0_INT_FRAME
16123 @@ -915,7 +1159,7 @@ ENTRY(coprocessor_segment_overrun)
16124 CFI_ADJUST_CFA_OFFSET 4
16125 jmp error_code
16126 CFI_ENDPROC
16127 -END(coprocessor_segment_overrun)
16128 +ENDPROC(coprocessor_segment_overrun)
16129
16130 ENTRY(invalid_TSS)
16131 RING0_EC_FRAME
16132 @@ -923,7 +1167,7 @@ ENTRY(invalid_TSS)
16133 CFI_ADJUST_CFA_OFFSET 4
16134 jmp error_code
16135 CFI_ENDPROC
16136 -END(invalid_TSS)
16137 +ENDPROC(invalid_TSS)
16138
16139 ENTRY(segment_not_present)
16140 RING0_EC_FRAME
16141 @@ -931,7 +1175,7 @@ ENTRY(segment_not_present)
16142 CFI_ADJUST_CFA_OFFSET 4
16143 jmp error_code
16144 CFI_ENDPROC
16145 -END(segment_not_present)
16146 +ENDPROC(segment_not_present)
16147
16148 ENTRY(stack_segment)
16149 RING0_EC_FRAME
16150 @@ -939,7 +1183,7 @@ ENTRY(stack_segment)
16151 CFI_ADJUST_CFA_OFFSET 4
16152 jmp error_code
16153 CFI_ENDPROC
16154 -END(stack_segment)
16155 +ENDPROC(stack_segment)
16156
16157 ENTRY(alignment_check)
16158 RING0_EC_FRAME
16159 @@ -947,7 +1191,7 @@ ENTRY(alignment_check)
16160 CFI_ADJUST_CFA_OFFSET 4
16161 jmp error_code
16162 CFI_ENDPROC
16163 -END(alignment_check)
16164 +ENDPROC(alignment_check)
16165
16166 ENTRY(divide_error)
16167 RING0_INT_FRAME
16168 @@ -957,7 +1201,7 @@ ENTRY(divide_error)
16169 CFI_ADJUST_CFA_OFFSET 4
16170 jmp error_code
16171 CFI_ENDPROC
16172 -END(divide_error)
16173 +ENDPROC(divide_error)
16174
16175 #ifdef CONFIG_X86_MCE
16176 ENTRY(machine_check)
16177 @@ -968,7 +1212,7 @@ ENTRY(machine_check)
16178 CFI_ADJUST_CFA_OFFSET 4
16179 jmp error_code
16180 CFI_ENDPROC
16181 -END(machine_check)
16182 +ENDPROC(machine_check)
16183 #endif
16184
16185 ENTRY(spurious_interrupt_bug)
16186 @@ -979,7 +1223,7 @@ ENTRY(spurious_interrupt_bug)
16187 CFI_ADJUST_CFA_OFFSET 4
16188 jmp error_code
16189 CFI_ENDPROC
16190 -END(spurious_interrupt_bug)
16191 +ENDPROC(spurious_interrupt_bug)
16192
16193 ENTRY(kernel_thread_helper)
16194 pushl $0 # fake return address for unwinder
16195 @@ -1095,7 +1339,7 @@ ENDPROC(xen_failsafe_callback)
16196
16197 ENTRY(mcount)
16198 ret
16199 -END(mcount)
16200 +ENDPROC(mcount)
16201
16202 ENTRY(ftrace_caller)
16203 cmpl $0, function_trace_stop
16204 @@ -1124,7 +1368,7 @@ ftrace_graph_call:
16205 .globl ftrace_stub
16206 ftrace_stub:
16207 ret
16208 -END(ftrace_caller)
16209 +ENDPROC(ftrace_caller)
16210
16211 #else /* ! CONFIG_DYNAMIC_FTRACE */
16212
16213 @@ -1160,7 +1404,7 @@ trace:
16214 popl %ecx
16215 popl %eax
16216 jmp ftrace_stub
16217 -END(mcount)
16218 +ENDPROC(mcount)
16219 #endif /* CONFIG_DYNAMIC_FTRACE */
16220 #endif /* CONFIG_FUNCTION_TRACER */
16221
16222 @@ -1181,7 +1425,7 @@ ENTRY(ftrace_graph_caller)
16223 popl %ecx
16224 popl %eax
16225 ret
16226 -END(ftrace_graph_caller)
16227 +ENDPROC(ftrace_graph_caller)
16228
16229 .globl return_to_handler
16230 return_to_handler:
16231 @@ -1198,7 +1442,6 @@ return_to_handler:
16232 ret
16233 #endif
16234
16235 -.section .rodata,"a"
16236 #include "syscall_table_32.S"
16237
16238 syscall_table_size=(.-sys_call_table)
16239 @@ -1255,15 +1498,18 @@ error_code:
16240 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
16241 REG_TO_PTGS %ecx
16242 SET_KERNEL_GS %ecx
16243 - movl $(__USER_DS), %ecx
16244 + movl $(__KERNEL_DS), %ecx
16245 movl %ecx, %ds
16246 movl %ecx, %es
16247 +
16248 + pax_enter_kernel
16249 +
16250 TRACE_IRQS_OFF
16251 movl %esp,%eax # pt_regs pointer
16252 call *%edi
16253 jmp ret_from_exception
16254 CFI_ENDPROC
16255 -END(page_fault)
16256 +ENDPROC(page_fault)
16257
16258 /*
16259 * Debug traps and NMI can happen at the one SYSENTER instruction
16260 @@ -1309,7 +1555,7 @@ debug_stack_correct:
16261 call do_debug
16262 jmp ret_from_exception
16263 CFI_ENDPROC
16264 -END(debug)
16265 +ENDPROC(debug)
16266
16267 /*
16268 * NMI is doubly nasty. It can happen _while_ we're handling
16269 @@ -1351,6 +1597,9 @@ nmi_stack_correct:
16270 xorl %edx,%edx # zero error code
16271 movl %esp,%eax # pt_regs pointer
16272 call do_nmi
16273 +
16274 + pax_exit_kernel
16275 +
16276 jmp restore_all_notrace
16277 CFI_ENDPROC
16278
16279 @@ -1391,12 +1640,15 @@ nmi_espfix_stack:
16280 FIXUP_ESPFIX_STACK # %eax == %esp
16281 xorl %edx,%edx # zero error code
16282 call do_nmi
16283 +
16284 + pax_exit_kernel
16285 +
16286 RESTORE_REGS
16287 lss 12+4(%esp), %esp # back to espfix stack
16288 CFI_ADJUST_CFA_OFFSET -24
16289 jmp irq_return
16290 CFI_ENDPROC
16291 -END(nmi)
16292 +ENDPROC(nmi)
16293
16294 ENTRY(int3)
16295 RING0_INT_FRAME
16296 @@ -1409,7 +1661,7 @@ ENTRY(int3)
16297 call do_int3
16298 jmp ret_from_exception
16299 CFI_ENDPROC
16300 -END(int3)
16301 +ENDPROC(int3)
16302
16303 ENTRY(general_protection)
16304 RING0_EC_FRAME
16305 @@ -1417,7 +1669,7 @@ ENTRY(general_protection)
16306 CFI_ADJUST_CFA_OFFSET 4
16307 jmp error_code
16308 CFI_ENDPROC
16309 -END(general_protection)
16310 +ENDPROC(general_protection)
16311
16312 /*
16313 * End of kprobes section
16314 diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
16315 index 34a56a9..87790b4 100644
16316 --- a/arch/x86/kernel/entry_64.S
16317 +++ b/arch/x86/kernel/entry_64.S
16318 @@ -53,6 +53,8 @@
16319 #include <asm/paravirt.h>
16320 #include <asm/ftrace.h>
16321 #include <asm/percpu.h>
16322 +#include <asm/pgtable.h>
16323 +#include <asm/alternative-asm.h>
16324
16325 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
16326 #include <linux/elf-em.h>
16327 @@ -64,8 +66,9 @@
16328 #ifdef CONFIG_FUNCTION_TRACER
16329 #ifdef CONFIG_DYNAMIC_FTRACE
16330 ENTRY(mcount)
16331 + pax_force_retaddr
16332 retq
16333 -END(mcount)
16334 +ENDPROC(mcount)
16335
16336 ENTRY(ftrace_caller)
16337 cmpl $0, function_trace_stop
16338 @@ -88,8 +91,9 @@ GLOBAL(ftrace_graph_call)
16339 #endif
16340
16341 GLOBAL(ftrace_stub)
16342 + pax_force_retaddr
16343 retq
16344 -END(ftrace_caller)
16345 +ENDPROC(ftrace_caller)
16346
16347 #else /* ! CONFIG_DYNAMIC_FTRACE */
16348 ENTRY(mcount)
16349 @@ -108,6 +112,7 @@ ENTRY(mcount)
16350 #endif
16351
16352 GLOBAL(ftrace_stub)
16353 + pax_force_retaddr
16354 retq
16355
16356 trace:
16357 @@ -117,12 +122,13 @@ trace:
16358 movq 8(%rbp), %rsi
16359 subq $MCOUNT_INSN_SIZE, %rdi
16360
16361 + pax_force_fptr ftrace_trace_function
16362 call *ftrace_trace_function
16363
16364 MCOUNT_RESTORE_FRAME
16365
16366 jmp ftrace_stub
16367 -END(mcount)
16368 +ENDPROC(mcount)
16369 #endif /* CONFIG_DYNAMIC_FTRACE */
16370 #endif /* CONFIG_FUNCTION_TRACER */
16371
16372 @@ -142,8 +148,9 @@ ENTRY(ftrace_graph_caller)
16373
16374 MCOUNT_RESTORE_FRAME
16375
16376 + pax_force_retaddr
16377 retq
16378 -END(ftrace_graph_caller)
16379 +ENDPROC(ftrace_graph_caller)
16380
16381 GLOBAL(return_to_handler)
16382 subq $24, %rsp
16383 @@ -159,6 +166,7 @@ GLOBAL(return_to_handler)
16384 movq 8(%rsp), %rdx
16385 movq (%rsp), %rax
16386 addq $16, %rsp
16387 + pax_force_retaddr
16388 retq
16389 #endif
16390
16391 @@ -174,6 +182,282 @@ ENTRY(native_usergs_sysret64)
16392 ENDPROC(native_usergs_sysret64)
16393 #endif /* CONFIG_PARAVIRT */
16394
16395 + .macro ljmpq sel, off
16396 +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
16397 + .byte 0x48; ljmp *1234f(%rip)
16398 + .pushsection .rodata
16399 + .align 16
16400 + 1234: .quad \off; .word \sel
16401 + .popsection
16402 +#else
16403 + pushq $\sel
16404 + pushq $\off
16405 + lretq
16406 +#endif
16407 + .endm
16408 +
16409 + .macro pax_enter_kernel
16410 + pax_set_fptr_mask
16411 +#ifdef CONFIG_PAX_KERNEXEC
16412 + call pax_enter_kernel
16413 +#endif
16414 + .endm
16415 +
16416 + .macro pax_exit_kernel
16417 +#ifdef CONFIG_PAX_KERNEXEC
16418 + call pax_exit_kernel
16419 +#endif
16420 + .endm
16421 +
16422 +#ifdef CONFIG_PAX_KERNEXEC
16423 +ENTRY(pax_enter_kernel)
16424 + pushq %rdi
16425 +
16426 +#ifdef CONFIG_PARAVIRT
16427 + PV_SAVE_REGS(CLBR_RDI)
16428 +#endif
16429 +
16430 + GET_CR0_INTO_RDI
16431 + bts $16,%rdi
16432 + jnc 3f
16433 + mov %cs,%edi
16434 + cmp $__KERNEL_CS,%edi
16435 + jnz 2f
16436 +1:
16437 +
16438 +#ifdef CONFIG_PARAVIRT
16439 + PV_RESTORE_REGS(CLBR_RDI)
16440 +#endif
16441 +
16442 + popq %rdi
16443 + pax_force_retaddr
16444 + retq
16445 +
16446 +2: ljmpq __KERNEL_CS,1f
16447 +3: ljmpq __KERNEXEC_KERNEL_CS,4f
16448 +4: SET_RDI_INTO_CR0
16449 + jmp 1b
16450 +ENDPROC(pax_enter_kernel)
16451 +
16452 +ENTRY(pax_exit_kernel)
16453 + pushq %rdi
16454 +
16455 +#ifdef CONFIG_PARAVIRT
16456 + PV_SAVE_REGS(CLBR_RDI)
16457 +#endif
16458 +
16459 + mov %cs,%rdi
16460 + cmp $__KERNEXEC_KERNEL_CS,%edi
16461 + jz 2f
16462 +1:
16463 +
16464 +#ifdef CONFIG_PARAVIRT
16465 + PV_RESTORE_REGS(CLBR_RDI);
16466 +#endif
16467 +
16468 + popq %rdi
16469 + pax_force_retaddr
16470 + retq
16471 +
16472 +2: GET_CR0_INTO_RDI
16473 + btr $16,%rdi
16474 + ljmpq __KERNEL_CS,3f
16475 +3: SET_RDI_INTO_CR0
16476 + jmp 1b
16477 +#ifdef CONFIG_PARAVIRT
16478 + PV_RESTORE_REGS(CLBR_RDI);
16479 +#endif
16480 +
16481 + popq %rdi
16482 + pax_force_retaddr
16483 + retq
16484 +ENDPROC(pax_exit_kernel)
16485 +#endif
16486 +
16487 + .macro pax_enter_kernel_user
16488 + pax_set_fptr_mask
16489 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16490 + call pax_enter_kernel_user
16491 +#endif
16492 + .endm
16493 +
16494 + .macro pax_exit_kernel_user
16495 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16496 + call pax_exit_kernel_user
16497 +#endif
16498 +#ifdef CONFIG_PAX_RANDKSTACK
16499 + pushq %rax
16500 + call pax_randomize_kstack
16501 + popq %rax
16502 +#endif
16503 + .endm
16504 +
16505 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16506 +ENTRY(pax_enter_kernel_user)
16507 + pushq %rdi
16508 + pushq %rbx
16509 +
16510 +#ifdef CONFIG_PARAVIRT
16511 + PV_SAVE_REGS(CLBR_RDI)
16512 +#endif
16513 +
16514 + GET_CR3_INTO_RDI
16515 + mov %rdi,%rbx
16516 + add $__START_KERNEL_map,%rbx
16517 + sub phys_base(%rip),%rbx
16518 +
16519 +#ifdef CONFIG_PARAVIRT
16520 + pushq %rdi
16521 + cmpl $0, pv_info+PARAVIRT_enabled
16522 + jz 1f
16523 + i = 0
16524 + .rept USER_PGD_PTRS
16525 + mov i*8(%rbx),%rsi
16526 + mov $0,%sil
16527 + lea i*8(%rbx),%rdi
16528 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
16529 + i = i + 1
16530 + .endr
16531 + jmp 2f
16532 +1:
16533 +#endif
16534 +
16535 + i = 0
16536 + .rept USER_PGD_PTRS
16537 + movb $0,i*8(%rbx)
16538 + i = i + 1
16539 + .endr
16540 +
16541 +#ifdef CONFIG_PARAVIRT
16542 +2: popq %rdi
16543 +#endif
16544 + SET_RDI_INTO_CR3
16545 +
16546 +#ifdef CONFIG_PAX_KERNEXEC
16547 + GET_CR0_INTO_RDI
16548 + bts $16,%rdi
16549 + SET_RDI_INTO_CR0
16550 +#endif
16551 +
16552 +#ifdef CONFIG_PARAVIRT
16553 + PV_RESTORE_REGS(CLBR_RDI)
16554 +#endif
16555 +
16556 + popq %rbx
16557 + popq %rdi
16558 + pax_force_retaddr
16559 + retq
16560 +ENDPROC(pax_enter_kernel_user)
16561 +
16562 +ENTRY(pax_exit_kernel_user)
16563 + push %rdi
16564 +
16565 +#ifdef CONFIG_PARAVIRT
16566 + pushq %rbx
16567 + PV_SAVE_REGS(CLBR_RDI)
16568 +#endif
16569 +
16570 +#ifdef CONFIG_PAX_KERNEXEC
16571 + GET_CR0_INTO_RDI
16572 + btr $16,%rdi
16573 + SET_RDI_INTO_CR0
16574 +#endif
16575 +
16576 + GET_CR3_INTO_RDI
16577 + add $__START_KERNEL_map,%rdi
16578 + sub phys_base(%rip),%rdi
16579 +
16580 +#ifdef CONFIG_PARAVIRT
16581 + cmpl $0, pv_info+PARAVIRT_enabled
16582 + jz 1f
16583 + mov %rdi,%rbx
16584 + i = 0
16585 + .rept USER_PGD_PTRS
16586 + mov i*8(%rbx),%rsi
16587 + mov $0x67,%sil
16588 + lea i*8(%rbx),%rdi
16589 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
16590 + i = i + 1
16591 + .endr
16592 + jmp 2f
16593 +1:
16594 +#endif
16595 +
16596 + i = 0
16597 + .rept USER_PGD_PTRS
16598 + movb $0x67,i*8(%rdi)
16599 + i = i + 1
16600 + .endr
16601 +
16602 +#ifdef CONFIG_PARAVIRT
16603 +2: PV_RESTORE_REGS(CLBR_RDI)
16604 + popq %rbx
16605 +#endif
16606 +
16607 + popq %rdi
16608 + pax_force_retaddr
16609 + retq
16610 +ENDPROC(pax_exit_kernel_user)
16611 +#endif
16612 +
16613 +.macro pax_erase_kstack
16614 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
16615 + call pax_erase_kstack
16616 +#endif
16617 +.endm
16618 +
16619 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
16620 +/*
16621 + * r11: thread_info
16622 + * rcx, rdx: can be clobbered
16623 + */
16624 +ENTRY(pax_erase_kstack)
16625 + pushq %rdi
16626 + pushq %rax
16627 + pushq %r11
16628 +
16629 + GET_THREAD_INFO(%r11)
16630 + mov TI_lowest_stack(%r11), %rdi
16631 + mov $-0xBEEF, %rax
16632 + std
16633 +
16634 +1: mov %edi, %ecx
16635 + and $THREAD_SIZE_asm - 1, %ecx
16636 + shr $3, %ecx
16637 + repne scasq
16638 + jecxz 2f
16639 +
16640 + cmp $2*8, %ecx
16641 + jc 2f
16642 +
16643 + mov $2*8, %ecx
16644 + repe scasq
16645 + jecxz 2f
16646 + jne 1b
16647 +
16648 +2: cld
16649 + mov %esp, %ecx
16650 + sub %edi, %ecx
16651 +
16652 + cmp $THREAD_SIZE_asm, %rcx
16653 + jb 3f
16654 + ud2
16655 +3:
16656 +
16657 + shr $3, %ecx
16658 + rep stosq
16659 +
16660 + mov TI_task_thread_sp0(%r11), %rdi
16661 + sub $256, %rdi
16662 + mov %rdi, TI_lowest_stack(%r11)
16663 +
16664 + popq %r11
16665 + popq %rax
16666 + popq %rdi
16667 + pax_force_retaddr
16668 + ret
16669 +ENDPROC(pax_erase_kstack)
16670 +#endif
16671
16672 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
16673 #ifdef CONFIG_TRACE_IRQFLAGS
16674 @@ -233,8 +517,8 @@ ENDPROC(native_usergs_sysret64)
16675 .endm
16676
16677 .macro UNFAKE_STACK_FRAME
16678 - addq $8*6, %rsp
16679 - CFI_ADJUST_CFA_OFFSET -(6*8)
16680 + addq $8*6 + ARG_SKIP, %rsp
16681 + CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
16682 .endm
16683
16684 /*
16685 @@ -317,7 +601,7 @@ ENTRY(save_args)
16686 leaq -ARGOFFSET+16(%rsp),%rdi /* arg1 for handler */
16687 movq_cfi rbp, 8 /* push %rbp */
16688 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
16689 - testl $3, CS(%rdi)
16690 + testb $3, CS(%rdi)
16691 je 1f
16692 SWAPGS
16693 /*
16694 @@ -337,9 +621,10 @@ ENTRY(save_args)
16695 * We entered an interrupt context - irqs are off:
16696 */
16697 2: TRACE_IRQS_OFF
16698 + pax_force_retaddr
16699 ret
16700 CFI_ENDPROC
16701 -END(save_args)
16702 +ENDPROC(save_args)
16703
16704 ENTRY(save_rest)
16705 PARTIAL_FRAME 1 REST_SKIP+8
16706 @@ -352,9 +637,10 @@ ENTRY(save_rest)
16707 movq_cfi r15, R15+16
16708 movq %r11, 8(%rsp) /* return address */
16709 FIXUP_TOP_OF_STACK %r11, 16
16710 + pax_force_retaddr
16711 ret
16712 CFI_ENDPROC
16713 -END(save_rest)
16714 +ENDPROC(save_rest)
16715
16716 /* save complete stack frame */
16717 .pushsection .kprobes.text, "ax"
16718 @@ -383,9 +669,10 @@ ENTRY(save_paranoid)
16719 js 1f /* negative -> in kernel */
16720 SWAPGS
16721 xorl %ebx,%ebx
16722 -1: ret
16723 +1: pax_force_retaddr_bts
16724 + ret
16725 CFI_ENDPROC
16726 -END(save_paranoid)
16727 +ENDPROC(save_paranoid)
16728 .popsection
16729
16730 /*
16731 @@ -409,7 +696,7 @@ ENTRY(ret_from_fork)
16732
16733 RESTORE_REST
16734
16735 - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
16736 + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
16737 je int_ret_from_sys_call
16738
16739 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
16740 @@ -419,7 +706,7 @@ ENTRY(ret_from_fork)
16741 jmp ret_from_sys_call # go to the SYSRET fastpath
16742
16743 CFI_ENDPROC
16744 -END(ret_from_fork)
16745 +ENDPROC(ret_from_fork)
16746
16747 /*
16748 * System call entry. Upto 6 arguments in registers are supported.
16749 @@ -455,7 +742,7 @@ END(ret_from_fork)
16750 ENTRY(system_call)
16751 CFI_STARTPROC simple
16752 CFI_SIGNAL_FRAME
16753 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
16754 + CFI_DEF_CFA rsp,0
16755 CFI_REGISTER rip,rcx
16756 /*CFI_REGISTER rflags,r11*/
16757 SWAPGS_UNSAFE_STACK
16758 @@ -468,12 +755,13 @@ ENTRY(system_call_after_swapgs)
16759
16760 movq %rsp,PER_CPU_VAR(old_rsp)
16761 movq PER_CPU_VAR(kernel_stack),%rsp
16762 + SAVE_ARGS 8*6,1
16763 + pax_enter_kernel_user
16764 /*
16765 * No need to follow this irqs off/on section - it's straight
16766 * and short:
16767 */
16768 ENABLE_INTERRUPTS(CLBR_NONE)
16769 - SAVE_ARGS 8,1
16770 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
16771 movq %rcx,RIP-ARGOFFSET(%rsp)
16772 CFI_REL_OFFSET rip,RIP-ARGOFFSET
16773 @@ -483,7 +771,7 @@ ENTRY(system_call_after_swapgs)
16774 system_call_fastpath:
16775 cmpq $__NR_syscall_max,%rax
16776 ja badsys
16777 - movq %r10,%rcx
16778 + movq R10-ARGOFFSET(%rsp),%rcx
16779 call *sys_call_table(,%rax,8) # XXX: rip relative
16780 movq %rax,RAX-ARGOFFSET(%rsp)
16781 /*
16782 @@ -502,6 +790,8 @@ sysret_check:
16783 andl %edi,%edx
16784 jnz sysret_careful
16785 CFI_REMEMBER_STATE
16786 + pax_exit_kernel_user
16787 + pax_erase_kstack
16788 /*
16789 * sysretq will re-enable interrupts:
16790 */
16791 @@ -555,14 +845,18 @@ badsys:
16792 * jump back to the normal fast path.
16793 */
16794 auditsys:
16795 - movq %r10,%r9 /* 6th arg: 4th syscall arg */
16796 + movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
16797 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
16798 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
16799 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
16800 movq %rax,%rsi /* 2nd arg: syscall number */
16801 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
16802 call audit_syscall_entry
16803 +
16804 + pax_erase_kstack
16805 +
16806 LOAD_ARGS 0 /* reload call-clobbered registers */
16807 + pax_set_fptr_mask
16808 jmp system_call_fastpath
16809
16810 /*
16811 @@ -592,16 +886,20 @@ tracesys:
16812 FIXUP_TOP_OF_STACK %rdi
16813 movq %rsp,%rdi
16814 call syscall_trace_enter
16815 +
16816 + pax_erase_kstack
16817 +
16818 /*
16819 * Reload arg registers from stack in case ptrace changed them.
16820 * We don't reload %rax because syscall_trace_enter() returned
16821 * the value it wants us to use in the table lookup.
16822 */
16823 LOAD_ARGS ARGOFFSET, 1
16824 + pax_set_fptr_mask
16825 RESTORE_REST
16826 cmpq $__NR_syscall_max,%rax
16827 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
16828 - movq %r10,%rcx /* fixup for C */
16829 + movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
16830 call *sys_call_table(,%rax,8)
16831 movq %rax,RAX-ARGOFFSET(%rsp)
16832 /* Use IRET because user could have changed frame */
16833 @@ -613,7 +911,7 @@ tracesys:
16834 GLOBAL(int_ret_from_sys_call)
16835 DISABLE_INTERRUPTS(CLBR_NONE)
16836 TRACE_IRQS_OFF
16837 - testl $3,CS-ARGOFFSET(%rsp)
16838 + testb $3,CS-ARGOFFSET(%rsp)
16839 je retint_restore_args
16840 movl $_TIF_ALLWORK_MASK,%edi
16841 /* edi: mask to check */
16842 @@ -624,6 +922,7 @@ GLOBAL(int_with_check)
16843 andl %edi,%edx
16844 jnz int_careful
16845 andl $~TS_COMPAT,TI_status(%rcx)
16846 + pax_erase_kstack
16847 jmp retint_swapgs
16848
16849 /* Either reschedule or signal or syscall exit tracking needed. */
16850 @@ -674,7 +973,7 @@ int_restore_rest:
16851 TRACE_IRQS_OFF
16852 jmp int_with_check
16853 CFI_ENDPROC
16854 -END(system_call)
16855 +ENDPROC(system_call)
16856
16857 /*
16858 * Certain special system calls that need to save a complete full stack frame.
16859 @@ -690,7 +989,7 @@ ENTRY(\label)
16860 call \func
16861 jmp ptregscall_common
16862 CFI_ENDPROC
16863 -END(\label)
16864 +ENDPROC(\label)
16865 .endm
16866
16867 PTREGSCALL stub_clone, sys_clone, %r8
16868 @@ -708,9 +1007,10 @@ ENTRY(ptregscall_common)
16869 movq_cfi_restore R12+8, r12
16870 movq_cfi_restore RBP+8, rbp
16871 movq_cfi_restore RBX+8, rbx
16872 + pax_force_retaddr
16873 ret $REST_SKIP /* pop extended registers */
16874 CFI_ENDPROC
16875 -END(ptregscall_common)
16876 +ENDPROC(ptregscall_common)
16877
16878 ENTRY(stub_execve)
16879 CFI_STARTPROC
16880 @@ -726,7 +1026,7 @@ ENTRY(stub_execve)
16881 RESTORE_REST
16882 jmp int_ret_from_sys_call
16883 CFI_ENDPROC
16884 -END(stub_execve)
16885 +ENDPROC(stub_execve)
16886
16887 /*
16888 * sigreturn is special because it needs to restore all registers on return.
16889 @@ -744,7 +1044,7 @@ ENTRY(stub_rt_sigreturn)
16890 RESTORE_REST
16891 jmp int_ret_from_sys_call
16892 CFI_ENDPROC
16893 -END(stub_rt_sigreturn)
16894 +ENDPROC(stub_rt_sigreturn)
16895
16896 /*
16897 * Build the entry stubs and pointer table with some assembler magic.
16898 @@ -780,7 +1080,7 @@ vector=vector+1
16899 2: jmp common_interrupt
16900 .endr
16901 CFI_ENDPROC
16902 -END(irq_entries_start)
16903 +ENDPROC(irq_entries_start)
16904
16905 .previous
16906 END(interrupt)
16907 @@ -800,6 +1100,16 @@ END(interrupt)
16908 CFI_ADJUST_CFA_OFFSET 10*8
16909 call save_args
16910 PARTIAL_FRAME 0
16911 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16912 + testb $3, CS(%rdi)
16913 + jnz 1f
16914 + pax_enter_kernel
16915 + jmp 2f
16916 +1: pax_enter_kernel_user
16917 +2:
16918 +#else
16919 + pax_enter_kernel
16920 +#endif
16921 call \func
16922 .endm
16923
16924 @@ -822,7 +1132,7 @@ ret_from_intr:
16925 CFI_ADJUST_CFA_OFFSET -8
16926 exit_intr:
16927 GET_THREAD_INFO(%rcx)
16928 - testl $3,CS-ARGOFFSET(%rsp)
16929 + testb $3,CS-ARGOFFSET(%rsp)
16930 je retint_kernel
16931
16932 /* Interrupt came from user space */
16933 @@ -844,12 +1154,15 @@ retint_swapgs: /* return to user-space */
16934 * The iretq could re-enable interrupts:
16935 */
16936 DISABLE_INTERRUPTS(CLBR_ANY)
16937 + pax_exit_kernel_user
16938 TRACE_IRQS_IRETQ
16939 SWAPGS
16940 jmp restore_args
16941
16942 retint_restore_args: /* return to kernel space */
16943 DISABLE_INTERRUPTS(CLBR_ANY)
16944 + pax_exit_kernel
16945 + pax_force_retaddr RIP-ARGOFFSET
16946 /*
16947 * The iretq could re-enable interrupts:
16948 */
16949 @@ -940,7 +1253,7 @@ ENTRY(retint_kernel)
16950 #endif
16951
16952 CFI_ENDPROC
16953 -END(common_interrupt)
16954 +ENDPROC(common_interrupt)
16955
16956 /*
16957 * APIC interrupts.
16958 @@ -953,7 +1266,7 @@ ENTRY(\sym)
16959 interrupt \do_sym
16960 jmp ret_from_intr
16961 CFI_ENDPROC
16962 -END(\sym)
16963 +ENDPROC(\sym)
16964 .endm
16965
16966 #ifdef CONFIG_SMP
16967 @@ -1032,12 +1345,22 @@ ENTRY(\sym)
16968 CFI_ADJUST_CFA_OFFSET 15*8
16969 call error_entry
16970 DEFAULT_FRAME 0
16971 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16972 + testb $3, CS(%rsp)
16973 + jnz 1f
16974 + pax_enter_kernel
16975 + jmp 2f
16976 +1: pax_enter_kernel_user
16977 +2:
16978 +#else
16979 + pax_enter_kernel
16980 +#endif
16981 movq %rsp,%rdi /* pt_regs pointer */
16982 xorl %esi,%esi /* no error code */
16983 call \do_sym
16984 jmp error_exit /* %ebx: no swapgs flag */
16985 CFI_ENDPROC
16986 -END(\sym)
16987 +ENDPROC(\sym)
16988 .endm
16989
16990 .macro paranoidzeroentry sym do_sym
16991 @@ -1049,12 +1372,22 @@ ENTRY(\sym)
16992 subq $15*8, %rsp
16993 call save_paranoid
16994 TRACE_IRQS_OFF
16995 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16996 + testb $3, CS(%rsp)
16997 + jnz 1f
16998 + pax_enter_kernel
16999 + jmp 2f
17000 +1: pax_enter_kernel_user
17001 +2:
17002 +#else
17003 + pax_enter_kernel
17004 +#endif
17005 movq %rsp,%rdi /* pt_regs pointer */
17006 xorl %esi,%esi /* no error code */
17007 call \do_sym
17008 jmp paranoid_exit /* %ebx: no swapgs flag */
17009 CFI_ENDPROC
17010 -END(\sym)
17011 +ENDPROC(\sym)
17012 .endm
17013
17014 .macro paranoidzeroentry_ist sym do_sym ist
17015 @@ -1066,15 +1399,30 @@ ENTRY(\sym)
17016 subq $15*8, %rsp
17017 call save_paranoid
17018 TRACE_IRQS_OFF
17019 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17020 + testb $3, CS(%rsp)
17021 + jnz 1f
17022 + pax_enter_kernel
17023 + jmp 2f
17024 +1: pax_enter_kernel_user
17025 +2:
17026 +#else
17027 + pax_enter_kernel
17028 +#endif
17029 movq %rsp,%rdi /* pt_regs pointer */
17030 xorl %esi,%esi /* no error code */
17031 - PER_CPU(init_tss, %rbp)
17032 +#ifdef CONFIG_SMP
17033 + imul $TSS_size, PER_CPU_VAR(cpu_number), %ebp
17034 + lea init_tss(%rbp), %rbp
17035 +#else
17036 + lea init_tss(%rip), %rbp
17037 +#endif
17038 subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
17039 call \do_sym
17040 addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
17041 jmp paranoid_exit /* %ebx: no swapgs flag */
17042 CFI_ENDPROC
17043 -END(\sym)
17044 +ENDPROC(\sym)
17045 .endm
17046
17047 .macro errorentry sym do_sym
17048 @@ -1085,13 +1433,23 @@ ENTRY(\sym)
17049 CFI_ADJUST_CFA_OFFSET 15*8
17050 call error_entry
17051 DEFAULT_FRAME 0
17052 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17053 + testb $3, CS(%rsp)
17054 + jnz 1f
17055 + pax_enter_kernel
17056 + jmp 2f
17057 +1: pax_enter_kernel_user
17058 +2:
17059 +#else
17060 + pax_enter_kernel
17061 +#endif
17062 movq %rsp,%rdi /* pt_regs pointer */
17063 movq ORIG_RAX(%rsp),%rsi /* get error code */
17064 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
17065 call \do_sym
17066 jmp error_exit /* %ebx: no swapgs flag */
17067 CFI_ENDPROC
17068 -END(\sym)
17069 +ENDPROC(\sym)
17070 .endm
17071
17072 /* error code is on the stack already */
17073 @@ -1104,13 +1462,23 @@ ENTRY(\sym)
17074 call save_paranoid
17075 DEFAULT_FRAME 0
17076 TRACE_IRQS_OFF
17077 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17078 + testb $3, CS(%rsp)
17079 + jnz 1f
17080 + pax_enter_kernel
17081 + jmp 2f
17082 +1: pax_enter_kernel_user
17083 +2:
17084 +#else
17085 + pax_enter_kernel
17086 +#endif
17087 movq %rsp,%rdi /* pt_regs pointer */
17088 movq ORIG_RAX(%rsp),%rsi /* get error code */
17089 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
17090 call \do_sym
17091 jmp paranoid_exit /* %ebx: no swapgs flag */
17092 CFI_ENDPROC
17093 -END(\sym)
17094 +ENDPROC(\sym)
17095 .endm
17096
17097 zeroentry divide_error do_divide_error
17098 @@ -1141,9 +1509,10 @@ gs_change:
17099 SWAPGS
17100 popf
17101 CFI_ADJUST_CFA_OFFSET -8
17102 + pax_force_retaddr
17103 ret
17104 CFI_ENDPROC
17105 -END(native_load_gs_index)
17106 +ENDPROC(native_load_gs_index)
17107
17108 .section __ex_table,"a"
17109 .align 8
17110 @@ -1193,11 +1562,12 @@ ENTRY(kernel_thread)
17111 * of hacks for example to fork off the per-CPU idle tasks.
17112 * [Hopefully no generic code relies on the reschedule -AK]
17113 */
17114 - RESTORE_ALL
17115 + RESTORE_REST
17116 UNFAKE_STACK_FRAME
17117 + pax_force_retaddr
17118 ret
17119 CFI_ENDPROC
17120 -END(kernel_thread)
17121 +ENDPROC(kernel_thread)
17122
17123 ENTRY(child_rip)
17124 pushq $0 # fake return address
17125 @@ -1208,13 +1578,14 @@ ENTRY(child_rip)
17126 */
17127 movq %rdi, %rax
17128 movq %rsi, %rdi
17129 + pax_force_fptr %rax
17130 call *%rax
17131 # exit
17132 mov %eax, %edi
17133 call do_exit
17134 ud2 # padding for call trace
17135 CFI_ENDPROC
17136 -END(child_rip)
17137 +ENDPROC(child_rip)
17138
17139 /*
17140 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
17141 @@ -1241,11 +1612,11 @@ ENTRY(kernel_execve)
17142 RESTORE_REST
17143 testq %rax,%rax
17144 je int_ret_from_sys_call
17145 - RESTORE_ARGS
17146 UNFAKE_STACK_FRAME
17147 + pax_force_retaddr
17148 ret
17149 CFI_ENDPROC
17150 -END(kernel_execve)
17151 +ENDPROC(kernel_execve)
17152
17153 /* Call softirq on interrupt stack. Interrupts are off. */
17154 ENTRY(call_softirq)
17155 @@ -1263,9 +1634,10 @@ ENTRY(call_softirq)
17156 CFI_DEF_CFA_REGISTER rsp
17157 CFI_ADJUST_CFA_OFFSET -8
17158 decl PER_CPU_VAR(irq_count)
17159 + pax_force_retaddr
17160 ret
17161 CFI_ENDPROC
17162 -END(call_softirq)
17163 +ENDPROC(call_softirq)
17164
17165 #ifdef CONFIG_XEN
17166 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
17167 @@ -1303,7 +1675,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
17168 decl PER_CPU_VAR(irq_count)
17169 jmp error_exit
17170 CFI_ENDPROC
17171 -END(xen_do_hypervisor_callback)
17172 +ENDPROC(xen_do_hypervisor_callback)
17173
17174 /*
17175 * Hypervisor uses this for application faults while it executes.
17176 @@ -1362,7 +1734,7 @@ ENTRY(xen_failsafe_callback)
17177 SAVE_ALL
17178 jmp error_exit
17179 CFI_ENDPROC
17180 -END(xen_failsafe_callback)
17181 +ENDPROC(xen_failsafe_callback)
17182
17183 #endif /* CONFIG_XEN */
17184
17185 @@ -1405,16 +1777,31 @@ ENTRY(paranoid_exit)
17186 TRACE_IRQS_OFF
17187 testl %ebx,%ebx /* swapgs needed? */
17188 jnz paranoid_restore
17189 - testl $3,CS(%rsp)
17190 + testb $3,CS(%rsp)
17191 jnz paranoid_userspace
17192 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17193 + pax_exit_kernel
17194 + TRACE_IRQS_IRETQ 0
17195 + SWAPGS_UNSAFE_STACK
17196 + RESTORE_ALL 8
17197 + pax_force_retaddr_bts
17198 + jmp irq_return
17199 +#endif
17200 paranoid_swapgs:
17201 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17202 + pax_exit_kernel_user
17203 +#else
17204 + pax_exit_kernel
17205 +#endif
17206 TRACE_IRQS_IRETQ 0
17207 SWAPGS_UNSAFE_STACK
17208 RESTORE_ALL 8
17209 jmp irq_return
17210 paranoid_restore:
17211 + pax_exit_kernel
17212 TRACE_IRQS_IRETQ 0
17213 RESTORE_ALL 8
17214 + pax_force_retaddr_bts
17215 jmp irq_return
17216 paranoid_userspace:
17217 GET_THREAD_INFO(%rcx)
17218 @@ -1443,7 +1830,7 @@ paranoid_schedule:
17219 TRACE_IRQS_OFF
17220 jmp paranoid_userspace
17221 CFI_ENDPROC
17222 -END(paranoid_exit)
17223 +ENDPROC(paranoid_exit)
17224
17225 /*
17226 * Exception entry point. This expects an error code/orig_rax on the stack.
17227 @@ -1470,12 +1857,13 @@ ENTRY(error_entry)
17228 movq_cfi r14, R14+8
17229 movq_cfi r15, R15+8
17230 xorl %ebx,%ebx
17231 - testl $3,CS+8(%rsp)
17232 + testb $3,CS+8(%rsp)
17233 je error_kernelspace
17234 error_swapgs:
17235 SWAPGS
17236 error_sti:
17237 TRACE_IRQS_OFF
17238 + pax_force_retaddr_bts
17239 ret
17240 CFI_ENDPROC
17241
17242 @@ -1497,7 +1885,7 @@ error_kernelspace:
17243 cmpq $gs_change,RIP+8(%rsp)
17244 je error_swapgs
17245 jmp error_sti
17246 -END(error_entry)
17247 +ENDPROC(error_entry)
17248
17249
17250 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
17251 @@ -1517,7 +1905,7 @@ ENTRY(error_exit)
17252 jnz retint_careful
17253 jmp retint_swapgs
17254 CFI_ENDPROC
17255 -END(error_exit)
17256 +ENDPROC(error_exit)
17257
17258
17259 /* runs on exception stack */
17260 @@ -1529,6 +1917,16 @@ ENTRY(nmi)
17261 CFI_ADJUST_CFA_OFFSET 15*8
17262 call save_paranoid
17263 DEFAULT_FRAME 0
17264 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17265 + testb $3, CS(%rsp)
17266 + jnz 1f
17267 + pax_enter_kernel
17268 + jmp 2f
17269 +1: pax_enter_kernel_user
17270 +2:
17271 +#else
17272 + pax_enter_kernel
17273 +#endif
17274 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
17275 movq %rsp,%rdi
17276 movq $-1,%rsi
17277 @@ -1539,12 +1937,28 @@ ENTRY(nmi)
17278 DISABLE_INTERRUPTS(CLBR_NONE)
17279 testl %ebx,%ebx /* swapgs needed? */
17280 jnz nmi_restore
17281 - testl $3,CS(%rsp)
17282 + testb $3,CS(%rsp)
17283 jnz nmi_userspace
17284 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17285 + pax_exit_kernel
17286 + SWAPGS_UNSAFE_STACK
17287 + RESTORE_ALL 8
17288 + pax_force_retaddr_bts
17289 + jmp irq_return
17290 +#endif
17291 nmi_swapgs:
17292 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17293 + pax_exit_kernel_user
17294 +#else
17295 + pax_exit_kernel
17296 +#endif
17297 SWAPGS_UNSAFE_STACK
17298 + RESTORE_ALL 8
17299 + jmp irq_return
17300 nmi_restore:
17301 + pax_exit_kernel
17302 RESTORE_ALL 8
17303 + pax_force_retaddr_bts
17304 jmp irq_return
17305 nmi_userspace:
17306 GET_THREAD_INFO(%rcx)
17307 @@ -1573,14 +1987,14 @@ nmi_schedule:
17308 jmp paranoid_exit
17309 CFI_ENDPROC
17310 #endif
17311 -END(nmi)
17312 +ENDPROC(nmi)
17313
17314 ENTRY(ignore_sysret)
17315 CFI_STARTPROC
17316 mov $-ENOSYS,%eax
17317 sysret
17318 CFI_ENDPROC
17319 -END(ignore_sysret)
17320 +ENDPROC(ignore_sysret)
17321
17322 /*
17323 * End of kprobes section
17324 diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
17325 index 9dbb527..7b3615a 100644
17326 --- a/arch/x86/kernel/ftrace.c
17327 +++ b/arch/x86/kernel/ftrace.c
17328 @@ -103,7 +103,7 @@ static void *mod_code_ip; /* holds the IP to write to */
17329 static void *mod_code_newcode; /* holds the text to write to the IP */
17330
17331 static unsigned nmi_wait_count;
17332 -static atomic_t nmi_update_count = ATOMIC_INIT(0);
17333 +static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
17334
17335 int ftrace_arch_read_dyn_info(char *buf, int size)
17336 {
17337 @@ -111,7 +111,7 @@ int ftrace_arch_read_dyn_info(char *buf, int size)
17338
17339 r = snprintf(buf, size, "%u %u",
17340 nmi_wait_count,
17341 - atomic_read(&nmi_update_count));
17342 + atomic_read_unchecked(&nmi_update_count));
17343 return r;
17344 }
17345
17346 @@ -149,8 +149,10 @@ void ftrace_nmi_enter(void)
17347 {
17348 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
17349 smp_rmb();
17350 + pax_open_kernel();
17351 ftrace_mod_code();
17352 - atomic_inc(&nmi_update_count);
17353 + pax_close_kernel();
17354 + atomic_inc_unchecked(&nmi_update_count);
17355 }
17356 /* Must have previous changes seen before executions */
17357 smp_mb();
17358 @@ -215,7 +217,7 @@ do_ftrace_mod_code(unsigned long ip, void *new_code)
17359
17360
17361
17362 -static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
17363 +static unsigned char ftrace_nop[MCOUNT_INSN_SIZE] __read_only;
17364
17365 static unsigned char *ftrace_nop_replace(void)
17366 {
17367 @@ -228,6 +230,8 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
17368 {
17369 unsigned char replaced[MCOUNT_INSN_SIZE];
17370
17371 + ip = ktla_ktva(ip);
17372 +
17373 /*
17374 * Note: Due to modules and __init, code can
17375 * disappear and change, we need to protect against faulting
17376 @@ -284,7 +288,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
17377 unsigned char old[MCOUNT_INSN_SIZE], *new;
17378 int ret;
17379
17380 - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
17381 + memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
17382 new = ftrace_call_replace(ip, (unsigned long)func);
17383 ret = ftrace_modify_code(ip, old, new);
17384
17385 @@ -337,15 +341,15 @@ int __init ftrace_dyn_arch_init(void *data)
17386 switch (faulted) {
17387 case 0:
17388 pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
17389 - memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
17390 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_p6nop), MCOUNT_INSN_SIZE);
17391 break;
17392 case 1:
17393 pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
17394 - memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
17395 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_nop5), MCOUNT_INSN_SIZE);
17396 break;
17397 case 2:
17398 pr_info("ftrace: converting mcount calls to jmp . + 5\n");
17399 - memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
17400 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_jmp), MCOUNT_INSN_SIZE);
17401 break;
17402 }
17403
17404 @@ -366,6 +370,8 @@ static int ftrace_mod_jmp(unsigned long ip,
17405 {
17406 unsigned char code[MCOUNT_INSN_SIZE];
17407
17408 + ip = ktla_ktva(ip);
17409 +
17410 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
17411 return -EFAULT;
17412
17413 diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
17414 index 4f8e250..df24706 100644
17415 --- a/arch/x86/kernel/head32.c
17416 +++ b/arch/x86/kernel/head32.c
17417 @@ -16,6 +16,7 @@
17418 #include <asm/apic.h>
17419 #include <asm/io_apic.h>
17420 #include <asm/bios_ebda.h>
17421 +#include <asm/boot.h>
17422
17423 static void __init i386_default_early_setup(void)
17424 {
17425 @@ -31,7 +32,7 @@ void __init i386_start_kernel(void)
17426 {
17427 reserve_trampoline_memory();
17428
17429 - reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
17430 + reserve_early(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
17431
17432 #ifdef CONFIG_BLK_DEV_INITRD
17433 /* Reserve INITRD */
17434 diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
17435 index 34c3308..6fc4e76 100644
17436 --- a/arch/x86/kernel/head_32.S
17437 +++ b/arch/x86/kernel/head_32.S
17438 @@ -19,10 +19,17 @@
17439 #include <asm/setup.h>
17440 #include <asm/processor-flags.h>
17441 #include <asm/percpu.h>
17442 +#include <asm/msr-index.h>
17443
17444 /* Physical address */
17445 #define pa(X) ((X) - __PAGE_OFFSET)
17446
17447 +#ifdef CONFIG_PAX_KERNEXEC
17448 +#define ta(X) (X)
17449 +#else
17450 +#define ta(X) ((X) - __PAGE_OFFSET)
17451 +#endif
17452 +
17453 /*
17454 * References to members of the new_cpu_data structure.
17455 */
17456 @@ -52,11 +59,7 @@
17457 * and small than max_low_pfn, otherwise will waste some page table entries
17458 */
17459
17460 -#if PTRS_PER_PMD > 1
17461 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
17462 -#else
17463 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
17464 -#endif
17465 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
17466
17467 /* Enough space to fit pagetables for the low memory linear map */
17468 MAPPING_BEYOND_END = \
17469 @@ -73,6 +76,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE_asm
17470 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
17471
17472 /*
17473 + * Real beginning of normal "text" segment
17474 + */
17475 +ENTRY(stext)
17476 +ENTRY(_stext)
17477 +
17478 +/*
17479 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
17480 * %esi points to the real-mode code as a 32-bit pointer.
17481 * CS and DS must be 4 GB flat segments, but we don't depend on
17482 @@ -80,7 +89,16 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
17483 * can.
17484 */
17485 __HEAD
17486 +
17487 +#ifdef CONFIG_PAX_KERNEXEC
17488 + jmp startup_32
17489 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
17490 +.fill PAGE_SIZE-5,1,0xcc
17491 +#endif
17492 +
17493 ENTRY(startup_32)
17494 + movl pa(stack_start),%ecx
17495 +
17496 /* test KEEP_SEGMENTS flag to see if the bootloader is asking
17497 us to not reload segments */
17498 testb $(1<<6), BP_loadflags(%esi)
17499 @@ -95,7 +113,60 @@ ENTRY(startup_32)
17500 movl %eax,%es
17501 movl %eax,%fs
17502 movl %eax,%gs
17503 + movl %eax,%ss
17504 2:
17505 + leal -__PAGE_OFFSET(%ecx),%esp
17506 +
17507 +#ifdef CONFIG_SMP
17508 + movl $pa(cpu_gdt_table),%edi
17509 + movl $__per_cpu_load,%eax
17510 + movw %ax,__KERNEL_PERCPU + 2(%edi)
17511 + rorl $16,%eax
17512 + movb %al,__KERNEL_PERCPU + 4(%edi)
17513 + movb %ah,__KERNEL_PERCPU + 7(%edi)
17514 + movl $__per_cpu_end - 1,%eax
17515 + subl $__per_cpu_start,%eax
17516 + movw %ax,__KERNEL_PERCPU + 0(%edi)
17517 +#endif
17518 +
17519 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17520 + movl $NR_CPUS,%ecx
17521 + movl $pa(cpu_gdt_table),%edi
17522 +1:
17523 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
17524 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
17525 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
17526 + addl $PAGE_SIZE_asm,%edi
17527 + loop 1b
17528 +#endif
17529 +
17530 +#ifdef CONFIG_PAX_KERNEXEC
17531 + movl $pa(boot_gdt),%edi
17532 + movl $__LOAD_PHYSICAL_ADDR,%eax
17533 + movw %ax,__BOOT_CS + 2(%edi)
17534 + rorl $16,%eax
17535 + movb %al,__BOOT_CS + 4(%edi)
17536 + movb %ah,__BOOT_CS + 7(%edi)
17537 + rorl $16,%eax
17538 +
17539 + ljmp $(__BOOT_CS),$1f
17540 +1:
17541 +
17542 + movl $NR_CPUS,%ecx
17543 + movl $pa(cpu_gdt_table),%edi
17544 + addl $__PAGE_OFFSET,%eax
17545 +1:
17546 + movw %ax,__KERNEL_CS + 2(%edi)
17547 + movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
17548 + rorl $16,%eax
17549 + movb %al,__KERNEL_CS + 4(%edi)
17550 + movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
17551 + movb %ah,__KERNEL_CS + 7(%edi)
17552 + movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
17553 + rorl $16,%eax
17554 + addl $PAGE_SIZE_asm,%edi
17555 + loop 1b
17556 +#endif
17557
17558 /*
17559 * Clear BSS first so that there are no surprises...
17560 @@ -140,9 +211,7 @@ ENTRY(startup_32)
17561 cmpl $num_subarch_entries, %eax
17562 jae bad_subarch
17563
17564 - movl pa(subarch_entries)(,%eax,4), %eax
17565 - subl $__PAGE_OFFSET, %eax
17566 - jmp *%eax
17567 + jmp *pa(subarch_entries)(,%eax,4)
17568
17569 bad_subarch:
17570 WEAK(lguest_entry)
17571 @@ -154,10 +223,10 @@ WEAK(xen_entry)
17572 __INITDATA
17573
17574 subarch_entries:
17575 - .long default_entry /* normal x86/PC */
17576 - .long lguest_entry /* lguest hypervisor */
17577 - .long xen_entry /* Xen hypervisor */
17578 - .long default_entry /* Moorestown MID */
17579 + .long ta(default_entry) /* normal x86/PC */
17580 + .long ta(lguest_entry) /* lguest hypervisor */
17581 + .long ta(xen_entry) /* Xen hypervisor */
17582 + .long ta(default_entry) /* Moorestown MID */
17583 num_subarch_entries = (. - subarch_entries) / 4
17584 .previous
17585 #endif /* CONFIG_PARAVIRT */
17586 @@ -218,8 +287,11 @@ default_entry:
17587 movl %eax, pa(max_pfn_mapped)
17588
17589 /* Do early initialization of the fixmap area */
17590 - movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
17591 - movl %eax,pa(swapper_pg_pmd+0x1000*KPMDS-8)
17592 +#ifdef CONFIG_COMPAT_VDSO
17593 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_pmd+0x1000*KPMDS-8)
17594 +#else
17595 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_pmd+0x1000*KPMDS-8)
17596 +#endif
17597 #else /* Not PAE */
17598
17599 page_pde_offset = (__PAGE_OFFSET >> 20);
17600 @@ -249,8 +321,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
17601 movl %eax, pa(max_pfn_mapped)
17602
17603 /* Do early initialization of the fixmap area */
17604 - movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
17605 - movl %eax,pa(swapper_pg_dir+0xffc)
17606 +#ifdef CONFIG_COMPAT_VDSO
17607 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_dir+0xffc)
17608 +#else
17609 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_dir+0xffc)
17610 +#endif
17611 #endif
17612 jmp 3f
17613 /*
17614 @@ -272,6 +347,9 @@ ENTRY(startup_32_smp)
17615 movl %eax,%es
17616 movl %eax,%fs
17617 movl %eax,%gs
17618 + movl pa(stack_start),%ecx
17619 + movl %eax,%ss
17620 + leal -__PAGE_OFFSET(%ecx),%esp
17621 #endif /* CONFIG_SMP */
17622 3:
17623
17624 @@ -297,6 +375,7 @@ ENTRY(startup_32_smp)
17625 orl %edx,%eax
17626 movl %eax,%cr4
17627
17628 +#ifdef CONFIG_X86_PAE
17629 btl $5, %eax # check if PAE is enabled
17630 jnc 6f
17631
17632 @@ -305,6 +384,10 @@ ENTRY(startup_32_smp)
17633 cpuid
17634 cmpl $0x80000000, %eax
17635 jbe 6f
17636 +
17637 + /* Clear bogus XD_DISABLE bits */
17638 + call verify_cpu
17639 +
17640 mov $0x80000001, %eax
17641 cpuid
17642 /* Execute Disable bit supported? */
17643 @@ -312,13 +395,17 @@ ENTRY(startup_32_smp)
17644 jnc 6f
17645
17646 /* Setup EFER (Extended Feature Enable Register) */
17647 - movl $0xc0000080, %ecx
17648 + movl $MSR_EFER, %ecx
17649 rdmsr
17650
17651 btsl $11, %eax
17652 /* Make changes effective */
17653 wrmsr
17654
17655 + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
17656 + movl $1,pa(nx_enabled)
17657 +#endif
17658 +
17659 6:
17660
17661 /*
17662 @@ -331,8 +418,8 @@ ENTRY(startup_32_smp)
17663 movl %eax,%cr0 /* ..and set paging (PG) bit */
17664 ljmp $__BOOT_CS,$1f /* Clear prefetch and normalize %eip */
17665 1:
17666 - /* Set up the stack pointer */
17667 - lss stack_start,%esp
17668 + /* Shift the stack pointer to a virtual address */
17669 + addl $__PAGE_OFFSET, %esp
17670
17671 /*
17672 * Initialize eflags. Some BIOS's leave bits like NT set. This would
17673 @@ -344,9 +431,7 @@ ENTRY(startup_32_smp)
17674
17675 #ifdef CONFIG_SMP
17676 cmpb $0, ready
17677 - jz 1f /* Initial CPU cleans BSS */
17678 - jmp checkCPUtype
17679 -1:
17680 + jnz checkCPUtype
17681 #endif /* CONFIG_SMP */
17682
17683 /*
17684 @@ -424,7 +509,7 @@ is386: movl $2,%ecx # set MP
17685 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
17686 movl %eax,%ss # after changing gdt.
17687
17688 - movl $(__USER_DS),%eax # DS/ES contains default USER segment
17689 +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
17690 movl %eax,%ds
17691 movl %eax,%es
17692
17693 @@ -438,15 +523,22 @@ is386: movl $2,%ecx # set MP
17694 */
17695 cmpb $0,ready
17696 jne 1f
17697 - movl $per_cpu__gdt_page,%eax
17698 + movl $cpu_gdt_table,%eax
17699 movl $per_cpu__stack_canary,%ecx
17700 +#ifdef CONFIG_SMP
17701 + addl $__per_cpu_load,%ecx
17702 +#endif
17703 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
17704 shrl $16, %ecx
17705 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
17706 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
17707 1:
17708 -#endif
17709 movl $(__KERNEL_STACK_CANARY),%eax
17710 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
17711 + movl $(__USER_DS),%eax
17712 +#else
17713 + xorl %eax,%eax
17714 +#endif
17715 movl %eax,%gs
17716
17717 xorl %eax,%eax # Clear LDT
17718 @@ -454,14 +546,7 @@ is386: movl $2,%ecx # set MP
17719
17720 cld # gcc2 wants the direction flag cleared at all times
17721 pushl $0 # fake return address for unwinder
17722 -#ifdef CONFIG_SMP
17723 - movb ready, %cl
17724 movb $1, ready
17725 - cmpb $0,%cl # the first CPU calls start_kernel
17726 - je 1f
17727 - movl (stack_start), %esp
17728 -1:
17729 -#endif /* CONFIG_SMP */
17730 jmp *(initial_code)
17731
17732 /*
17733 @@ -546,22 +631,22 @@ early_page_fault:
17734 jmp early_fault
17735
17736 early_fault:
17737 - cld
17738 #ifdef CONFIG_PRINTK
17739 + cmpl $1,%ss:early_recursion_flag
17740 + je hlt_loop
17741 + incl %ss:early_recursion_flag
17742 + cld
17743 pusha
17744 movl $(__KERNEL_DS),%eax
17745 movl %eax,%ds
17746 movl %eax,%es
17747 - cmpl $2,early_recursion_flag
17748 - je hlt_loop
17749 - incl early_recursion_flag
17750 movl %cr2,%eax
17751 pushl %eax
17752 pushl %edx /* trapno */
17753 pushl $fault_msg
17754 call printk
17755 +; call dump_stack
17756 #endif
17757 - call dump_stack
17758 hlt_loop:
17759 hlt
17760 jmp hlt_loop
17761 @@ -569,8 +654,11 @@ hlt_loop:
17762 /* This is the default interrupt "handler" :-) */
17763 ALIGN
17764 ignore_int:
17765 - cld
17766 #ifdef CONFIG_PRINTK
17767 + cmpl $2,%ss:early_recursion_flag
17768 + je hlt_loop
17769 + incl %ss:early_recursion_flag
17770 + cld
17771 pushl %eax
17772 pushl %ecx
17773 pushl %edx
17774 @@ -579,9 +667,6 @@ ignore_int:
17775 movl $(__KERNEL_DS),%eax
17776 movl %eax,%ds
17777 movl %eax,%es
17778 - cmpl $2,early_recursion_flag
17779 - je hlt_loop
17780 - incl early_recursion_flag
17781 pushl 16(%esp)
17782 pushl 24(%esp)
17783 pushl 32(%esp)
17784 @@ -600,6 +685,8 @@ ignore_int:
17785 #endif
17786 iret
17787
17788 +#include "verify_cpu.S"
17789 +
17790 __REFDATA
17791 .align 4
17792 ENTRY(initial_code)
17793 @@ -610,31 +697,47 @@ ENTRY(initial_page_table)
17794 /*
17795 * BSS section
17796 */
17797 -__PAGE_ALIGNED_BSS
17798 - .align PAGE_SIZE_asm
17799 #ifdef CONFIG_X86_PAE
17800 +.section .swapper_pg_pmd,"a",@progbits
17801 swapper_pg_pmd:
17802 .fill 1024*KPMDS,4,0
17803 #else
17804 +.section .swapper_pg_dir,"a",@progbits
17805 ENTRY(swapper_pg_dir)
17806 .fill 1024,4,0
17807 #endif
17808 +.section .swapper_pg_fixmap,"a",@progbits
17809 swapper_pg_fixmap:
17810 .fill 1024,4,0
17811 #ifdef CONFIG_X86_TRAMPOLINE
17812 +.section .trampoline_pg_dir,"a",@progbits
17813 ENTRY(trampoline_pg_dir)
17814 +#ifdef CONFIG_X86_PAE
17815 + .fill 4,8,0
17816 +#else
17817 .fill 1024,4,0
17818 #endif
17819 +#endif
17820 +
17821 +.section .empty_zero_page,"a",@progbits
17822 ENTRY(empty_zero_page)
17823 .fill 4096,1,0
17824
17825 /*
17826 + * The IDT has to be page-aligned to simplify the Pentium
17827 + * F0 0F bug workaround.. We have a special link segment
17828 + * for this.
17829 + */
17830 +.section .idt,"a",@progbits
17831 +ENTRY(idt_table)
17832 + .fill 256,8,0
17833 +
17834 +/*
17835 * This starts the data section.
17836 */
17837 #ifdef CONFIG_X86_PAE
17838 -__PAGE_ALIGNED_DATA
17839 - /* Page-aligned for the benefit of paravirt? */
17840 - .align PAGE_SIZE_asm
17841 +.section .swapper_pg_dir,"a",@progbits
17842 +
17843 ENTRY(swapper_pg_dir)
17844 .long pa(swapper_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
17845 # if KPMDS == 3
17846 @@ -653,15 +756,24 @@ ENTRY(swapper_pg_dir)
17847 # error "Kernel PMDs should be 1, 2 or 3"
17848 # endif
17849 .align PAGE_SIZE_asm /* needs to be page-sized too */
17850 +
17851 +#ifdef CONFIG_PAX_PER_CPU_PGD
17852 +ENTRY(cpu_pgd)
17853 + .rept NR_CPUS
17854 + .fill 4,8,0
17855 + .endr
17856 +#endif
17857 +
17858 #endif
17859
17860 .data
17861 +.balign 4
17862 ENTRY(stack_start)
17863 - .long init_thread_union+THREAD_SIZE
17864 - .long __BOOT_DS
17865 + .long init_thread_union+THREAD_SIZE-8
17866
17867 ready: .byte 0
17868
17869 +.section .rodata,"a",@progbits
17870 early_recursion_flag:
17871 .long 0
17872
17873 @@ -697,7 +809,7 @@ fault_msg:
17874 .word 0 # 32 bit align gdt_desc.address
17875 boot_gdt_descr:
17876 .word __BOOT_DS+7
17877 - .long boot_gdt - __PAGE_OFFSET
17878 + .long pa(boot_gdt)
17879
17880 .word 0 # 32-bit align idt_desc.address
17881 idt_descr:
17882 @@ -708,7 +820,7 @@ idt_descr:
17883 .word 0 # 32 bit align gdt_desc.address
17884 ENTRY(early_gdt_descr)
17885 .word GDT_ENTRIES*8-1
17886 - .long per_cpu__gdt_page /* Overwritten for secondary CPUs */
17887 + .long cpu_gdt_table /* Overwritten for secondary CPUs */
17888
17889 /*
17890 * The boot_gdt must mirror the equivalent in setup.S and is
17891 @@ -717,5 +829,65 @@ ENTRY(early_gdt_descr)
17892 .align L1_CACHE_BYTES
17893 ENTRY(boot_gdt)
17894 .fill GDT_ENTRY_BOOT_CS,8,0
17895 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
17896 - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
17897 + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
17898 + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
17899 +
17900 + .align PAGE_SIZE_asm
17901 +ENTRY(cpu_gdt_table)
17902 + .rept NR_CPUS
17903 + .quad 0x0000000000000000 /* NULL descriptor */
17904 + .quad 0x0000000000000000 /* 0x0b reserved */
17905 + .quad 0x0000000000000000 /* 0x13 reserved */
17906 + .quad 0x0000000000000000 /* 0x1b reserved */
17907 +
17908 +#ifdef CONFIG_PAX_KERNEXEC
17909 + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
17910 +#else
17911 + .quad 0x0000000000000000 /* 0x20 unused */
17912 +#endif
17913 +
17914 + .quad 0x0000000000000000 /* 0x28 unused */
17915 + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
17916 + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
17917 + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
17918 + .quad 0x0000000000000000 /* 0x4b reserved */
17919 + .quad 0x0000000000000000 /* 0x53 reserved */
17920 + .quad 0x0000000000000000 /* 0x5b reserved */
17921 +
17922 + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
17923 + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
17924 + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
17925 + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
17926 +
17927 + .quad 0x0000000000000000 /* 0x80 TSS descriptor */
17928 + .quad 0x0000000000000000 /* 0x88 LDT descriptor */
17929 +
17930 + /*
17931 + * Segments used for calling PnP BIOS have byte granularity.
17932 + * The code segments and data segments have fixed 64k limits,
17933 + * the transfer segment sizes are set at run time.
17934 + */
17935 + .quad 0x00409b000000ffff /* 0x90 32-bit code */
17936 + .quad 0x00009b000000ffff /* 0x98 16-bit code */
17937 + .quad 0x000093000000ffff /* 0xa0 16-bit data */
17938 + .quad 0x0000930000000000 /* 0xa8 16-bit data */
17939 + .quad 0x0000930000000000 /* 0xb0 16-bit data */
17940 +
17941 + /*
17942 + * The APM segments have byte granularity and their bases
17943 + * are set at run time. All have 64k limits.
17944 + */
17945 + .quad 0x00409b000000ffff /* 0xb8 APM CS code */
17946 + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
17947 + .quad 0x004093000000ffff /* 0xc8 APM DS data */
17948 +
17949 + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
17950 + .quad 0x0040930000000000 /* 0xd8 - PERCPU */
17951 + .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
17952 + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
17953 + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
17954 + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
17955 +
17956 + /* Be sure this is zeroed to avoid false validations in Xen */
17957 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
17958 + .endr
17959 diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
17960 index 780cd92..758b2a6 100644
17961 --- a/arch/x86/kernel/head_64.S
17962 +++ b/arch/x86/kernel/head_64.S
17963 @@ -19,6 +19,8 @@
17964 #include <asm/cache.h>
17965 #include <asm/processor-flags.h>
17966 #include <asm/percpu.h>
17967 +#include <asm/cpufeature.h>
17968 +#include <asm/alternative-asm.h>
17969
17970 #ifdef CONFIG_PARAVIRT
17971 #include <asm/asm-offsets.h>
17972 @@ -38,6 +40,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
17973 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
17974 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
17975 L3_START_KERNEL = pud_index(__START_KERNEL_map)
17976 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
17977 +L3_VMALLOC_START = pud_index(VMALLOC_START)
17978 +L4_VMALLOC_END = pgd_index(VMALLOC_END)
17979 +L3_VMALLOC_END = pud_index(VMALLOC_END)
17980 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
17981 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
17982
17983 .text
17984 __HEAD
17985 @@ -85,35 +93,23 @@ startup_64:
17986 */
17987 addq %rbp, init_level4_pgt + 0(%rip)
17988 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
17989 + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
17990 + addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
17991 + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
17992 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
17993
17994 addq %rbp, level3_ident_pgt + 0(%rip)
17995 +#ifndef CONFIG_XEN
17996 + addq %rbp, level3_ident_pgt + 8(%rip)
17997 +#endif
17998
17999 - addq %rbp, level3_kernel_pgt + (510*8)(%rip)
18000 - addq %rbp, level3_kernel_pgt + (511*8)(%rip)
18001 + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
18002 +
18003 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
18004 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
18005
18006 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
18007 -
18008 - /* Add an Identity mapping if I am above 1G */
18009 - leaq _text(%rip), %rdi
18010 - andq $PMD_PAGE_MASK, %rdi
18011 -
18012 - movq %rdi, %rax
18013 - shrq $PUD_SHIFT, %rax
18014 - andq $(PTRS_PER_PUD - 1), %rax
18015 - jz ident_complete
18016 -
18017 - leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
18018 - leaq level3_ident_pgt(%rip), %rbx
18019 - movq %rdx, 0(%rbx, %rax, 8)
18020 -
18021 - movq %rdi, %rax
18022 - shrq $PMD_SHIFT, %rax
18023 - andq $(PTRS_PER_PMD - 1), %rax
18024 - leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
18025 - leaq level2_spare_pgt(%rip), %rbx
18026 - movq %rdx, 0(%rbx, %rax, 8)
18027 -ident_complete:
18028 + addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
18029
18030 /*
18031 * Fixup the kernel text+data virtual addresses. Note that
18032 @@ -161,8 +157,8 @@ ENTRY(secondary_startup_64)
18033 * after the boot processor executes this code.
18034 */
18035
18036 - /* Enable PAE mode and PGE */
18037 - movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
18038 + /* Enable PAE mode and PSE/PGE */
18039 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
18040 movq %rax, %cr4
18041
18042 /* Setup early boot stage 4 level pagetables. */
18043 @@ -184,9 +180,16 @@ ENTRY(secondary_startup_64)
18044 movl $MSR_EFER, %ecx
18045 rdmsr
18046 btsl $_EFER_SCE, %eax /* Enable System Call */
18047 - btl $20,%edi /* No Execute supported? */
18048 + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
18049 jnc 1f
18050 btsl $_EFER_NX, %eax
18051 + leaq init_level4_pgt(%rip), %rdi
18052 +#ifndef CONFIG_EFI
18053 + btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
18054 +#endif
18055 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
18056 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
18057 + btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
18058 1: wrmsr /* Make changes effective */
18059
18060 /* Setup cr0 */
18061 @@ -249,6 +252,7 @@ ENTRY(secondary_startup_64)
18062 * jump. In addition we need to ensure %cs is set so we make this
18063 * a far return.
18064 */
18065 + pax_set_fptr_mask
18066 movq initial_code(%rip),%rax
18067 pushq $0 # fake return address to stop unwinder
18068 pushq $__KERNEL_CS # set correct cs
18069 @@ -262,16 +266,16 @@ ENTRY(secondary_startup_64)
18070 .quad x86_64_start_kernel
18071 ENTRY(initial_gs)
18072 .quad INIT_PER_CPU_VAR(irq_stack_union)
18073 - __FINITDATA
18074
18075 ENTRY(stack_start)
18076 .quad init_thread_union+THREAD_SIZE-8
18077 .word 0
18078 + __FINITDATA
18079
18080 bad_address:
18081 jmp bad_address
18082
18083 - .section ".init.text","ax"
18084 + __INIT
18085 #ifdef CONFIG_EARLY_PRINTK
18086 .globl early_idt_handlers
18087 early_idt_handlers:
18088 @@ -316,18 +320,23 @@ ENTRY(early_idt_handler)
18089 #endif /* EARLY_PRINTK */
18090 1: hlt
18091 jmp 1b
18092 + .previous
18093
18094 #ifdef CONFIG_EARLY_PRINTK
18095 + __INITDATA
18096 early_recursion_flag:
18097 .long 0
18098 + .previous
18099
18100 + .section .rodata,"a",@progbits
18101 early_idt_msg:
18102 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
18103 early_idt_ripmsg:
18104 .asciz "RIP %s\n"
18105 + .previous
18106 #endif /* CONFIG_EARLY_PRINTK */
18107 - .previous
18108
18109 + .section .rodata,"a",@progbits
18110 #define NEXT_PAGE(name) \
18111 .balign PAGE_SIZE; \
18112 ENTRY(name)
18113 @@ -350,13 +359,41 @@ NEXT_PAGE(init_level4_pgt)
18114 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
18115 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
18116 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
18117 + .org init_level4_pgt + L4_VMALLOC_START*8, 0
18118 + .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
18119 + .org init_level4_pgt + L4_VMALLOC_END*8, 0
18120 + .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
18121 + .org init_level4_pgt + L4_VMEMMAP_START*8, 0
18122 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
18123 .org init_level4_pgt + L4_START_KERNEL*8, 0
18124 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
18125 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
18126
18127 +#ifdef CONFIG_PAX_PER_CPU_PGD
18128 +NEXT_PAGE(cpu_pgd)
18129 + .rept NR_CPUS
18130 + .fill 512,8,0
18131 + .endr
18132 +#endif
18133 +
18134 NEXT_PAGE(level3_ident_pgt)
18135 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
18136 +#ifdef CONFIG_XEN
18137 .fill 511,8,0
18138 +#else
18139 + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
18140 + .fill 510,8,0
18141 +#endif
18142 +
18143 +NEXT_PAGE(level3_vmalloc_start_pgt)
18144 + .fill 512,8,0
18145 +
18146 +NEXT_PAGE(level3_vmalloc_end_pgt)
18147 + .fill 512,8,0
18148 +
18149 +NEXT_PAGE(level3_vmemmap_pgt)
18150 + .fill L3_VMEMMAP_START,8,0
18151 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
18152
18153 NEXT_PAGE(level3_kernel_pgt)
18154 .fill L3_START_KERNEL,8,0
18155 @@ -364,20 +401,23 @@ NEXT_PAGE(level3_kernel_pgt)
18156 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
18157 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
18158
18159 +NEXT_PAGE(level2_vmemmap_pgt)
18160 + .fill 512,8,0
18161 +
18162 NEXT_PAGE(level2_fixmap_pgt)
18163 - .fill 506,8,0
18164 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
18165 - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
18166 - .fill 5,8,0
18167 + .fill 507,8,0
18168 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
18169 + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
18170 + .fill 4,8,0
18171
18172 -NEXT_PAGE(level1_fixmap_pgt)
18173 +NEXT_PAGE(level1_vsyscall_pgt)
18174 .fill 512,8,0
18175
18176 -NEXT_PAGE(level2_ident_pgt)
18177 - /* Since I easily can, map the first 1G.
18178 + /* Since I easily can, map the first 2G.
18179 * Don't set NX because code runs from these pages.
18180 */
18181 - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
18182 +NEXT_PAGE(level2_ident_pgt)
18183 + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
18184
18185 NEXT_PAGE(level2_kernel_pgt)
18186 /*
18187 @@ -390,33 +430,55 @@ NEXT_PAGE(level2_kernel_pgt)
18188 * If you want to increase this then increase MODULES_VADDR
18189 * too.)
18190 */
18191 - PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
18192 - KERNEL_IMAGE_SIZE/PMD_SIZE)
18193 -
18194 -NEXT_PAGE(level2_spare_pgt)
18195 - .fill 512, 8, 0
18196 + PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
18197
18198 #undef PMDS
18199 #undef NEXT_PAGE
18200
18201 - .data
18202 + .align PAGE_SIZE
18203 +ENTRY(cpu_gdt_table)
18204 + .rept NR_CPUS
18205 + .quad 0x0000000000000000 /* NULL descriptor */
18206 + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
18207 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
18208 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
18209 + .quad 0x00cffb000000ffff /* __USER32_CS */
18210 + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
18211 + .quad 0x00affb000000ffff /* __USER_CS */
18212 +
18213 +#ifdef CONFIG_PAX_KERNEXEC
18214 + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
18215 +#else
18216 + .quad 0x0 /* unused */
18217 +#endif
18218 +
18219 + .quad 0,0 /* TSS */
18220 + .quad 0,0 /* LDT */
18221 + .quad 0,0,0 /* three TLS descriptors */
18222 + .quad 0x0000f40000000000 /* node/CPU stored in limit */
18223 + /* asm/segment.h:GDT_ENTRIES must match this */
18224 +
18225 + /* zero the remaining page */
18226 + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
18227 + .endr
18228 +
18229 .align 16
18230 .globl early_gdt_descr
18231 early_gdt_descr:
18232 .word GDT_ENTRIES*8-1
18233 early_gdt_descr_base:
18234 - .quad INIT_PER_CPU_VAR(gdt_page)
18235 + .quad cpu_gdt_table
18236
18237 ENTRY(phys_base)
18238 /* This must match the first entry in level2_kernel_pgt */
18239 .quad 0x0000000000000000
18240
18241 #include "../../x86/xen/xen-head.S"
18242 -
18243 - .section .bss, "aw", @nobits
18244 +
18245 + .section .rodata,"a",@progbits
18246 .align L1_CACHE_BYTES
18247 ENTRY(idt_table)
18248 - .skip IDT_ENTRIES * 16
18249 + .fill 512,8,0
18250
18251 __PAGE_ALIGNED_BSS
18252 .align PAGE_SIZE
18253 diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
18254 index 9c3bd4a..e1d9b35 100644
18255 --- a/arch/x86/kernel/i386_ksyms_32.c
18256 +++ b/arch/x86/kernel/i386_ksyms_32.c
18257 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
18258 EXPORT_SYMBOL(cmpxchg8b_emu);
18259 #endif
18260
18261 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
18262 +
18263 /* Networking helper routines. */
18264 EXPORT_SYMBOL(csum_partial_copy_generic);
18265 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
18266 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
18267
18268 EXPORT_SYMBOL(__get_user_1);
18269 EXPORT_SYMBOL(__get_user_2);
18270 @@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
18271
18272 EXPORT_SYMBOL(csum_partial);
18273 EXPORT_SYMBOL(empty_zero_page);
18274 +
18275 +#ifdef CONFIG_PAX_KERNEXEC
18276 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
18277 +#endif
18278 diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
18279 index df89102..a244320 100644
18280 --- a/arch/x86/kernel/i8259.c
18281 +++ b/arch/x86/kernel/i8259.c
18282 @@ -208,7 +208,7 @@ spurious_8259A_irq:
18283 "spurious 8259A interrupt: IRQ%d.\n", irq);
18284 spurious_irq_mask |= irqmask;
18285 }
18286 - atomic_inc(&irq_err_count);
18287 + atomic_inc_unchecked(&irq_err_count);
18288 /*
18289 * Theoretically we do not have to handle this IRQ,
18290 * but in Linux this does not cause problems and is
18291 diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
18292 index 3a54dcb..1c22348 100644
18293 --- a/arch/x86/kernel/init_task.c
18294 +++ b/arch/x86/kernel/init_task.c
18295 @@ -20,8 +20,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
18296 * way process stacks are handled. This is done by having a special
18297 * "init_task" linker map entry..
18298 */
18299 -union thread_union init_thread_union __init_task_data =
18300 - { INIT_THREAD_INFO(init_task) };
18301 +union thread_union init_thread_union __init_task_data;
18302
18303 /*
18304 * Initial task structure.
18305 @@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
18306 * section. Since TSS's are completely CPU-local, we want them
18307 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
18308 */
18309 -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
18310 -
18311 +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
18312 +EXPORT_SYMBOL(init_tss);
18313 diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
18314 index 99c4d30..74c84e9 100644
18315 --- a/arch/x86/kernel/ioport.c
18316 +++ b/arch/x86/kernel/ioport.c
18317 @@ -6,6 +6,7 @@
18318 #include <linux/sched.h>
18319 #include <linux/kernel.h>
18320 #include <linux/capability.h>
18321 +#include <linux/security.h>
18322 #include <linux/errno.h>
18323 #include <linux/types.h>
18324 #include <linux/ioport.h>
18325 @@ -41,6 +42,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
18326
18327 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
18328 return -EINVAL;
18329 +#ifdef CONFIG_GRKERNSEC_IO
18330 + if (turn_on && grsec_disable_privio) {
18331 + gr_handle_ioperm();
18332 + return -EPERM;
18333 + }
18334 +#endif
18335 if (turn_on && !capable(CAP_SYS_RAWIO))
18336 return -EPERM;
18337
18338 @@ -67,7 +74,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
18339 * because the ->io_bitmap_max value must match the bitmap
18340 * contents:
18341 */
18342 - tss = &per_cpu(init_tss, get_cpu());
18343 + tss = init_tss + get_cpu();
18344
18345 set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
18346
18347 @@ -111,6 +118,12 @@ static int do_iopl(unsigned int level, struct pt_regs *regs)
18348 return -EINVAL;
18349 /* Trying to gain more privileges? */
18350 if (level > old) {
18351 +#ifdef CONFIG_GRKERNSEC_IO
18352 + if (grsec_disable_privio) {
18353 + gr_handle_iopl();
18354 + return -EPERM;
18355 + }
18356 +#endif
18357 if (!capable(CAP_SYS_RAWIO))
18358 return -EPERM;
18359 }
18360 diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
18361 index 04bbd52..83a07d9 100644
18362 --- a/arch/x86/kernel/irq.c
18363 +++ b/arch/x86/kernel/irq.c
18364 @@ -15,7 +15,7 @@
18365 #include <asm/mce.h>
18366 #include <asm/hw_irq.h>
18367
18368 -atomic_t irq_err_count;
18369 +atomic_unchecked_t irq_err_count;
18370
18371 /* Function pointer for generic interrupt vector handling */
18372 void (*generic_interrupt_extension)(void) = NULL;
18373 @@ -114,9 +114,9 @@ static int show_other_interrupts(struct seq_file *p, int prec)
18374 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
18375 seq_printf(p, " Machine check polls\n");
18376 #endif
18377 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
18378 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
18379 #if defined(CONFIG_X86_IO_APIC)
18380 - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
18381 + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
18382 #endif
18383 return 0;
18384 }
18385 @@ -209,10 +209,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
18386
18387 u64 arch_irq_stat(void)
18388 {
18389 - u64 sum = atomic_read(&irq_err_count);
18390 + u64 sum = atomic_read_unchecked(&irq_err_count);
18391
18392 #ifdef CONFIG_X86_IO_APIC
18393 - sum += atomic_read(&irq_mis_count);
18394 + sum += atomic_read_unchecked(&irq_mis_count);
18395 #endif
18396 return sum;
18397 }
18398 diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
18399 index 7d35d0f..03f1d52 100644
18400 --- a/arch/x86/kernel/irq_32.c
18401 +++ b/arch/x86/kernel/irq_32.c
18402 @@ -35,7 +35,7 @@ static int check_stack_overflow(void)
18403 __asm__ __volatile__("andl %%esp,%0" :
18404 "=r" (sp) : "0" (THREAD_SIZE - 1));
18405
18406 - return sp < (sizeof(struct thread_info) + STACK_WARN);
18407 + return sp < STACK_WARN;
18408 }
18409
18410 static void print_stack_overflow(void)
18411 @@ -54,9 +54,9 @@ static inline void print_stack_overflow(void) { }
18412 * per-CPU IRQ handling contexts (thread information and stack)
18413 */
18414 union irq_ctx {
18415 - struct thread_info tinfo;
18416 - u32 stack[THREAD_SIZE/sizeof(u32)];
18417 -} __attribute__((aligned(PAGE_SIZE)));
18418 + unsigned long previous_esp;
18419 + u32 stack[THREAD_SIZE/sizeof(u32)];
18420 +} __attribute__((aligned(THREAD_SIZE)));
18421
18422 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
18423 static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);
18424 @@ -78,10 +78,9 @@ static void call_on_stack(void *func, void *stack)
18425 static inline int
18426 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
18427 {
18428 - union irq_ctx *curctx, *irqctx;
18429 + union irq_ctx *irqctx;
18430 u32 *isp, arg1, arg2;
18431
18432 - curctx = (union irq_ctx *) current_thread_info();
18433 irqctx = __get_cpu_var(hardirq_ctx);
18434
18435 /*
18436 @@ -90,21 +89,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
18437 * handler) we can't do that and just have to keep using the
18438 * current stack (which is the irq stack already after all)
18439 */
18440 - if (unlikely(curctx == irqctx))
18441 + if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
18442 return 0;
18443
18444 /* build the stack frame on the IRQ stack */
18445 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
18446 - irqctx->tinfo.task = curctx->tinfo.task;
18447 - irqctx->tinfo.previous_esp = current_stack_pointer;
18448 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
18449 + irqctx->previous_esp = current_stack_pointer;
18450
18451 - /*
18452 - * Copy the softirq bits in preempt_count so that the
18453 - * softirq checks work in the hardirq context.
18454 - */
18455 - irqctx->tinfo.preempt_count =
18456 - (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
18457 - (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
18458 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18459 + __set_fs(MAKE_MM_SEG(0));
18460 +#endif
18461
18462 if (unlikely(overflow))
18463 call_on_stack(print_stack_overflow, isp);
18464 @@ -116,6 +110,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
18465 : "0" (irq), "1" (desc), "2" (isp),
18466 "D" (desc->handle_irq)
18467 : "memory", "cc", "ecx");
18468 +
18469 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18470 + __set_fs(current_thread_info()->addr_limit);
18471 +#endif
18472 +
18473 return 1;
18474 }
18475
18476 @@ -124,28 +123,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
18477 */
18478 void __cpuinit irq_ctx_init(int cpu)
18479 {
18480 - union irq_ctx *irqctx;
18481 -
18482 if (per_cpu(hardirq_ctx, cpu))
18483 return;
18484
18485 - irqctx = &per_cpu(hardirq_stack, cpu);
18486 - irqctx->tinfo.task = NULL;
18487 - irqctx->tinfo.exec_domain = NULL;
18488 - irqctx->tinfo.cpu = cpu;
18489 - irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
18490 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
18491 -
18492 - per_cpu(hardirq_ctx, cpu) = irqctx;
18493 -
18494 - irqctx = &per_cpu(softirq_stack, cpu);
18495 - irqctx->tinfo.task = NULL;
18496 - irqctx->tinfo.exec_domain = NULL;
18497 - irqctx->tinfo.cpu = cpu;
18498 - irqctx->tinfo.preempt_count = 0;
18499 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
18500 -
18501 - per_cpu(softirq_ctx, cpu) = irqctx;
18502 + per_cpu(hardirq_ctx, cpu) = &per_cpu(hardirq_stack, cpu);
18503 + per_cpu(softirq_ctx, cpu) = &per_cpu(softirq_stack, cpu);
18504
18505 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
18506 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
18507 @@ -159,7 +141,6 @@ void irq_ctx_exit(int cpu)
18508 asmlinkage void do_softirq(void)
18509 {
18510 unsigned long flags;
18511 - struct thread_info *curctx;
18512 union irq_ctx *irqctx;
18513 u32 *isp;
18514
18515 @@ -169,15 +150,22 @@ asmlinkage void do_softirq(void)
18516 local_irq_save(flags);
18517
18518 if (local_softirq_pending()) {
18519 - curctx = current_thread_info();
18520 irqctx = __get_cpu_var(softirq_ctx);
18521 - irqctx->tinfo.task = curctx->task;
18522 - irqctx->tinfo.previous_esp = current_stack_pointer;
18523 + irqctx->previous_esp = current_stack_pointer;
18524
18525 /* build the stack frame on the softirq stack */
18526 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
18527 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
18528 +
18529 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18530 + __set_fs(MAKE_MM_SEG(0));
18531 +#endif
18532
18533 call_on_stack(__do_softirq, isp);
18534 +
18535 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18536 + __set_fs(current_thread_info()->addr_limit);
18537 +#endif
18538 +
18539 /*
18540 * Shouldnt happen, we returned above if in_interrupt():
18541 */
18542 diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
18543 index 8d82a77..0baf312 100644
18544 --- a/arch/x86/kernel/kgdb.c
18545 +++ b/arch/x86/kernel/kgdb.c
18546 @@ -390,13 +390,13 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
18547
18548 /* clear the trace bit */
18549 linux_regs->flags &= ~X86_EFLAGS_TF;
18550 - atomic_set(&kgdb_cpu_doing_single_step, -1);
18551 + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
18552
18553 /* set the trace bit if we're stepping */
18554 if (remcomInBuffer[0] == 's') {
18555 linux_regs->flags |= X86_EFLAGS_TF;
18556 kgdb_single_step = 1;
18557 - atomic_set(&kgdb_cpu_doing_single_step,
18558 + atomic_set_unchecked(&kgdb_cpu_doing_single_step,
18559 raw_smp_processor_id());
18560 }
18561
18562 @@ -476,7 +476,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
18563 break;
18564
18565 case DIE_DEBUG:
18566 - if (atomic_read(&kgdb_cpu_doing_single_step) ==
18567 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) ==
18568 raw_smp_processor_id()) {
18569 if (user_mode(regs))
18570 return single_step_cont(regs, args);
18571 @@ -573,7 +573,7 @@ unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
18572 return instruction_pointer(regs);
18573 }
18574
18575 -struct kgdb_arch arch_kgdb_ops = {
18576 +const struct kgdb_arch arch_kgdb_ops = {
18577 /* Breakpoint instruction: */
18578 .gdb_bpt_instr = { 0xcc },
18579 .flags = KGDB_HW_BREAKPOINT,
18580 diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
18581 index 7a67820..70ea187 100644
18582 --- a/arch/x86/kernel/kprobes.c
18583 +++ b/arch/x86/kernel/kprobes.c
18584 @@ -168,9 +168,13 @@ static void __kprobes set_jmp_op(void *from, void *to)
18585 char op;
18586 s32 raddr;
18587 } __attribute__((packed)) * jop;
18588 - jop = (struct __arch_jmp_op *)from;
18589 +
18590 + jop = (struct __arch_jmp_op *)(ktla_ktva(from));
18591 +
18592 + pax_open_kernel();
18593 jop->raddr = (s32)((long)(to) - ((long)(from) + 5));
18594 jop->op = RELATIVEJUMP_INSTRUCTION;
18595 + pax_close_kernel();
18596 }
18597
18598 /*
18599 @@ -195,7 +199,7 @@ static int __kprobes can_boost(kprobe_opcode_t *opcodes)
18600 kprobe_opcode_t opcode;
18601 kprobe_opcode_t *orig_opcodes = opcodes;
18602
18603 - if (search_exception_tables((unsigned long)opcodes))
18604 + if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
18605 return 0; /* Page fault may occur on this address. */
18606
18607 retry:
18608 @@ -339,7 +343,9 @@ static void __kprobes fix_riprel(struct kprobe *p)
18609 disp = (u8 *) p->addr + *((s32 *) insn) -
18610 (u8 *) p->ainsn.insn;
18611 BUG_ON((s64) (s32) disp != disp); /* Sanity check. */
18612 + pax_open_kernel();
18613 *(s32 *)insn = (s32) disp;
18614 + pax_close_kernel();
18615 }
18616 }
18617 #endif
18618 @@ -347,16 +353,18 @@ static void __kprobes fix_riprel(struct kprobe *p)
18619
18620 static void __kprobes arch_copy_kprobe(struct kprobe *p)
18621 {
18622 - memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
18623 + pax_open_kernel();
18624 + memcpy(p->ainsn.insn, ktla_ktva(p->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
18625 + pax_close_kernel();
18626
18627 fix_riprel(p);
18628
18629 - if (can_boost(p->addr))
18630 + if (can_boost(ktla_ktva(p->addr)))
18631 p->ainsn.boostable = 0;
18632 else
18633 p->ainsn.boostable = -1;
18634
18635 - p->opcode = *p->addr;
18636 + p->opcode = *(ktla_ktva(p->addr));
18637 }
18638
18639 int __kprobes arch_prepare_kprobe(struct kprobe *p)
18640 @@ -434,7 +442,7 @@ static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
18641 if (p->opcode == BREAKPOINT_INSTRUCTION)
18642 regs->ip = (unsigned long)p->addr;
18643 else
18644 - regs->ip = (unsigned long)p->ainsn.insn;
18645 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
18646 }
18647
18648 void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
18649 @@ -455,7 +463,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
18650 if (p->ainsn.boostable == 1 && !p->post_handler) {
18651 /* Boost up -- we can execute copied instructions directly */
18652 reset_current_kprobe();
18653 - regs->ip = (unsigned long)p->ainsn.insn;
18654 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
18655 preempt_enable_no_resched();
18656 return;
18657 }
18658 @@ -525,7 +533,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
18659 struct kprobe_ctlblk *kcb;
18660
18661 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
18662 - if (*addr != BREAKPOINT_INSTRUCTION) {
18663 + if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
18664 /*
18665 * The breakpoint instruction was removed right
18666 * after we hit it. Another cpu has removed
18667 @@ -637,6 +645,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
18668 /* Skip orig_ax, ip, cs */
18669 " addq $24, %rsp\n"
18670 " popfq\n"
18671 +#ifdef KERNEXEC_PLUGIN
18672 + " btsq $63,(%rsp)\n"
18673 +#endif
18674 #else
18675 " pushf\n"
18676 /*
18677 @@ -777,7 +788,7 @@ static void __kprobes resume_execution(struct kprobe *p,
18678 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
18679 {
18680 unsigned long *tos = stack_addr(regs);
18681 - unsigned long copy_ip = (unsigned long)p->ainsn.insn;
18682 + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
18683 unsigned long orig_ip = (unsigned long)p->addr;
18684 kprobe_opcode_t *insn = p->ainsn.insn;
18685
18686 @@ -960,7 +971,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
18687 struct die_args *args = data;
18688 int ret = NOTIFY_DONE;
18689
18690 - if (args->regs && user_mode_vm(args->regs))
18691 + if (args->regs && user_mode(args->regs))
18692 return ret;
18693
18694 switch (val) {
18695 diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
18696 index 63b0ec8..6d92227 100644
18697 --- a/arch/x86/kernel/kvm.c
18698 +++ b/arch/x86/kernel/kvm.c
18699 @@ -216,6 +216,7 @@ static void __init paravirt_ops_setup(void)
18700 pv_mmu_ops.set_pud = kvm_set_pud;
18701 #if PAGETABLE_LEVELS == 4
18702 pv_mmu_ops.set_pgd = kvm_set_pgd;
18703 + pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
18704 #endif
18705 #endif
18706 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
18707 diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
18708 index ec6ef60..ab2c824 100644
18709 --- a/arch/x86/kernel/ldt.c
18710 +++ b/arch/x86/kernel/ldt.c
18711 @@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
18712 if (reload) {
18713 #ifdef CONFIG_SMP
18714 preempt_disable();
18715 - load_LDT(pc);
18716 + load_LDT_nolock(pc);
18717 if (!cpumask_equal(mm_cpumask(current->mm),
18718 cpumask_of(smp_processor_id())))
18719 smp_call_function(flush_ldt, current->mm, 1);
18720 preempt_enable();
18721 #else
18722 - load_LDT(pc);
18723 + load_LDT_nolock(pc);
18724 #endif
18725 }
18726 if (oldsize) {
18727 @@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
18728 return err;
18729
18730 for (i = 0; i < old->size; i++)
18731 - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
18732 + write_ldt_entry(new->ldt, i, old->ldt + i);
18733 return 0;
18734 }
18735
18736 @@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
18737 retval = copy_ldt(&mm->context, &old_mm->context);
18738 mutex_unlock(&old_mm->context.lock);
18739 }
18740 +
18741 + if (tsk == current) {
18742 + mm->context.vdso = 0;
18743 +
18744 +#ifdef CONFIG_X86_32
18745 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
18746 + mm->context.user_cs_base = 0UL;
18747 + mm->context.user_cs_limit = ~0UL;
18748 +
18749 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
18750 + cpus_clear(mm->context.cpu_user_cs_mask);
18751 +#endif
18752 +
18753 +#endif
18754 +#endif
18755 +
18756 + }
18757 +
18758 return retval;
18759 }
18760
18761 @@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
18762 }
18763 }
18764
18765 +#ifdef CONFIG_PAX_SEGMEXEC
18766 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
18767 + error = -EINVAL;
18768 + goto out_unlock;
18769 + }
18770 +#endif
18771 +
18772 fill_ldt(&ldt, &ldt_info);
18773 if (oldmode)
18774 ldt.avl = 0;
18775 diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
18776 index c1c429d..f02eaf9 100644
18777 --- a/arch/x86/kernel/machine_kexec_32.c
18778 +++ b/arch/x86/kernel/machine_kexec_32.c
18779 @@ -26,7 +26,7 @@
18780 #include <asm/system.h>
18781 #include <asm/cacheflush.h>
18782
18783 -static void set_idt(void *newidt, __u16 limit)
18784 +static void set_idt(struct desc_struct *newidt, __u16 limit)
18785 {
18786 struct desc_ptr curidt;
18787
18788 @@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
18789 }
18790
18791
18792 -static void set_gdt(void *newgdt, __u16 limit)
18793 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
18794 {
18795 struct desc_ptr curgdt;
18796
18797 @@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
18798 }
18799
18800 control_page = page_address(image->control_code_page);
18801 - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
18802 + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
18803
18804 relocate_kernel_ptr = control_page;
18805 page_list[PA_CONTROL_PAGE] = __pa(control_page);
18806 diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
18807 index 1e47679..e73449d 100644
18808 --- a/arch/x86/kernel/microcode_amd.c
18809 +++ b/arch/x86/kernel/microcode_amd.c
18810 @@ -364,7 +364,7 @@ static void microcode_fini_cpu_amd(int cpu)
18811 uci->mc = NULL;
18812 }
18813
18814 -static struct microcode_ops microcode_amd_ops = {
18815 +static const struct microcode_ops microcode_amd_ops = {
18816 .request_microcode_user = request_microcode_user,
18817 .request_microcode_fw = request_microcode_fw,
18818 .collect_cpu_info = collect_cpu_info_amd,
18819 @@ -372,7 +372,7 @@ static struct microcode_ops microcode_amd_ops = {
18820 .microcode_fini_cpu = microcode_fini_cpu_amd,
18821 };
18822
18823 -struct microcode_ops * __init init_amd_microcode(void)
18824 +const struct microcode_ops * __init init_amd_microcode(void)
18825 {
18826 return &microcode_amd_ops;
18827 }
18828 diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
18829 index 378e9a8..b5a6ea9 100644
18830 --- a/arch/x86/kernel/microcode_core.c
18831 +++ b/arch/x86/kernel/microcode_core.c
18832 @@ -90,7 +90,7 @@ MODULE_LICENSE("GPL");
18833
18834 #define MICROCODE_VERSION "2.00"
18835
18836 -static struct microcode_ops *microcode_ops;
18837 +static const struct microcode_ops *microcode_ops;
18838
18839 /*
18840 * Synchronization.
18841 diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
18842 index 0d334dd..14cedaf 100644
18843 --- a/arch/x86/kernel/microcode_intel.c
18844 +++ b/arch/x86/kernel/microcode_intel.c
18845 @@ -443,13 +443,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
18846
18847 static int get_ucode_user(void *to, const void *from, size_t n)
18848 {
18849 - return copy_from_user(to, from, n);
18850 + return copy_from_user(to, (const void __force_user *)from, n);
18851 }
18852
18853 static enum ucode_state
18854 request_microcode_user(int cpu, const void __user *buf, size_t size)
18855 {
18856 - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
18857 + return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
18858 }
18859
18860 static void microcode_fini_cpu(int cpu)
18861 @@ -460,7 +460,7 @@ static void microcode_fini_cpu(int cpu)
18862 uci->mc = NULL;
18863 }
18864
18865 -static struct microcode_ops microcode_intel_ops = {
18866 +static const struct microcode_ops microcode_intel_ops = {
18867 .request_microcode_user = request_microcode_user,
18868 .request_microcode_fw = request_microcode_fw,
18869 .collect_cpu_info = collect_cpu_info,
18870 @@ -468,7 +468,7 @@ static struct microcode_ops microcode_intel_ops = {
18871 .microcode_fini_cpu = microcode_fini_cpu,
18872 };
18873
18874 -struct microcode_ops * __init init_intel_microcode(void)
18875 +const struct microcode_ops * __init init_intel_microcode(void)
18876 {
18877 return &microcode_intel_ops;
18878 }
18879 diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
18880 index 89f386f..9028f51 100644
18881 --- a/arch/x86/kernel/module.c
18882 +++ b/arch/x86/kernel/module.c
18883 @@ -34,7 +34,7 @@
18884 #define DEBUGP(fmt...)
18885 #endif
18886
18887 -void *module_alloc(unsigned long size)
18888 +static void *__module_alloc(unsigned long size, pgprot_t prot)
18889 {
18890 struct vm_struct *area;
18891
18892 @@ -48,8 +48,18 @@ void *module_alloc(unsigned long size)
18893 if (!area)
18894 return NULL;
18895
18896 - return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM,
18897 - PAGE_KERNEL_EXEC);
18898 + return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot);
18899 +}
18900 +
18901 +void *module_alloc(unsigned long size)
18902 +{
18903 +
18904 +#ifdef CONFIG_PAX_KERNEXEC
18905 + return __module_alloc(size, PAGE_KERNEL);
18906 +#else
18907 + return __module_alloc(size, PAGE_KERNEL_EXEC);
18908 +#endif
18909 +
18910 }
18911
18912 /* Free memory returned from module_alloc */
18913 @@ -58,6 +68,40 @@ void module_free(struct module *mod, void *module_region)
18914 vfree(module_region);
18915 }
18916
18917 +#ifdef CONFIG_PAX_KERNEXEC
18918 +#ifdef CONFIG_X86_32
18919 +void *module_alloc_exec(unsigned long size)
18920 +{
18921 + struct vm_struct *area;
18922 +
18923 + if (size == 0)
18924 + return NULL;
18925 +
18926 + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
18927 + return area ? area->addr : NULL;
18928 +}
18929 +EXPORT_SYMBOL(module_alloc_exec);
18930 +
18931 +void module_free_exec(struct module *mod, void *module_region)
18932 +{
18933 + vunmap(module_region);
18934 +}
18935 +EXPORT_SYMBOL(module_free_exec);
18936 +#else
18937 +void module_free_exec(struct module *mod, void *module_region)
18938 +{
18939 + module_free(mod, module_region);
18940 +}
18941 +EXPORT_SYMBOL(module_free_exec);
18942 +
18943 +void *module_alloc_exec(unsigned long size)
18944 +{
18945 + return __module_alloc(size, PAGE_KERNEL_RX);
18946 +}
18947 +EXPORT_SYMBOL(module_alloc_exec);
18948 +#endif
18949 +#endif
18950 +
18951 /* We don't need anything special. */
18952 int module_frob_arch_sections(Elf_Ehdr *hdr,
18953 Elf_Shdr *sechdrs,
18954 @@ -77,14 +121,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
18955 unsigned int i;
18956 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
18957 Elf32_Sym *sym;
18958 - uint32_t *location;
18959 + uint32_t *plocation, location;
18960
18961 DEBUGP("Applying relocate section %u to %u\n", relsec,
18962 sechdrs[relsec].sh_info);
18963 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
18964 /* This is where to make the change */
18965 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
18966 - + rel[i].r_offset;
18967 + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
18968 + location = (uint32_t)plocation;
18969 + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
18970 + plocation = ktla_ktva((void *)plocation);
18971 /* This is the symbol it is referring to. Note that all
18972 undefined symbols have been resolved. */
18973 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
18974 @@ -93,11 +139,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
18975 switch (ELF32_R_TYPE(rel[i].r_info)) {
18976 case R_386_32:
18977 /* We add the value into the location given */
18978 - *location += sym->st_value;
18979 + pax_open_kernel();
18980 + *plocation += sym->st_value;
18981 + pax_close_kernel();
18982 break;
18983 case R_386_PC32:
18984 /* Add the value, subtract its postition */
18985 - *location += sym->st_value - (uint32_t)location;
18986 + pax_open_kernel();
18987 + *plocation += sym->st_value - location;
18988 + pax_close_kernel();
18989 break;
18990 default:
18991 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
18992 @@ -153,21 +203,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
18993 case R_X86_64_NONE:
18994 break;
18995 case R_X86_64_64:
18996 + pax_open_kernel();
18997 *(u64 *)loc = val;
18998 + pax_close_kernel();
18999 break;
19000 case R_X86_64_32:
19001 + pax_open_kernel();
19002 *(u32 *)loc = val;
19003 + pax_close_kernel();
19004 if (val != *(u32 *)loc)
19005 goto overflow;
19006 break;
19007 case R_X86_64_32S:
19008 + pax_open_kernel();
19009 *(s32 *)loc = val;
19010 + pax_close_kernel();
19011 if ((s64)val != *(s32 *)loc)
19012 goto overflow;
19013 break;
19014 case R_X86_64_PC32:
19015 val -= (u64)loc;
19016 + pax_open_kernel();
19017 *(u32 *)loc = val;
19018 + pax_close_kernel();
19019 +
19020 #if 0
19021 if ((s64)val != *(s32 *)loc)
19022 goto overflow;
19023 diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
19024 index 3a7c5a4..9191528 100644
19025 --- a/arch/x86/kernel/paravirt-spinlocks.c
19026 +++ b/arch/x86/kernel/paravirt-spinlocks.c
19027 @@ -13,7 +13,7 @@ default_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
19028 __raw_spin_lock(lock);
19029 }
19030
19031 -struct pv_lock_ops pv_lock_ops = {
19032 +struct pv_lock_ops pv_lock_ops __read_only = {
19033 #ifdef CONFIG_SMP
19034 .spin_is_locked = __ticket_spin_is_locked,
19035 .spin_is_contended = __ticket_spin_is_contended,
19036 diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
19037 index 1b1739d..dea6077 100644
19038 --- a/arch/x86/kernel/paravirt.c
19039 +++ b/arch/x86/kernel/paravirt.c
19040 @@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
19041 {
19042 return x;
19043 }
19044 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
19045 +PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
19046 +#endif
19047
19048 void __init default_banner(void)
19049 {
19050 @@ -122,7 +125,7 @@ unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
19051 * corresponding structure. */
19052 static void *get_call_destination(u8 type)
19053 {
19054 - struct paravirt_patch_template tmpl = {
19055 + const struct paravirt_patch_template tmpl = {
19056 .pv_init_ops = pv_init_ops,
19057 .pv_time_ops = pv_time_ops,
19058 .pv_cpu_ops = pv_cpu_ops,
19059 @@ -133,6 +136,8 @@ static void *get_call_destination(u8 type)
19060 .pv_lock_ops = pv_lock_ops,
19061 #endif
19062 };
19063 +
19064 + pax_track_stack();
19065 return *((void **)&tmpl + type);
19066 }
19067
19068 @@ -145,15 +150,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
19069 if (opfunc == NULL)
19070 /* If there's no function, patch it with a ud2a (BUG) */
19071 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
19072 - else if (opfunc == _paravirt_nop)
19073 + else if (opfunc == (void *)_paravirt_nop)
19074 /* If the operation is a nop, then nop the callsite */
19075 ret = paravirt_patch_nop();
19076
19077 /* identity functions just return their single argument */
19078 - else if (opfunc == _paravirt_ident_32)
19079 + else if (opfunc == (void *)_paravirt_ident_32)
19080 ret = paravirt_patch_ident_32(insnbuf, len);
19081 - else if (opfunc == _paravirt_ident_64)
19082 + else if (opfunc == (void *)_paravirt_ident_64)
19083 ret = paravirt_patch_ident_64(insnbuf, len);
19084 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
19085 + else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
19086 + ret = paravirt_patch_ident_64(insnbuf, len);
19087 +#endif
19088
19089 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
19090 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
19091 @@ -178,7 +187,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
19092 if (insn_len > len || start == NULL)
19093 insn_len = len;
19094 else
19095 - memcpy(insnbuf, start, insn_len);
19096 + memcpy(insnbuf, ktla_ktva(start), insn_len);
19097
19098 return insn_len;
19099 }
19100 @@ -294,22 +303,22 @@ void arch_flush_lazy_mmu_mode(void)
19101 preempt_enable();
19102 }
19103
19104 -struct pv_info pv_info = {
19105 +struct pv_info pv_info __read_only = {
19106 .name = "bare hardware",
19107 .paravirt_enabled = 0,
19108 .kernel_rpl = 0,
19109 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
19110 };
19111
19112 -struct pv_init_ops pv_init_ops = {
19113 +struct pv_init_ops pv_init_ops __read_only = {
19114 .patch = native_patch,
19115 };
19116
19117 -struct pv_time_ops pv_time_ops = {
19118 +struct pv_time_ops pv_time_ops __read_only = {
19119 .sched_clock = native_sched_clock,
19120 };
19121
19122 -struct pv_irq_ops pv_irq_ops = {
19123 +struct pv_irq_ops pv_irq_ops __read_only = {
19124 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
19125 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
19126 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
19127 @@ -321,7 +330,7 @@ struct pv_irq_ops pv_irq_ops = {
19128 #endif
19129 };
19130
19131 -struct pv_cpu_ops pv_cpu_ops = {
19132 +struct pv_cpu_ops pv_cpu_ops __read_only = {
19133 .cpuid = native_cpuid,
19134 .get_debugreg = native_get_debugreg,
19135 .set_debugreg = native_set_debugreg,
19136 @@ -382,21 +391,26 @@ struct pv_cpu_ops pv_cpu_ops = {
19137 .end_context_switch = paravirt_nop,
19138 };
19139
19140 -struct pv_apic_ops pv_apic_ops = {
19141 +struct pv_apic_ops pv_apic_ops __read_only = {
19142 #ifdef CONFIG_X86_LOCAL_APIC
19143 .startup_ipi_hook = paravirt_nop,
19144 #endif
19145 };
19146
19147 -#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
19148 +#ifdef CONFIG_X86_32
19149 +#ifdef CONFIG_X86_PAE
19150 +/* 64-bit pagetable entries */
19151 +#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
19152 +#else
19153 /* 32-bit pagetable entries */
19154 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
19155 +#endif
19156 #else
19157 /* 64-bit pagetable entries */
19158 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
19159 #endif
19160
19161 -struct pv_mmu_ops pv_mmu_ops = {
19162 +struct pv_mmu_ops pv_mmu_ops __read_only = {
19163
19164 .read_cr2 = native_read_cr2,
19165 .write_cr2 = native_write_cr2,
19166 @@ -448,6 +462,7 @@ struct pv_mmu_ops pv_mmu_ops = {
19167 .make_pud = PTE_IDENT,
19168
19169 .set_pgd = native_set_pgd,
19170 + .set_pgd_batched = native_set_pgd_batched,
19171 #endif
19172 #endif /* PAGETABLE_LEVELS >= 3 */
19173
19174 @@ -467,6 +482,12 @@ struct pv_mmu_ops pv_mmu_ops = {
19175 },
19176
19177 .set_fixmap = native_set_fixmap,
19178 +
19179 +#ifdef CONFIG_PAX_KERNEXEC
19180 + .pax_open_kernel = native_pax_open_kernel,
19181 + .pax_close_kernel = native_pax_close_kernel,
19182 +#endif
19183 +
19184 };
19185
19186 EXPORT_SYMBOL_GPL(pv_time_ops);
19187 diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
19188 index 1a2d4b1..6a0dd55 100644
19189 --- a/arch/x86/kernel/pci-calgary_64.c
19190 +++ b/arch/x86/kernel/pci-calgary_64.c
19191 @@ -477,7 +477,7 @@ static void calgary_free_coherent(struct device *dev, size_t size,
19192 free_pages((unsigned long)vaddr, get_order(size));
19193 }
19194
19195 -static struct dma_map_ops calgary_dma_ops = {
19196 +static const struct dma_map_ops calgary_dma_ops = {
19197 .alloc_coherent = calgary_alloc_coherent,
19198 .free_coherent = calgary_free_coherent,
19199 .map_sg = calgary_map_sg,
19200 diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
19201 index 6ac3931..42b4414 100644
19202 --- a/arch/x86/kernel/pci-dma.c
19203 +++ b/arch/x86/kernel/pci-dma.c
19204 @@ -14,7 +14,7 @@
19205
19206 static int forbid_dac __read_mostly;
19207
19208 -struct dma_map_ops *dma_ops;
19209 +const struct dma_map_ops *dma_ops;
19210 EXPORT_SYMBOL(dma_ops);
19211
19212 static int iommu_sac_force __read_mostly;
19213 @@ -243,7 +243,7 @@ early_param("iommu", iommu_setup);
19214
19215 int dma_supported(struct device *dev, u64 mask)
19216 {
19217 - struct dma_map_ops *ops = get_dma_ops(dev);
19218 + const struct dma_map_ops *ops = get_dma_ops(dev);
19219
19220 #ifdef CONFIG_PCI
19221 if (mask > 0xffffffff && forbid_dac > 0) {
19222 diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
19223 index 1c76691..e3632db 100644
19224 --- a/arch/x86/kernel/pci-gart_64.c
19225 +++ b/arch/x86/kernel/pci-gart_64.c
19226 @@ -682,7 +682,7 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
19227 return -1;
19228 }
19229
19230 -static struct dma_map_ops gart_dma_ops = {
19231 +static const struct dma_map_ops gart_dma_ops = {
19232 .map_sg = gart_map_sg,
19233 .unmap_sg = gart_unmap_sg,
19234 .map_page = gart_map_page,
19235 diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
19236 index a3933d4..c898869 100644
19237 --- a/arch/x86/kernel/pci-nommu.c
19238 +++ b/arch/x86/kernel/pci-nommu.c
19239 @@ -94,7 +94,7 @@ static void nommu_sync_sg_for_device(struct device *dev,
19240 flush_write_buffers();
19241 }
19242
19243 -struct dma_map_ops nommu_dma_ops = {
19244 +const struct dma_map_ops nommu_dma_ops = {
19245 .alloc_coherent = dma_generic_alloc_coherent,
19246 .free_coherent = nommu_free_coherent,
19247 .map_sg = nommu_map_sg,
19248 diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
19249 index aaa6b78..4de1881 100644
19250 --- a/arch/x86/kernel/pci-swiotlb.c
19251 +++ b/arch/x86/kernel/pci-swiotlb.c
19252 @@ -25,7 +25,7 @@ static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
19253 return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
19254 }
19255
19256 -static struct dma_map_ops swiotlb_dma_ops = {
19257 +static const struct dma_map_ops swiotlb_dma_ops = {
19258 .mapping_error = swiotlb_dma_mapping_error,
19259 .alloc_coherent = x86_swiotlb_alloc_coherent,
19260 .free_coherent = swiotlb_free_coherent,
19261 diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
19262 index fc6c84d..0312ca2 100644
19263 --- a/arch/x86/kernel/process.c
19264 +++ b/arch/x86/kernel/process.c
19265 @@ -51,16 +51,33 @@ void free_thread_xstate(struct task_struct *tsk)
19266
19267 void free_thread_info(struct thread_info *ti)
19268 {
19269 - free_thread_xstate(ti->task);
19270 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
19271 }
19272
19273 +static struct kmem_cache *task_struct_cachep;
19274 +
19275 void arch_task_cache_init(void)
19276 {
19277 - task_xstate_cachep =
19278 - kmem_cache_create("task_xstate", xstate_size,
19279 + /* create a slab on which task_structs can be allocated */
19280 + task_struct_cachep =
19281 + kmem_cache_create("task_struct", sizeof(struct task_struct),
19282 + ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
19283 +
19284 + task_xstate_cachep =
19285 + kmem_cache_create("task_xstate", xstate_size,
19286 __alignof__(union thread_xstate),
19287 - SLAB_PANIC | SLAB_NOTRACK, NULL);
19288 + SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
19289 +}
19290 +
19291 +struct task_struct *alloc_task_struct(void)
19292 +{
19293 + return kmem_cache_alloc(task_struct_cachep, GFP_KERNEL);
19294 +}
19295 +
19296 +void free_task_struct(struct task_struct *task)
19297 +{
19298 + free_thread_xstate(task);
19299 + kmem_cache_free(task_struct_cachep, task);
19300 }
19301
19302 /*
19303 @@ -73,7 +90,7 @@ void exit_thread(void)
19304 unsigned long *bp = t->io_bitmap_ptr;
19305
19306 if (bp) {
19307 - struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
19308 + struct tss_struct *tss = init_tss + get_cpu();
19309
19310 t->io_bitmap_ptr = NULL;
19311 clear_thread_flag(TIF_IO_BITMAP);
19312 @@ -93,6 +110,9 @@ void flush_thread(void)
19313
19314 clear_tsk_thread_flag(tsk, TIF_DEBUG);
19315
19316 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
19317 + loadsegment(gs, 0);
19318 +#endif
19319 tsk->thread.debugreg0 = 0;
19320 tsk->thread.debugreg1 = 0;
19321 tsk->thread.debugreg2 = 0;
19322 @@ -307,7 +327,7 @@ void default_idle(void)
19323 EXPORT_SYMBOL(default_idle);
19324 #endif
19325
19326 -void stop_this_cpu(void *dummy)
19327 +__noreturn void stop_this_cpu(void *dummy)
19328 {
19329 local_irq_disable();
19330 /*
19331 @@ -568,16 +588,38 @@ static int __init idle_setup(char *str)
19332 }
19333 early_param("idle", idle_setup);
19334
19335 -unsigned long arch_align_stack(unsigned long sp)
19336 +#ifdef CONFIG_PAX_RANDKSTACK
19337 +void pax_randomize_kstack(struct pt_regs *regs)
19338 {
19339 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
19340 - sp -= get_random_int() % 8192;
19341 - return sp & ~0xf;
19342 -}
19343 + struct thread_struct *thread = &current->thread;
19344 + unsigned long time;
19345
19346 -unsigned long arch_randomize_brk(struct mm_struct *mm)
19347 -{
19348 - unsigned long range_end = mm->brk + 0x02000000;
19349 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
19350 + if (!randomize_va_space)
19351 + return;
19352 +
19353 + if (v8086_mode(regs))
19354 + return;
19355 +
19356 + rdtscl(time);
19357 +
19358 + /* P4 seems to return a 0 LSB, ignore it */
19359 +#ifdef CONFIG_MPENTIUM4
19360 + time &= 0x3EUL;
19361 + time <<= 2;
19362 +#elif defined(CONFIG_X86_64)
19363 + time &= 0xFUL;
19364 + time <<= 4;
19365 +#else
19366 + time &= 0x1FUL;
19367 + time <<= 3;
19368 +#endif
19369 +
19370 + thread->sp0 ^= time;
19371 + load_sp0(init_tss + smp_processor_id(), thread);
19372 +
19373 +#ifdef CONFIG_X86_64
19374 + percpu_write(kernel_stack, thread->sp0);
19375 +#endif
19376 }
19377 +#endif
19378
19379 diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
19380 index c40c432..6e1df72 100644
19381 --- a/arch/x86/kernel/process_32.c
19382 +++ b/arch/x86/kernel/process_32.c
19383 @@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
19384 unsigned long thread_saved_pc(struct task_struct *tsk)
19385 {
19386 return ((unsigned long *)tsk->thread.sp)[3];
19387 +//XXX return tsk->thread.eip;
19388 }
19389
19390 #ifndef CONFIG_SMP
19391 @@ -129,15 +130,14 @@ void __show_regs(struct pt_regs *regs, int all)
19392 unsigned short ss, gs;
19393 const char *board;
19394
19395 - if (user_mode_vm(regs)) {
19396 + if (user_mode(regs)) {
19397 sp = regs->sp;
19398 ss = regs->ss & 0xffff;
19399 - gs = get_user_gs(regs);
19400 } else {
19401 sp = (unsigned long) (&regs->sp);
19402 savesegment(ss, ss);
19403 - savesegment(gs, gs);
19404 }
19405 + gs = get_user_gs(regs);
19406
19407 printk("\n");
19408
19409 @@ -210,10 +210,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
19410 regs.bx = (unsigned long) fn;
19411 regs.dx = (unsigned long) arg;
19412
19413 - regs.ds = __USER_DS;
19414 - regs.es = __USER_DS;
19415 + regs.ds = __KERNEL_DS;
19416 + regs.es = __KERNEL_DS;
19417 regs.fs = __KERNEL_PERCPU;
19418 - regs.gs = __KERNEL_STACK_CANARY;
19419 + savesegment(gs, regs.gs);
19420 regs.orig_ax = -1;
19421 regs.ip = (unsigned long) kernel_thread_helper;
19422 regs.cs = __KERNEL_CS | get_kernel_rpl();
19423 @@ -247,13 +247,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
19424 struct task_struct *tsk;
19425 int err;
19426
19427 - childregs = task_pt_regs(p);
19428 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
19429 *childregs = *regs;
19430 childregs->ax = 0;
19431 childregs->sp = sp;
19432
19433 p->thread.sp = (unsigned long) childregs;
19434 p->thread.sp0 = (unsigned long) (childregs+1);
19435 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
19436
19437 p->thread.ip = (unsigned long) ret_from_fork;
19438
19439 @@ -345,7 +346,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
19440 struct thread_struct *prev = &prev_p->thread,
19441 *next = &next_p->thread;
19442 int cpu = smp_processor_id();
19443 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
19444 + struct tss_struct *tss = init_tss + cpu;
19445 bool preload_fpu;
19446
19447 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
19448 @@ -380,6 +381,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
19449 */
19450 lazy_save_gs(prev->gs);
19451
19452 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19453 + __set_fs(task_thread_info(next_p)->addr_limit);
19454 +#endif
19455 +
19456 /*
19457 * Load the per-thread Thread-Local Storage descriptor.
19458 */
19459 @@ -415,6 +420,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
19460 */
19461 arch_end_context_switch(next_p);
19462
19463 + percpu_write(current_task, next_p);
19464 + percpu_write(current_tinfo, &next_p->tinfo);
19465 +
19466 if (preload_fpu)
19467 __math_state_restore();
19468
19469 @@ -424,8 +432,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
19470 if (prev->gs | next->gs)
19471 lazy_load_gs(next->gs);
19472
19473 - percpu_write(current_task, next_p);
19474 -
19475 return prev_p;
19476 }
19477
19478 @@ -495,4 +501,3 @@ unsigned long get_wchan(struct task_struct *p)
19479 } while (count++ < 16);
19480 return 0;
19481 }
19482 -
19483 diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
19484 index 39493bc..196816d 100644
19485 --- a/arch/x86/kernel/process_64.c
19486 +++ b/arch/x86/kernel/process_64.c
19487 @@ -91,7 +91,7 @@ static void __exit_idle(void)
19488 void exit_idle(void)
19489 {
19490 /* idle loop has pid 0 */
19491 - if (current->pid)
19492 + if (task_pid_nr(current))
19493 return;
19494 __exit_idle();
19495 }
19496 @@ -170,7 +170,7 @@ void __show_regs(struct pt_regs *regs, int all)
19497 if (!board)
19498 board = "";
19499 printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s %s\n",
19500 - current->pid, current->comm, print_tainted(),
19501 + task_pid_nr(current), current->comm, print_tainted(),
19502 init_utsname()->release,
19503 (int)strcspn(init_utsname()->version, " "),
19504 init_utsname()->version, board);
19505 @@ -280,8 +280,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
19506 struct pt_regs *childregs;
19507 struct task_struct *me = current;
19508
19509 - childregs = ((struct pt_regs *)
19510 - (THREAD_SIZE + task_stack_page(p))) - 1;
19511 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
19512 *childregs = *regs;
19513
19514 childregs->ax = 0;
19515 @@ -292,6 +291,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
19516 p->thread.sp = (unsigned long) childregs;
19517 p->thread.sp0 = (unsigned long) (childregs+1);
19518 p->thread.usersp = me->thread.usersp;
19519 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
19520
19521 set_tsk_thread_flag(p, TIF_FORK);
19522
19523 @@ -379,7 +379,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
19524 struct thread_struct *prev = &prev_p->thread;
19525 struct thread_struct *next = &next_p->thread;
19526 int cpu = smp_processor_id();
19527 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
19528 + struct tss_struct *tss = init_tss + cpu;
19529 unsigned fsindex, gsindex;
19530 bool preload_fpu;
19531
19532 @@ -475,10 +475,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
19533 prev->usersp = percpu_read(old_rsp);
19534 percpu_write(old_rsp, next->usersp);
19535 percpu_write(current_task, next_p);
19536 + percpu_write(current_tinfo, &next_p->tinfo);
19537
19538 - percpu_write(kernel_stack,
19539 - (unsigned long)task_stack_page(next_p) +
19540 - THREAD_SIZE - KERNEL_STACK_OFFSET);
19541 + percpu_write(kernel_stack, next->sp0);
19542
19543 /*
19544 * Now maybe reload the debug registers and handle I/O bitmaps
19545 @@ -559,12 +558,11 @@ unsigned long get_wchan(struct task_struct *p)
19546 if (!p || p == current || p->state == TASK_RUNNING)
19547 return 0;
19548 stack = (unsigned long)task_stack_page(p);
19549 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
19550 + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
19551 return 0;
19552 fp = *(u64 *)(p->thread.sp);
19553 do {
19554 - if (fp < (unsigned long)stack ||
19555 - fp >= (unsigned long)stack+THREAD_SIZE)
19556 + if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
19557 return 0;
19558 ip = *(u64 *)(fp+8);
19559 if (!in_sched_functions(ip))
19560 diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
19561 index c06acdd..3f5fff5 100644
19562 --- a/arch/x86/kernel/ptrace.c
19563 +++ b/arch/x86/kernel/ptrace.c
19564 @@ -925,7 +925,7 @@ static const struct user_regset_view user_x86_32_view; /* Initialized below. */
19565 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
19566 {
19567 int ret;
19568 - unsigned long __user *datap = (unsigned long __user *)data;
19569 + unsigned long __user *datap = (__force unsigned long __user *)data;
19570
19571 switch (request) {
19572 /* read the word at location addr in the USER area. */
19573 @@ -1012,14 +1012,14 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
19574 if (addr < 0)
19575 return -EIO;
19576 ret = do_get_thread_area(child, addr,
19577 - (struct user_desc __user *) data);
19578 + (__force struct user_desc __user *) data);
19579 break;
19580
19581 case PTRACE_SET_THREAD_AREA:
19582 if (addr < 0)
19583 return -EIO;
19584 ret = do_set_thread_area(child, addr,
19585 - (struct user_desc __user *) data, 0);
19586 + (__force struct user_desc __user *) data, 0);
19587 break;
19588 #endif
19589
19590 @@ -1038,12 +1038,12 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
19591 #ifdef CONFIG_X86_PTRACE_BTS
19592 case PTRACE_BTS_CONFIG:
19593 ret = ptrace_bts_config
19594 - (child, data, (struct ptrace_bts_config __user *)addr);
19595 + (child, data, (__force struct ptrace_bts_config __user *)addr);
19596 break;
19597
19598 case PTRACE_BTS_STATUS:
19599 ret = ptrace_bts_status
19600 - (child, data, (struct ptrace_bts_config __user *)addr);
19601 + (child, data, (__force struct ptrace_bts_config __user *)addr);
19602 break;
19603
19604 case PTRACE_BTS_SIZE:
19605 @@ -1052,7 +1052,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
19606
19607 case PTRACE_BTS_GET:
19608 ret = ptrace_bts_read_record
19609 - (child, data, (struct bts_struct __user *) addr);
19610 + (child, data, (__force struct bts_struct __user *) addr);
19611 break;
19612
19613 case PTRACE_BTS_CLEAR:
19614 @@ -1061,7 +1061,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
19615
19616 case PTRACE_BTS_DRAIN:
19617 ret = ptrace_bts_drain
19618 - (child, data, (struct bts_struct __user *) addr);
19619 + (child, data, (__force struct bts_struct __user *) addr);
19620 break;
19621 #endif /* CONFIG_X86_PTRACE_BTS */
19622
19623 @@ -1450,7 +1450,7 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
19624 info.si_code = si_code;
19625
19626 /* User-mode ip? */
19627 - info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;
19628 + info.si_addr = user_mode(regs) ? (__force void __user *) regs->ip : NULL;
19629
19630 /* Send us the fake SIGTRAP */
19631 force_sig_info(SIGTRAP, &info, tsk);
19632 @@ -1469,7 +1469,7 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
19633 * We must return the syscall number to actually look up in the table.
19634 * This can be -1L to skip running any syscall at all.
19635 */
19636 -asmregparm long syscall_trace_enter(struct pt_regs *regs)
19637 +long syscall_trace_enter(struct pt_regs *regs)
19638 {
19639 long ret = 0;
19640
19641 @@ -1514,7 +1514,7 @@ asmregparm long syscall_trace_enter(struct pt_regs *regs)
19642 return ret ?: regs->orig_ax;
19643 }
19644
19645 -asmregparm void syscall_trace_leave(struct pt_regs *regs)
19646 +void syscall_trace_leave(struct pt_regs *regs)
19647 {
19648 if (unlikely(current->audit_context))
19649 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
19650 diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
19651 index cf98100..e76e03d 100644
19652 --- a/arch/x86/kernel/reboot.c
19653 +++ b/arch/x86/kernel/reboot.c
19654 @@ -33,7 +33,7 @@ void (*pm_power_off)(void);
19655 EXPORT_SYMBOL(pm_power_off);
19656
19657 static const struct desc_ptr no_idt = {};
19658 -static int reboot_mode;
19659 +static unsigned short reboot_mode;
19660 enum reboot_type reboot_type = BOOT_KBD;
19661 int reboot_force;
19662
19663 @@ -292,12 +292,12 @@ core_initcall(reboot_init);
19664 controller to pulse the CPU reset line, which is more thorough, but
19665 doesn't work with at least one type of 486 motherboard. It is easy
19666 to stop this code working; hence the copious comments. */
19667 -static const unsigned long long
19668 -real_mode_gdt_entries [3] =
19669 +static struct desc_struct
19670 +real_mode_gdt_entries [3] __read_only =
19671 {
19672 - 0x0000000000000000ULL, /* Null descriptor */
19673 - 0x00009b000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */
19674 - 0x000093000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */
19675 + GDT_ENTRY_INIT(0, 0, 0), /* Null descriptor */
19676 + GDT_ENTRY_INIT(0x9b, 0, 0xffff), /* 16-bit real-mode 64k code at 0x00000000 */
19677 + GDT_ENTRY_INIT(0x93, 0x100, 0xffff) /* 16-bit real-mode 64k data at 0x00000100 */
19678 };
19679
19680 static const struct desc_ptr
19681 @@ -346,7 +346,7 @@ static const unsigned char jump_to_bios [] =
19682 * specified by the code and length parameters.
19683 * We assume that length will aways be less that 100!
19684 */
19685 -void machine_real_restart(const unsigned char *code, int length)
19686 +__noreturn void machine_real_restart(const unsigned char *code, unsigned int length)
19687 {
19688 local_irq_disable();
19689
19690 @@ -366,8 +366,8 @@ void machine_real_restart(const unsigned char *code, int length)
19691 /* Remap the kernel at virtual address zero, as well as offset zero
19692 from the kernel segment. This assumes the kernel segment starts at
19693 virtual address PAGE_OFFSET. */
19694 - memcpy(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19695 - sizeof(swapper_pg_dir [0]) * KERNEL_PGD_PTRS);
19696 + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19697 + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
19698
19699 /*
19700 * Use `swapper_pg_dir' as our page directory.
19701 @@ -379,16 +379,15 @@ void machine_real_restart(const unsigned char *code, int length)
19702 boot)". This seems like a fairly standard thing that gets set by
19703 REBOOT.COM programs, and the previous reset routine did this
19704 too. */
19705 - *((unsigned short *)0x472) = reboot_mode;
19706 + *(unsigned short *)(__va(0x472)) = reboot_mode;
19707
19708 /* For the switch to real mode, copy some code to low memory. It has
19709 to be in the first 64k because it is running in 16-bit mode, and it
19710 has to have the same physical and virtual address, because it turns
19711 off paging. Copy it near the end of the first page, out of the way
19712 of BIOS variables. */
19713 - memcpy((void *)(0x1000 - sizeof(real_mode_switch) - 100),
19714 - real_mode_switch, sizeof (real_mode_switch));
19715 - memcpy((void *)(0x1000 - 100), code, length);
19716 + memcpy(__va(0x1000 - sizeof (real_mode_switch) - 100), real_mode_switch, sizeof (real_mode_switch));
19717 + memcpy(__va(0x1000 - 100), code, length);
19718
19719 /* Set up the IDT for real mode. */
19720 load_idt(&real_mode_idt);
19721 @@ -416,6 +415,7 @@ void machine_real_restart(const unsigned char *code, int length)
19722 __asm__ __volatile__ ("ljmp $0x0008,%0"
19723 :
19724 : "i" ((void *)(0x1000 - sizeof (real_mode_switch) - 100)));
19725 + do { } while (1);
19726 }
19727 #ifdef CONFIG_APM_MODULE
19728 EXPORT_SYMBOL(machine_real_restart);
19729 @@ -544,7 +544,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
19730 {
19731 }
19732
19733 -static void native_machine_emergency_restart(void)
19734 +__noreturn static void native_machine_emergency_restart(void)
19735 {
19736 int i;
19737
19738 @@ -659,13 +659,13 @@ void native_machine_shutdown(void)
19739 #endif
19740 }
19741
19742 -static void __machine_emergency_restart(int emergency)
19743 +static __noreturn void __machine_emergency_restart(int emergency)
19744 {
19745 reboot_emergency = emergency;
19746 machine_ops.emergency_restart();
19747 }
19748
19749 -static void native_machine_restart(char *__unused)
19750 +static __noreturn void native_machine_restart(char *__unused)
19751 {
19752 printk("machine restart\n");
19753
19754 @@ -674,7 +674,7 @@ static void native_machine_restart(char *__unused)
19755 __machine_emergency_restart(0);
19756 }
19757
19758 -static void native_machine_halt(void)
19759 +static __noreturn void native_machine_halt(void)
19760 {
19761 /* stop other cpus and apics */
19762 machine_shutdown();
19763 @@ -685,7 +685,7 @@ static void native_machine_halt(void)
19764 stop_this_cpu(NULL);
19765 }
19766
19767 -static void native_machine_power_off(void)
19768 +__noreturn static void native_machine_power_off(void)
19769 {
19770 if (pm_power_off) {
19771 if (!reboot_force)
19772 @@ -694,6 +694,7 @@ static void native_machine_power_off(void)
19773 }
19774 /* a fallback in case there is no PM info available */
19775 tboot_shutdown(TB_SHUTDOWN_HALT);
19776 + do { } while (1);
19777 }
19778
19779 struct machine_ops machine_ops = {
19780 diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
19781 index 7a6f3b3..976a959 100644
19782 --- a/arch/x86/kernel/relocate_kernel_64.S
19783 +++ b/arch/x86/kernel/relocate_kernel_64.S
19784 @@ -11,6 +11,7 @@
19785 #include <asm/kexec.h>
19786 #include <asm/processor-flags.h>
19787 #include <asm/pgtable_types.h>
19788 +#include <asm/alternative-asm.h>
19789
19790 /*
19791 * Must be relocatable PIC code callable as a C function
19792 @@ -167,6 +168,7 @@ identity_mapped:
19793 xorq %r14, %r14
19794 xorq %r15, %r15
19795
19796 + pax_force_retaddr 0, 1
19797 ret
19798
19799 1:
19800 diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
19801 index 5449a26..0b6c759 100644
19802 --- a/arch/x86/kernel/setup.c
19803 +++ b/arch/x86/kernel/setup.c
19804 @@ -783,14 +783,14 @@ void __init setup_arch(char **cmdline_p)
19805
19806 if (!boot_params.hdr.root_flags)
19807 root_mountflags &= ~MS_RDONLY;
19808 - init_mm.start_code = (unsigned long) _text;
19809 - init_mm.end_code = (unsigned long) _etext;
19810 + init_mm.start_code = ktla_ktva((unsigned long) _text);
19811 + init_mm.end_code = ktla_ktva((unsigned long) _etext);
19812 init_mm.end_data = (unsigned long) _edata;
19813 init_mm.brk = _brk_end;
19814
19815 - code_resource.start = virt_to_phys(_text);
19816 - code_resource.end = virt_to_phys(_etext)-1;
19817 - data_resource.start = virt_to_phys(_etext);
19818 + code_resource.start = virt_to_phys(ktla_ktva(_text));
19819 + code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
19820 + data_resource.start = virt_to_phys(_sdata);
19821 data_resource.end = virt_to_phys(_edata)-1;
19822 bss_resource.start = virt_to_phys(&__bss_start);
19823 bss_resource.end = virt_to_phys(&__bss_stop)-1;
19824 diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
19825 index d559af9..524c6ad 100644
19826 --- a/arch/x86/kernel/setup_percpu.c
19827 +++ b/arch/x86/kernel/setup_percpu.c
19828 @@ -25,19 +25,17 @@
19829 # define DBG(x...)
19830 #endif
19831
19832 -DEFINE_PER_CPU(int, cpu_number);
19833 +#ifdef CONFIG_SMP
19834 +DEFINE_PER_CPU(unsigned int, cpu_number);
19835 EXPORT_PER_CPU_SYMBOL(cpu_number);
19836 +#endif
19837
19838 -#ifdef CONFIG_X86_64
19839 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
19840 -#else
19841 -#define BOOT_PERCPU_OFFSET 0
19842 -#endif
19843
19844 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
19845 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
19846
19847 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
19848 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
19849 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
19850 };
19851 EXPORT_SYMBOL(__per_cpu_offset);
19852 @@ -159,10 +157,10 @@ static inline void setup_percpu_segment(int cpu)
19853 {
19854 #ifdef CONFIG_X86_32
19855 struct desc_struct gdt;
19856 + unsigned long base = per_cpu_offset(cpu);
19857
19858 - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
19859 - 0x2 | DESCTYPE_S, 0x8);
19860 - gdt.s = 1;
19861 + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
19862 + 0x83 | DESCTYPE_S, 0xC);
19863 write_gdt_entry(get_cpu_gdt_table(cpu),
19864 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
19865 #endif
19866 @@ -212,6 +210,11 @@ void __init setup_per_cpu_areas(void)
19867 /* alrighty, percpu areas up and running */
19868 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
19869 for_each_possible_cpu(cpu) {
19870 +#ifdef CONFIG_CC_STACKPROTECTOR
19871 +#ifdef CONFIG_X86_32
19872 + unsigned long canary = per_cpu(stack_canary.canary, cpu);
19873 +#endif
19874 +#endif
19875 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
19876 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
19877 per_cpu(cpu_number, cpu) = cpu;
19878 @@ -239,6 +242,12 @@ void __init setup_per_cpu_areas(void)
19879 early_per_cpu_map(x86_cpu_to_node_map, cpu);
19880 #endif
19881 #endif
19882 +#ifdef CONFIG_CC_STACKPROTECTOR
19883 +#ifdef CONFIG_X86_32
19884 + if (!cpu)
19885 + per_cpu(stack_canary.canary, cpu) = canary;
19886 +#endif
19887 +#endif
19888 /*
19889 * Up to this point, the boot CPU has been using .data.init
19890 * area. Reload any changed state for the boot CPU.
19891 diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
19892 index 6a44a76..a9287a1 100644
19893 --- a/arch/x86/kernel/signal.c
19894 +++ b/arch/x86/kernel/signal.c
19895 @@ -197,7 +197,7 @@ static unsigned long align_sigframe(unsigned long sp)
19896 * Align the stack pointer according to the i386 ABI,
19897 * i.e. so that on function entry ((sp + 4) & 15) == 0.
19898 */
19899 - sp = ((sp + 4) & -16ul) - 4;
19900 + sp = ((sp - 12) & -16ul) - 4;
19901 #else /* !CONFIG_X86_32 */
19902 sp = round_down(sp, 16) - 8;
19903 #endif
19904 @@ -248,11 +248,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
19905 * Return an always-bogus address instead so we will die with SIGSEGV.
19906 */
19907 if (onsigstack && !likely(on_sig_stack(sp)))
19908 - return (void __user *)-1L;
19909 + return (__force void __user *)-1L;
19910
19911 /* save i387 state */
19912 if (used_math() && save_i387_xstate(*fpstate) < 0)
19913 - return (void __user *)-1L;
19914 + return (__force void __user *)-1L;
19915
19916 return (void __user *)sp;
19917 }
19918 @@ -307,9 +307,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
19919 }
19920
19921 if (current->mm->context.vdso)
19922 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
19923 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
19924 else
19925 - restorer = &frame->retcode;
19926 + restorer = (void __user *)&frame->retcode;
19927 if (ka->sa.sa_flags & SA_RESTORER)
19928 restorer = ka->sa.sa_restorer;
19929
19930 @@ -323,7 +323,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
19931 * reasons and because gdb uses it as a signature to notice
19932 * signal handler stack frames.
19933 */
19934 - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
19935 + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
19936
19937 if (err)
19938 return -EFAULT;
19939 @@ -377,7 +377,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
19940 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
19941
19942 /* Set up to return from userspace. */
19943 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
19944 + if (current->mm->context.vdso)
19945 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
19946 + else
19947 + restorer = (void __user *)&frame->retcode;
19948 if (ka->sa.sa_flags & SA_RESTORER)
19949 restorer = ka->sa.sa_restorer;
19950 put_user_ex(restorer, &frame->pretcode);
19951 @@ -389,7 +392,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
19952 * reasons and because gdb uses it as a signature to notice
19953 * signal handler stack frames.
19954 */
19955 - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
19956 + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
19957 } put_user_catch(err);
19958
19959 if (err)
19960 @@ -782,6 +785,8 @@ static void do_signal(struct pt_regs *regs)
19961 int signr;
19962 sigset_t *oldset;
19963
19964 + pax_track_stack();
19965 +
19966 /*
19967 * We want the common case to go fast, which is why we may in certain
19968 * cases get here from kernel mode. Just return without doing anything
19969 @@ -789,7 +794,7 @@ static void do_signal(struct pt_regs *regs)
19970 * X86_32: vm86 regs switched out by assembly code before reaching
19971 * here, so testing against kernel CS suffices.
19972 */
19973 - if (!user_mode(regs))
19974 + if (!user_mode_novm(regs))
19975 return;
19976
19977 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
19978 diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
19979 index 7e8e905..64d5c32 100644
19980 --- a/arch/x86/kernel/smpboot.c
19981 +++ b/arch/x86/kernel/smpboot.c
19982 @@ -94,14 +94,14 @@ static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
19983 */
19984 static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
19985
19986 -void cpu_hotplug_driver_lock()
19987 +void cpu_hotplug_driver_lock(void)
19988 {
19989 - mutex_lock(&x86_cpu_hotplug_driver_mutex);
19990 + mutex_lock(&x86_cpu_hotplug_driver_mutex);
19991 }
19992
19993 -void cpu_hotplug_driver_unlock()
19994 +void cpu_hotplug_driver_unlock(void)
19995 {
19996 - mutex_unlock(&x86_cpu_hotplug_driver_mutex);
19997 + mutex_unlock(&x86_cpu_hotplug_driver_mutex);
19998 }
19999
20000 ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
20001 @@ -625,7 +625,7 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
20002 * target processor state.
20003 */
20004 startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
20005 - (unsigned long)stack_start.sp);
20006 + stack_start);
20007
20008 /*
20009 * Run STARTUP IPI loop.
20010 @@ -743,6 +743,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
20011 set_idle_for_cpu(cpu, c_idle.idle);
20012 do_rest:
20013 per_cpu(current_task, cpu) = c_idle.idle;
20014 + per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
20015 #ifdef CONFIG_X86_32
20016 /* Stack for startup_32 can be just as for start_secondary onwards */
20017 irq_ctx_init(cpu);
20018 @@ -750,13 +751,15 @@ do_rest:
20019 #else
20020 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
20021 initial_gs = per_cpu_offset(cpu);
20022 - per_cpu(kernel_stack, cpu) =
20023 - (unsigned long)task_stack_page(c_idle.idle) -
20024 - KERNEL_STACK_OFFSET + THREAD_SIZE;
20025 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
20026 #endif
20027 +
20028 + pax_open_kernel();
20029 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
20030 + pax_close_kernel();
20031 +
20032 initial_code = (unsigned long)start_secondary;
20033 - stack_start.sp = (void *) c_idle.idle->thread.sp;
20034 + stack_start = c_idle.idle->thread.sp;
20035
20036 /* start_ip had better be page-aligned! */
20037 start_ip = setup_trampoline();
20038 @@ -891,6 +894,12 @@ int __cpuinit native_cpu_up(unsigned int cpu)
20039
20040 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
20041
20042 +#ifdef CONFIG_PAX_PER_CPU_PGD
20043 + clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
20044 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
20045 + KERNEL_PGD_PTRS);
20046 +#endif
20047 +
20048 err = do_boot_cpu(apicid, cpu);
20049
20050 if (err) {
20051 diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
20052 index 3149032..14f1053 100644
20053 --- a/arch/x86/kernel/step.c
20054 +++ b/arch/x86/kernel/step.c
20055 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
20056 struct desc_struct *desc;
20057 unsigned long base;
20058
20059 - seg &= ~7UL;
20060 + seg >>= 3;
20061
20062 mutex_lock(&child->mm->context.lock);
20063 - if (unlikely((seg >> 3) >= child->mm->context.size))
20064 + if (unlikely(seg >= child->mm->context.size))
20065 addr = -1L; /* bogus selector, access would fault */
20066 else {
20067 desc = child->mm->context.ldt + seg;
20068 @@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
20069 addr += base;
20070 }
20071 mutex_unlock(&child->mm->context.lock);
20072 - }
20073 + } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
20074 + addr = ktla_ktva(addr);
20075
20076 return addr;
20077 }
20078 @@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
20079 unsigned char opcode[15];
20080 unsigned long addr = convert_ip_to_linear(child, regs);
20081
20082 + if (addr == -EINVAL)
20083 + return 0;
20084 +
20085 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
20086 for (i = 0; i < copied; i++) {
20087 switch (opcode[i]) {
20088 @@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
20089
20090 #ifdef CONFIG_X86_64
20091 case 0x40 ... 0x4f:
20092 - if (regs->cs != __USER_CS)
20093 + if ((regs->cs & 0xffff) != __USER_CS)
20094 /* 32-bit mode: register increment */
20095 return 0;
20096 /* 64-bit mode: REX prefix */
20097 diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
20098 index dee1ff7..a397f7f 100644
20099 --- a/arch/x86/kernel/sys_i386_32.c
20100 +++ b/arch/x86/kernel/sys_i386_32.c
20101 @@ -24,6 +24,21 @@
20102
20103 #include <asm/syscalls.h>
20104
20105 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
20106 +{
20107 + unsigned long pax_task_size = TASK_SIZE;
20108 +
20109 +#ifdef CONFIG_PAX_SEGMEXEC
20110 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
20111 + pax_task_size = SEGMEXEC_TASK_SIZE;
20112 +#endif
20113 +
20114 + if (len > pax_task_size || addr > pax_task_size - len)
20115 + return -EINVAL;
20116 +
20117 + return 0;
20118 +}
20119 +
20120 /*
20121 * Perform the select(nd, in, out, ex, tv) and mmap() system
20122 * calls. Linux/i386 didn't use to be able to handle more than
20123 @@ -58,6 +73,212 @@ out:
20124 return err;
20125 }
20126
20127 +unsigned long
20128 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
20129 + unsigned long len, unsigned long pgoff, unsigned long flags)
20130 +{
20131 + struct mm_struct *mm = current->mm;
20132 + struct vm_area_struct *vma;
20133 + unsigned long start_addr, pax_task_size = TASK_SIZE;
20134 +
20135 +#ifdef CONFIG_PAX_SEGMEXEC
20136 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
20137 + pax_task_size = SEGMEXEC_TASK_SIZE;
20138 +#endif
20139 +
20140 + pax_task_size -= PAGE_SIZE;
20141 +
20142 + if (len > pax_task_size)
20143 + return -ENOMEM;
20144 +
20145 + if (flags & MAP_FIXED)
20146 + return addr;
20147 +
20148 +#ifdef CONFIG_PAX_RANDMMAP
20149 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
20150 +#endif
20151 +
20152 + if (addr) {
20153 + addr = PAGE_ALIGN(addr);
20154 + if (pax_task_size - len >= addr) {
20155 + vma = find_vma(mm, addr);
20156 + if (check_heap_stack_gap(vma, addr, len))
20157 + return addr;
20158 + }
20159 + }
20160 + if (len > mm->cached_hole_size) {
20161 + start_addr = addr = mm->free_area_cache;
20162 + } else {
20163 + start_addr = addr = mm->mmap_base;
20164 + mm->cached_hole_size = 0;
20165 + }
20166 +
20167 +#ifdef CONFIG_PAX_PAGEEXEC
20168 + if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
20169 + start_addr = 0x00110000UL;
20170 +
20171 +#ifdef CONFIG_PAX_RANDMMAP
20172 + if (mm->pax_flags & MF_PAX_RANDMMAP)
20173 + start_addr += mm->delta_mmap & 0x03FFF000UL;
20174 +#endif
20175 +
20176 + if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
20177 + start_addr = addr = mm->mmap_base;
20178 + else
20179 + addr = start_addr;
20180 + }
20181 +#endif
20182 +
20183 +full_search:
20184 + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
20185 + /* At this point: (!vma || addr < vma->vm_end). */
20186 + if (pax_task_size - len < addr) {
20187 + /*
20188 + * Start a new search - just in case we missed
20189 + * some holes.
20190 + */
20191 + if (start_addr != mm->mmap_base) {
20192 + start_addr = addr = mm->mmap_base;
20193 + mm->cached_hole_size = 0;
20194 + goto full_search;
20195 + }
20196 + return -ENOMEM;
20197 + }
20198 + if (check_heap_stack_gap(vma, addr, len))
20199 + break;
20200 + if (addr + mm->cached_hole_size < vma->vm_start)
20201 + mm->cached_hole_size = vma->vm_start - addr;
20202 + addr = vma->vm_end;
20203 + if (mm->start_brk <= addr && addr < mm->mmap_base) {
20204 + start_addr = addr = mm->mmap_base;
20205 + mm->cached_hole_size = 0;
20206 + goto full_search;
20207 + }
20208 + }
20209 +
20210 + /*
20211 + * Remember the place where we stopped the search:
20212 + */
20213 + mm->free_area_cache = addr + len;
20214 + return addr;
20215 +}
20216 +
20217 +unsigned long
20218 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
20219 + const unsigned long len, const unsigned long pgoff,
20220 + const unsigned long flags)
20221 +{
20222 + struct vm_area_struct *vma;
20223 + struct mm_struct *mm = current->mm;
20224 + unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
20225 +
20226 +#ifdef CONFIG_PAX_SEGMEXEC
20227 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
20228 + pax_task_size = SEGMEXEC_TASK_SIZE;
20229 +#endif
20230 +
20231 + pax_task_size -= PAGE_SIZE;
20232 +
20233 + /* requested length too big for entire address space */
20234 + if (len > pax_task_size)
20235 + return -ENOMEM;
20236 +
20237 + if (flags & MAP_FIXED)
20238 + return addr;
20239 +
20240 +#ifdef CONFIG_PAX_PAGEEXEC
20241 + if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
20242 + goto bottomup;
20243 +#endif
20244 +
20245 +#ifdef CONFIG_PAX_RANDMMAP
20246 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
20247 +#endif
20248 +
20249 + /* requesting a specific address */
20250 + if (addr) {
20251 + addr = PAGE_ALIGN(addr);
20252 + if (pax_task_size - len >= addr) {
20253 + vma = find_vma(mm, addr);
20254 + if (check_heap_stack_gap(vma, addr, len))
20255 + return addr;
20256 + }
20257 + }
20258 +
20259 + /* check if free_area_cache is useful for us */
20260 + if (len <= mm->cached_hole_size) {
20261 + mm->cached_hole_size = 0;
20262 + mm->free_area_cache = mm->mmap_base;
20263 + }
20264 +
20265 + /* either no address requested or can't fit in requested address hole */
20266 + addr = mm->free_area_cache;
20267 +
20268 + /* make sure it can fit in the remaining address space */
20269 + if (addr > len) {
20270 + vma = find_vma(mm, addr-len);
20271 + if (check_heap_stack_gap(vma, addr - len, len))
20272 + /* remember the address as a hint for next time */
20273 + return (mm->free_area_cache = addr-len);
20274 + }
20275 +
20276 + if (mm->mmap_base < len)
20277 + goto bottomup;
20278 +
20279 + addr = mm->mmap_base-len;
20280 +
20281 + do {
20282 + /*
20283 + * Lookup failure means no vma is above this address,
20284 + * else if new region fits below vma->vm_start,
20285 + * return with success:
20286 + */
20287 + vma = find_vma(mm, addr);
20288 + if (check_heap_stack_gap(vma, addr, len))
20289 + /* remember the address as a hint for next time */
20290 + return (mm->free_area_cache = addr);
20291 +
20292 + /* remember the largest hole we saw so far */
20293 + if (addr + mm->cached_hole_size < vma->vm_start)
20294 + mm->cached_hole_size = vma->vm_start - addr;
20295 +
20296 + /* try just below the current vma->vm_start */
20297 + addr = skip_heap_stack_gap(vma, len);
20298 + } while (!IS_ERR_VALUE(addr));
20299 +
20300 +bottomup:
20301 + /*
20302 + * A failed mmap() very likely causes application failure,
20303 + * so fall back to the bottom-up function here. This scenario
20304 + * can happen with large stack limits and large mmap()
20305 + * allocations.
20306 + */
20307 +
20308 +#ifdef CONFIG_PAX_SEGMEXEC
20309 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
20310 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
20311 + else
20312 +#endif
20313 +
20314 + mm->mmap_base = TASK_UNMAPPED_BASE;
20315 +
20316 +#ifdef CONFIG_PAX_RANDMMAP
20317 + if (mm->pax_flags & MF_PAX_RANDMMAP)
20318 + mm->mmap_base += mm->delta_mmap;
20319 +#endif
20320 +
20321 + mm->free_area_cache = mm->mmap_base;
20322 + mm->cached_hole_size = ~0UL;
20323 + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
20324 + /*
20325 + * Restore the topdown base:
20326 + */
20327 + mm->mmap_base = base;
20328 + mm->free_area_cache = base;
20329 + mm->cached_hole_size = ~0UL;
20330 +
20331 + return addr;
20332 +}
20333
20334 struct sel_arg_struct {
20335 unsigned long n;
20336 @@ -93,7 +314,7 @@ asmlinkage int sys_ipc(uint call, int first, int second,
20337 return sys_semtimedop(first, (struct sembuf __user *)ptr, second, NULL);
20338 case SEMTIMEDOP:
20339 return sys_semtimedop(first, (struct sembuf __user *)ptr, second,
20340 - (const struct timespec __user *)fifth);
20341 + (__force const struct timespec __user *)fifth);
20342
20343 case SEMGET:
20344 return sys_semget(first, second, third);
20345 @@ -140,7 +361,7 @@ asmlinkage int sys_ipc(uint call, int first, int second,
20346 ret = do_shmat(first, (char __user *) ptr, second, &raddr);
20347 if (ret)
20348 return ret;
20349 - return put_user(raddr, (ulong __user *) third);
20350 + return put_user(raddr, (__force ulong __user *) third);
20351 }
20352 case 1: /* iBCS2 emulator entry point */
20353 if (!segment_eq(get_fs(), get_ds()))
20354 @@ -207,17 +428,3 @@ asmlinkage int sys_olduname(struct oldold_utsname __user *name)
20355
20356 return error;
20357 }
20358 -
20359 -
20360 -/*
20361 - * Do a system call from kernel instead of calling sys_execve so we
20362 - * end up with proper pt_regs.
20363 - */
20364 -int kernel_execve(const char *filename, char *const argv[], char *const envp[])
20365 -{
20366 - long __res;
20367 - asm volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx"
20368 - : "=a" (__res)
20369 - : "0" (__NR_execve), "ri" (filename), "c" (argv), "d" (envp) : "memory");
20370 - return __res;
20371 -}
20372 diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
20373 index 8aa2057..b604bc1 100644
20374 --- a/arch/x86/kernel/sys_x86_64.c
20375 +++ b/arch/x86/kernel/sys_x86_64.c
20376 @@ -32,8 +32,8 @@ out:
20377 return error;
20378 }
20379
20380 -static void find_start_end(unsigned long flags, unsigned long *begin,
20381 - unsigned long *end)
20382 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
20383 + unsigned long *begin, unsigned long *end)
20384 {
20385 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
20386 unsigned long new_begin;
20387 @@ -52,7 +52,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
20388 *begin = new_begin;
20389 }
20390 } else {
20391 - *begin = TASK_UNMAPPED_BASE;
20392 + *begin = mm->mmap_base;
20393 *end = TASK_SIZE;
20394 }
20395 }
20396 @@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
20397 if (flags & MAP_FIXED)
20398 return addr;
20399
20400 - find_start_end(flags, &begin, &end);
20401 + find_start_end(mm, flags, &begin, &end);
20402
20403 if (len > end)
20404 return -ENOMEM;
20405
20406 +#ifdef CONFIG_PAX_RANDMMAP
20407 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
20408 +#endif
20409 +
20410 if (addr) {
20411 addr = PAGE_ALIGN(addr);
20412 vma = find_vma(mm, addr);
20413 - if (end - len >= addr &&
20414 - (!vma || addr + len <= vma->vm_start))
20415 + if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
20416 return addr;
20417 }
20418 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
20419 @@ -106,7 +109,7 @@ full_search:
20420 }
20421 return -ENOMEM;
20422 }
20423 - if (!vma || addr + len <= vma->vm_start) {
20424 + if (check_heap_stack_gap(vma, addr, len)) {
20425 /*
20426 * Remember the place where we stopped the search:
20427 */
20428 @@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
20429 {
20430 struct vm_area_struct *vma;
20431 struct mm_struct *mm = current->mm;
20432 - unsigned long addr = addr0;
20433 + unsigned long base = mm->mmap_base, addr = addr0;
20434
20435 /* requested length too big for entire address space */
20436 if (len > TASK_SIZE)
20437 @@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
20438 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
20439 goto bottomup;
20440
20441 +#ifdef CONFIG_PAX_RANDMMAP
20442 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
20443 +#endif
20444 +
20445 /* requesting a specific address */
20446 if (addr) {
20447 addr = PAGE_ALIGN(addr);
20448 - vma = find_vma(mm, addr);
20449 - if (TASK_SIZE - len >= addr &&
20450 - (!vma || addr + len <= vma->vm_start))
20451 - return addr;
20452 + if (TASK_SIZE - len >= addr) {
20453 + vma = find_vma(mm, addr);
20454 + if (check_heap_stack_gap(vma, addr, len))
20455 + return addr;
20456 + }
20457 }
20458
20459 /* check if free_area_cache is useful for us */
20460 @@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
20461 /* make sure it can fit in the remaining address space */
20462 if (addr > len) {
20463 vma = find_vma(mm, addr-len);
20464 - if (!vma || addr <= vma->vm_start)
20465 + if (check_heap_stack_gap(vma, addr - len, len))
20466 /* remember the address as a hint for next time */
20467 return mm->free_area_cache = addr-len;
20468 }
20469 @@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
20470 * return with success:
20471 */
20472 vma = find_vma(mm, addr);
20473 - if (!vma || addr+len <= vma->vm_start)
20474 + if (check_heap_stack_gap(vma, addr, len))
20475 /* remember the address as a hint for next time */
20476 return mm->free_area_cache = addr;
20477
20478 @@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
20479 mm->cached_hole_size = vma->vm_start - addr;
20480
20481 /* try just below the current vma->vm_start */
20482 - addr = vma->vm_start-len;
20483 - } while (len < vma->vm_start);
20484 + addr = skip_heap_stack_gap(vma, len);
20485 + } while (!IS_ERR_VALUE(addr));
20486
20487 bottomup:
20488 /*
20489 @@ -198,13 +206,21 @@ bottomup:
20490 * can happen with large stack limits and large mmap()
20491 * allocations.
20492 */
20493 + mm->mmap_base = TASK_UNMAPPED_BASE;
20494 +
20495 +#ifdef CONFIG_PAX_RANDMMAP
20496 + if (mm->pax_flags & MF_PAX_RANDMMAP)
20497 + mm->mmap_base += mm->delta_mmap;
20498 +#endif
20499 +
20500 + mm->free_area_cache = mm->mmap_base;
20501 mm->cached_hole_size = ~0UL;
20502 - mm->free_area_cache = TASK_UNMAPPED_BASE;
20503 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
20504 /*
20505 * Restore the topdown base:
20506 */
20507 - mm->free_area_cache = mm->mmap_base;
20508 + mm->mmap_base = base;
20509 + mm->free_area_cache = base;
20510 mm->cached_hole_size = ~0UL;
20511
20512 return addr;
20513 diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
20514 index 76d70a4..4c94a44 100644
20515 --- a/arch/x86/kernel/syscall_table_32.S
20516 +++ b/arch/x86/kernel/syscall_table_32.S
20517 @@ -1,3 +1,4 @@
20518 +.section .rodata,"a",@progbits
20519 ENTRY(sys_call_table)
20520 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
20521 .long sys_exit
20522 diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
20523 index 46b8277..3349d55 100644
20524 --- a/arch/x86/kernel/tboot.c
20525 +++ b/arch/x86/kernel/tboot.c
20526 @@ -216,7 +216,7 @@ static int tboot_setup_sleep(void)
20527
20528 void tboot_shutdown(u32 shutdown_type)
20529 {
20530 - void (*shutdown)(void);
20531 + void (* __noreturn shutdown)(void);
20532
20533 if (!tboot_enabled())
20534 return;
20535 @@ -238,7 +238,7 @@ void tboot_shutdown(u32 shutdown_type)
20536
20537 switch_to_tboot_pt();
20538
20539 - shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
20540 + shutdown = (void *)tboot->shutdown_entry;
20541 shutdown();
20542
20543 /* should not reach here */
20544 @@ -295,7 +295,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
20545 tboot_shutdown(acpi_shutdown_map[sleep_state]);
20546 }
20547
20548 -static atomic_t ap_wfs_count;
20549 +static atomic_unchecked_t ap_wfs_count;
20550
20551 static int tboot_wait_for_aps(int num_aps)
20552 {
20553 @@ -319,9 +319,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
20554 {
20555 switch (action) {
20556 case CPU_DYING:
20557 - atomic_inc(&ap_wfs_count);
20558 + atomic_inc_unchecked(&ap_wfs_count);
20559 if (num_online_cpus() == 1)
20560 - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
20561 + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
20562 return NOTIFY_BAD;
20563 break;
20564 }
20565 @@ -340,7 +340,7 @@ static __init int tboot_late_init(void)
20566
20567 tboot_create_trampoline();
20568
20569 - atomic_set(&ap_wfs_count, 0);
20570 + atomic_set_unchecked(&ap_wfs_count, 0);
20571 register_hotcpu_notifier(&tboot_cpu_notifier);
20572 return 0;
20573 }
20574 diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
20575 index be25734..87fe232 100644
20576 --- a/arch/x86/kernel/time.c
20577 +++ b/arch/x86/kernel/time.c
20578 @@ -26,17 +26,13 @@
20579 int timer_ack;
20580 #endif
20581
20582 -#ifdef CONFIG_X86_64
20583 -volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
20584 -#endif
20585 -
20586 unsigned long profile_pc(struct pt_regs *regs)
20587 {
20588 unsigned long pc = instruction_pointer(regs);
20589
20590 - if (!user_mode_vm(regs) && in_lock_functions(pc)) {
20591 + if (!user_mode(regs) && in_lock_functions(pc)) {
20592 #ifdef CONFIG_FRAME_POINTER
20593 - return *(unsigned long *)(regs->bp + sizeof(long));
20594 + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
20595 #else
20596 unsigned long *sp =
20597 (unsigned long *)kernel_stack_pointer(regs);
20598 @@ -45,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
20599 * or above a saved flags. Eflags has bits 22-31 zero,
20600 * kernel addresses don't.
20601 */
20602 +
20603 +#ifdef CONFIG_PAX_KERNEXEC
20604 + return ktla_ktva(sp[0]);
20605 +#else
20606 if (sp[0] >> 22)
20607 return sp[0];
20608 if (sp[1] >> 22)
20609 return sp[1];
20610 #endif
20611 +
20612 +#endif
20613 }
20614 return pc;
20615 }
20616 diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
20617 index 6bb7b85..dd853e1 100644
20618 --- a/arch/x86/kernel/tls.c
20619 +++ b/arch/x86/kernel/tls.c
20620 @@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
20621 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
20622 return -EINVAL;
20623
20624 +#ifdef CONFIG_PAX_SEGMEXEC
20625 + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
20626 + return -EINVAL;
20627 +#endif
20628 +
20629 set_tls_desc(p, idx, &info, 1);
20630
20631 return 0;
20632 diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
20633 index 8508237..229b664 100644
20634 --- a/arch/x86/kernel/trampoline_32.S
20635 +++ b/arch/x86/kernel/trampoline_32.S
20636 @@ -32,6 +32,12 @@
20637 #include <asm/segment.h>
20638 #include <asm/page_types.h>
20639
20640 +#ifdef CONFIG_PAX_KERNEXEC
20641 +#define ta(X) (X)
20642 +#else
20643 +#define ta(X) ((X) - __PAGE_OFFSET)
20644 +#endif
20645 +
20646 /* We can free up trampoline after bootup if cpu hotplug is not supported. */
20647 __CPUINITRODATA
20648 .code16
20649 @@ -60,7 +66,7 @@ r_base = .
20650 inc %ax # protected mode (PE) bit
20651 lmsw %ax # into protected mode
20652 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
20653 - ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
20654 + ljmpl $__BOOT_CS, $ta(startup_32_smp)
20655
20656 # These need to be in the same 64K segment as the above;
20657 # hence we don't use the boot_gdt_descr defined in head.S
20658 diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
20659 index 3af2dff..ba8aa49 100644
20660 --- a/arch/x86/kernel/trampoline_64.S
20661 +++ b/arch/x86/kernel/trampoline_64.S
20662 @@ -91,7 +91,7 @@ startup_32:
20663 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
20664 movl %eax, %ds
20665
20666 - movl $X86_CR4_PAE, %eax
20667 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
20668 movl %eax, %cr4 # Enable PAE mode
20669
20670 # Setup trampoline 4 level pagetables
20671 @@ -127,7 +127,7 @@ startup_64:
20672 no_longmode:
20673 hlt
20674 jmp no_longmode
20675 -#include "verify_cpu_64.S"
20676 +#include "verify_cpu.S"
20677
20678 # Careful these need to be in the same 64K segment as the above;
20679 tidt:
20680 @@ -138,7 +138,7 @@ tidt:
20681 # so the kernel can live anywhere
20682 .balign 4
20683 tgdt:
20684 - .short tgdt_end - tgdt # gdt limit
20685 + .short tgdt_end - tgdt - 1 # gdt limit
20686 .long tgdt - r_base
20687 .short 0
20688 .quad 0x00cf9b000000ffff # __KERNEL32_CS
20689 diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
20690 index 7e37dce..ec3f8e5 100644
20691 --- a/arch/x86/kernel/traps.c
20692 +++ b/arch/x86/kernel/traps.c
20693 @@ -69,12 +69,6 @@ asmlinkage int system_call(void);
20694
20695 /* Do we ignore FPU interrupts ? */
20696 char ignore_fpu_irq;
20697 -
20698 -/*
20699 - * The IDT has to be page-aligned to simplify the Pentium
20700 - * F0 0F bug workaround.
20701 - */
20702 -gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
20703 #endif
20704
20705 DECLARE_BITMAP(used_vectors, NR_VECTORS);
20706 @@ -112,19 +106,19 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
20707 static inline void
20708 die_if_kernel(const char *str, struct pt_regs *regs, long err)
20709 {
20710 - if (!user_mode_vm(regs))
20711 + if (!user_mode(regs))
20712 die(str, regs, err);
20713 }
20714 #endif
20715
20716 static void __kprobes
20717 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
20718 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
20719 long error_code, siginfo_t *info)
20720 {
20721 struct task_struct *tsk = current;
20722
20723 #ifdef CONFIG_X86_32
20724 - if (regs->flags & X86_VM_MASK) {
20725 + if (v8086_mode(regs)) {
20726 /*
20727 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
20728 * On nmi (interrupt 2), do_trap should not be called.
20729 @@ -135,7 +129,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
20730 }
20731 #endif
20732
20733 - if (!user_mode(regs))
20734 + if (!user_mode_novm(regs))
20735 goto kernel_trap;
20736
20737 #ifdef CONFIG_X86_32
20738 @@ -158,7 +152,7 @@ trap_signal:
20739 printk_ratelimit()) {
20740 printk(KERN_INFO
20741 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
20742 - tsk->comm, tsk->pid, str,
20743 + tsk->comm, task_pid_nr(tsk), str,
20744 regs->ip, regs->sp, error_code);
20745 print_vma_addr(" in ", regs->ip);
20746 printk("\n");
20747 @@ -175,8 +169,20 @@ kernel_trap:
20748 if (!fixup_exception(regs)) {
20749 tsk->thread.error_code = error_code;
20750 tsk->thread.trap_no = trapnr;
20751 +
20752 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20753 + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
20754 + str = "PAX: suspicious stack segment fault";
20755 +#endif
20756 +
20757 die(str, regs, error_code);
20758 }
20759 +
20760 +#ifdef CONFIG_PAX_REFCOUNT
20761 + if (trapnr == 4)
20762 + pax_report_refcount_overflow(regs);
20763 +#endif
20764 +
20765 return;
20766
20767 #ifdef CONFIG_X86_32
20768 @@ -265,14 +271,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
20769 conditional_sti(regs);
20770
20771 #ifdef CONFIG_X86_32
20772 - if (regs->flags & X86_VM_MASK)
20773 + if (v8086_mode(regs))
20774 goto gp_in_vm86;
20775 #endif
20776
20777 tsk = current;
20778 - if (!user_mode(regs))
20779 + if (!user_mode_novm(regs))
20780 goto gp_in_kernel;
20781
20782 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
20783 + if (!nx_enabled && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
20784 + struct mm_struct *mm = tsk->mm;
20785 + unsigned long limit;
20786 +
20787 + down_write(&mm->mmap_sem);
20788 + limit = mm->context.user_cs_limit;
20789 + if (limit < TASK_SIZE) {
20790 + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
20791 + up_write(&mm->mmap_sem);
20792 + return;
20793 + }
20794 + up_write(&mm->mmap_sem);
20795 + }
20796 +#endif
20797 +
20798 tsk->thread.error_code = error_code;
20799 tsk->thread.trap_no = 13;
20800
20801 @@ -305,6 +327,13 @@ gp_in_kernel:
20802 if (notify_die(DIE_GPF, "general protection fault", regs,
20803 error_code, 13, SIGSEGV) == NOTIFY_STOP)
20804 return;
20805 +
20806 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20807 + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
20808 + die("PAX: suspicious general protection fault", regs, error_code);
20809 + else
20810 +#endif
20811 +
20812 die("general protection fault", regs, error_code);
20813 }
20814
20815 @@ -435,6 +464,17 @@ static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
20816 dotraplinkage notrace __kprobes void
20817 do_nmi(struct pt_regs *regs, long error_code)
20818 {
20819 +
20820 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20821 + if (!user_mode(regs)) {
20822 + unsigned long cs = regs->cs & 0xFFFF;
20823 + unsigned long ip = ktva_ktla(regs->ip);
20824 +
20825 + if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
20826 + regs->ip = ip;
20827 + }
20828 +#endif
20829 +
20830 nmi_enter();
20831
20832 inc_irq_stat(__nmi_count);
20833 @@ -558,7 +598,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
20834 }
20835
20836 #ifdef CONFIG_X86_32
20837 - if (regs->flags & X86_VM_MASK)
20838 + if (v8086_mode(regs))
20839 goto debug_vm86;
20840 #endif
20841
20842 @@ -570,7 +610,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
20843 * kernel space (but re-enable TF when returning to user mode).
20844 */
20845 if (condition & DR_STEP) {
20846 - if (!user_mode(regs))
20847 + if (!user_mode_novm(regs))
20848 goto clear_TF_reenable;
20849 }
20850
20851 @@ -757,7 +797,7 @@ do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
20852 * Handle strange cache flush from user space exception
20853 * in all other cases. This is undocumented behaviour.
20854 */
20855 - if (regs->flags & X86_VM_MASK) {
20856 + if (v8086_mode(regs)) {
20857 handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
20858 return;
20859 }
20860 @@ -798,7 +838,7 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
20861 void __math_state_restore(void)
20862 {
20863 struct thread_info *thread = current_thread_info();
20864 - struct task_struct *tsk = thread->task;
20865 + struct task_struct *tsk = current;
20866
20867 /*
20868 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
20869 @@ -825,8 +865,7 @@ void __math_state_restore(void)
20870 */
20871 asmlinkage void math_state_restore(void)
20872 {
20873 - struct thread_info *thread = current_thread_info();
20874 - struct task_struct *tsk = thread->task;
20875 + struct task_struct *tsk = current;
20876
20877 if (!tsk_used_math(tsk)) {
20878 local_irq_enable();
20879 diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
20880 new file mode 100644
20881 index 0000000..50c5edd
20882 --- /dev/null
20883 +++ b/arch/x86/kernel/verify_cpu.S
20884 @@ -0,0 +1,140 @@
20885 +/*
20886 + *
20887 + * verify_cpu.S - Code for cpu long mode and SSE verification. This
20888 + * code has been borrowed from boot/setup.S and was introduced by
20889 + * Andi Kleen.
20890 + *
20891 + * Copyright (c) 2007 Andi Kleen (ak@suse.de)
20892 + * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
20893 + * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
20894 + * Copyright (c) 2010 Kees Cook (kees.cook@canonical.com)
20895 + *
20896 + * This source code is licensed under the GNU General Public License,
20897 + * Version 2. See the file COPYING for more details.
20898 + *
20899 + * This is a common code for verification whether CPU supports
20900 + * long mode and SSE or not. It is not called directly instead this
20901 + * file is included at various places and compiled in that context.
20902 + * This file is expected to run in 32bit code. Currently:
20903 + *
20904 + * arch/x86/boot/compressed/head_64.S: Boot cpu verification
20905 + * arch/x86/kernel/trampoline_64.S: secondary processor verification
20906 + * arch/x86/kernel/head_32.S: processor startup
20907 + * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
20908 + *
20909 + * verify_cpu, returns the status of longmode and SSE in register %eax.
20910 + * 0: Success 1: Failure
20911 + *
20912 + * On Intel, the XD_DISABLE flag will be cleared as a side-effect.
20913 + *
20914 + * The caller needs to check for the error code and take the action
20915 + * appropriately. Either display a message or halt.
20916 + */
20917 +
20918 +#include <asm/cpufeature.h>
20919 +#include <asm/msr-index.h>
20920 +
20921 +verify_cpu:
20922 + pushfl # Save caller passed flags
20923 + pushl $0 # Kill any dangerous flags
20924 + popfl
20925 +
20926 + pushfl # standard way to check for cpuid
20927 + popl %eax
20928 + movl %eax,%ebx
20929 + xorl $0x200000,%eax
20930 + pushl %eax
20931 + popfl
20932 + pushfl
20933 + popl %eax
20934 + cmpl %eax,%ebx
20935 + jz verify_cpu_no_longmode # cpu has no cpuid
20936 +
20937 + movl $0x0,%eax # See if cpuid 1 is implemented
20938 + cpuid
20939 + cmpl $0x1,%eax
20940 + jb verify_cpu_no_longmode # no cpuid 1
20941 +
20942 + xor %di,%di
20943 + cmpl $0x68747541,%ebx # AuthenticAMD
20944 + jnz verify_cpu_noamd
20945 + cmpl $0x69746e65,%edx
20946 + jnz verify_cpu_noamd
20947 + cmpl $0x444d4163,%ecx
20948 + jnz verify_cpu_noamd
20949 + mov $1,%di # cpu is from AMD
20950 + jmp verify_cpu_check
20951 +
20952 +verify_cpu_noamd:
20953 + cmpl $0x756e6547,%ebx # GenuineIntel?
20954 + jnz verify_cpu_check
20955 + cmpl $0x49656e69,%edx
20956 + jnz verify_cpu_check
20957 + cmpl $0x6c65746e,%ecx
20958 + jnz verify_cpu_check
20959 +
20960 + # only call IA32_MISC_ENABLE when:
20961 + # family > 6 || (family == 6 && model >= 0xd)
20962 + movl $0x1, %eax # check CPU family and model
20963 + cpuid
20964 + movl %eax, %ecx
20965 +
20966 + andl $0x0ff00f00, %eax # mask family and extended family
20967 + shrl $8, %eax
20968 + cmpl $6, %eax
20969 + ja verify_cpu_clear_xd # family > 6, ok
20970 + jb verify_cpu_check # family < 6, skip
20971 +
20972 + andl $0x000f00f0, %ecx # mask model and extended model
20973 + shrl $4, %ecx
20974 + cmpl $0xd, %ecx
20975 + jb verify_cpu_check # family == 6, model < 0xd, skip
20976 +
20977 +verify_cpu_clear_xd:
20978 + movl $MSR_IA32_MISC_ENABLE, %ecx
20979 + rdmsr
20980 + btrl $2, %edx # clear MSR_IA32_MISC_ENABLE_XD_DISABLE
20981 + jnc verify_cpu_check # only write MSR if bit was changed
20982 + wrmsr
20983 +
20984 +verify_cpu_check:
20985 + movl $0x1,%eax # Does the cpu have what it takes
20986 + cpuid
20987 + andl $REQUIRED_MASK0,%edx
20988 + xorl $REQUIRED_MASK0,%edx
20989 + jnz verify_cpu_no_longmode
20990 +
20991 + movl $0x80000000,%eax # See if extended cpuid is implemented
20992 + cpuid
20993 + cmpl $0x80000001,%eax
20994 + jb verify_cpu_no_longmode # no extended cpuid
20995 +
20996 + movl $0x80000001,%eax # Does the cpu have what it takes
20997 + cpuid
20998 + andl $REQUIRED_MASK1,%edx
20999 + xorl $REQUIRED_MASK1,%edx
21000 + jnz verify_cpu_no_longmode
21001 +
21002 +verify_cpu_sse_test:
21003 + movl $1,%eax
21004 + cpuid
21005 + andl $SSE_MASK,%edx
21006 + cmpl $SSE_MASK,%edx
21007 + je verify_cpu_sse_ok
21008 + test %di,%di
21009 + jz verify_cpu_no_longmode # only try to force SSE on AMD
21010 + movl $MSR_K7_HWCR,%ecx
21011 + rdmsr
21012 + btr $15,%eax # enable SSE
21013 + wrmsr
21014 + xor %di,%di # don't loop
21015 + jmp verify_cpu_sse_test # try again
21016 +
21017 +verify_cpu_no_longmode:
21018 + popfl # Restore caller passed flags
21019 + movl $1,%eax
21020 + ret
21021 +verify_cpu_sse_ok:
21022 + popfl # Restore caller passed flags
21023 + xorl %eax, %eax
21024 + ret
21025 diff --git a/arch/x86/kernel/verify_cpu_64.S b/arch/x86/kernel/verify_cpu_64.S
21026 deleted file mode 100644
21027 index 45b6f8a..0000000
21028 --- a/arch/x86/kernel/verify_cpu_64.S
21029 +++ /dev/null
21030 @@ -1,105 +0,0 @@
21031 -/*
21032 - *
21033 - * verify_cpu.S - Code for cpu long mode and SSE verification. This
21034 - * code has been borrowed from boot/setup.S and was introduced by
21035 - * Andi Kleen.
21036 - *
21037 - * Copyright (c) 2007 Andi Kleen (ak@suse.de)
21038 - * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
21039 - * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
21040 - *
21041 - * This source code is licensed under the GNU General Public License,
21042 - * Version 2. See the file COPYING for more details.
21043 - *
21044 - * This is a common code for verification whether CPU supports
21045 - * long mode and SSE or not. It is not called directly instead this
21046 - * file is included at various places and compiled in that context.
21047 - * Following are the current usage.
21048 - *
21049 - * This file is included by both 16bit and 32bit code.
21050 - *
21051 - * arch/x86_64/boot/setup.S : Boot cpu verification (16bit)
21052 - * arch/x86_64/boot/compressed/head.S: Boot cpu verification (32bit)
21053 - * arch/x86_64/kernel/trampoline.S: secondary processor verfication (16bit)
21054 - * arch/x86_64/kernel/acpi/wakeup.S:Verfication at resume (16bit)
21055 - *
21056 - * verify_cpu, returns the status of cpu check in register %eax.
21057 - * 0: Success 1: Failure
21058 - *
21059 - * The caller needs to check for the error code and take the action
21060 - * appropriately. Either display a message or halt.
21061 - */
21062 -
21063 -#include <asm/cpufeature.h>
21064 -
21065 -verify_cpu:
21066 - pushfl # Save caller passed flags
21067 - pushl $0 # Kill any dangerous flags
21068 - popfl
21069 -
21070 - pushfl # standard way to check for cpuid
21071 - popl %eax
21072 - movl %eax,%ebx
21073 - xorl $0x200000,%eax
21074 - pushl %eax
21075 - popfl
21076 - pushfl
21077 - popl %eax
21078 - cmpl %eax,%ebx
21079 - jz verify_cpu_no_longmode # cpu has no cpuid
21080 -
21081 - movl $0x0,%eax # See if cpuid 1 is implemented
21082 - cpuid
21083 - cmpl $0x1,%eax
21084 - jb verify_cpu_no_longmode # no cpuid 1
21085 -
21086 - xor %di,%di
21087 - cmpl $0x68747541,%ebx # AuthenticAMD
21088 - jnz verify_cpu_noamd
21089 - cmpl $0x69746e65,%edx
21090 - jnz verify_cpu_noamd
21091 - cmpl $0x444d4163,%ecx
21092 - jnz verify_cpu_noamd
21093 - mov $1,%di # cpu is from AMD
21094 -
21095 -verify_cpu_noamd:
21096 - movl $0x1,%eax # Does the cpu have what it takes
21097 - cpuid
21098 - andl $REQUIRED_MASK0,%edx
21099 - xorl $REQUIRED_MASK0,%edx
21100 - jnz verify_cpu_no_longmode
21101 -
21102 - movl $0x80000000,%eax # See if extended cpuid is implemented
21103 - cpuid
21104 - cmpl $0x80000001,%eax
21105 - jb verify_cpu_no_longmode # no extended cpuid
21106 -
21107 - movl $0x80000001,%eax # Does the cpu have what it takes
21108 - cpuid
21109 - andl $REQUIRED_MASK1,%edx
21110 - xorl $REQUIRED_MASK1,%edx
21111 - jnz verify_cpu_no_longmode
21112 -
21113 -verify_cpu_sse_test:
21114 - movl $1,%eax
21115 - cpuid
21116 - andl $SSE_MASK,%edx
21117 - cmpl $SSE_MASK,%edx
21118 - je verify_cpu_sse_ok
21119 - test %di,%di
21120 - jz verify_cpu_no_longmode # only try to force SSE on AMD
21121 - movl $0xc0010015,%ecx # HWCR
21122 - rdmsr
21123 - btr $15,%eax # enable SSE
21124 - wrmsr
21125 - xor %di,%di # don't loop
21126 - jmp verify_cpu_sse_test # try again
21127 -
21128 -verify_cpu_no_longmode:
21129 - popfl # Restore caller passed flags
21130 - movl $1,%eax
21131 - ret
21132 -verify_cpu_sse_ok:
21133 - popfl # Restore caller passed flags
21134 - xorl %eax, %eax
21135 - ret
21136 diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
21137 index 9c4e625..c992817 100644
21138 --- a/arch/x86/kernel/vm86_32.c
21139 +++ b/arch/x86/kernel/vm86_32.c
21140 @@ -41,6 +41,7 @@
21141 #include <linux/ptrace.h>
21142 #include <linux/audit.h>
21143 #include <linux/stddef.h>
21144 +#include <linux/grsecurity.h>
21145
21146 #include <asm/uaccess.h>
21147 #include <asm/io.h>
21148 @@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
21149 do_exit(SIGSEGV);
21150 }
21151
21152 - tss = &per_cpu(init_tss, get_cpu());
21153 + tss = init_tss + get_cpu();
21154 current->thread.sp0 = current->thread.saved_sp0;
21155 current->thread.sysenter_cs = __KERNEL_CS;
21156 load_sp0(tss, &current->thread);
21157 @@ -208,6 +209,13 @@ int sys_vm86old(struct pt_regs *regs)
21158 struct task_struct *tsk;
21159 int tmp, ret = -EPERM;
21160
21161 +#ifdef CONFIG_GRKERNSEC_VM86
21162 + if (!capable(CAP_SYS_RAWIO)) {
21163 + gr_handle_vm86();
21164 + goto out;
21165 + }
21166 +#endif
21167 +
21168 tsk = current;
21169 if (tsk->thread.saved_sp0)
21170 goto out;
21171 @@ -238,6 +246,14 @@ int sys_vm86(struct pt_regs *regs)
21172 int tmp, ret;
21173 struct vm86plus_struct __user *v86;
21174
21175 +#ifdef CONFIG_GRKERNSEC_VM86
21176 + if (!capable(CAP_SYS_RAWIO)) {
21177 + gr_handle_vm86();
21178 + ret = -EPERM;
21179 + goto out;
21180 + }
21181 +#endif
21182 +
21183 tsk = current;
21184 switch (regs->bx) {
21185 case VM86_REQUEST_IRQ:
21186 @@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
21187 tsk->thread.saved_fs = info->regs32->fs;
21188 tsk->thread.saved_gs = get_user_gs(info->regs32);
21189
21190 - tss = &per_cpu(init_tss, get_cpu());
21191 + tss = init_tss + get_cpu();
21192 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
21193 if (cpu_has_sep)
21194 tsk->thread.sysenter_cs = 0;
21195 @@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
21196 goto cannot_handle;
21197 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
21198 goto cannot_handle;
21199 - intr_ptr = (unsigned long __user *) (i << 2);
21200 + intr_ptr = (__force unsigned long __user *) (i << 2);
21201 if (get_user(segoffs, intr_ptr))
21202 goto cannot_handle;
21203 if ((segoffs >> 16) == BIOSSEG)
21204 diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
21205 index d430e4c..831f817 100644
21206 --- a/arch/x86/kernel/vmi_32.c
21207 +++ b/arch/x86/kernel/vmi_32.c
21208 @@ -44,12 +44,17 @@ typedef u32 __attribute__((regparm(1))) (VROMFUNC)(void);
21209 typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int);
21210
21211 #define call_vrom_func(rom,func) \
21212 - (((VROMFUNC *)(rom->func))())
21213 + (((VROMFUNC *)(ktva_ktla(rom.func)))())
21214
21215 #define call_vrom_long_func(rom,func,arg) \
21216 - (((VROMLONGFUNC *)(rom->func)) (arg))
21217 +({\
21218 + u64 __reloc = ((VROMLONGFUNC *)(ktva_ktla(rom.func))) (arg);\
21219 + struct vmi_relocation_info *const __rel = (struct vmi_relocation_info *)&__reloc;\
21220 + __rel->eip = (unsigned char *)ktva_ktla((unsigned long)__rel->eip);\
21221 + __reloc;\
21222 +})
21223
21224 -static struct vrom_header *vmi_rom;
21225 +static struct vrom_header vmi_rom __attribute((__section__(".vmi.rom"), __aligned__(PAGE_SIZE)));
21226 static int disable_pge;
21227 static int disable_pse;
21228 static int disable_sep;
21229 @@ -76,10 +81,10 @@ static struct {
21230 void (*set_initial_ap_state)(int, int);
21231 void (*halt)(void);
21232 void (*set_lazy_mode)(int mode);
21233 -} vmi_ops;
21234 +} __no_const vmi_ops __read_only;
21235
21236 /* Cached VMI operations */
21237 -struct vmi_timer_ops vmi_timer_ops;
21238 +struct vmi_timer_ops vmi_timer_ops __read_only;
21239
21240 /*
21241 * VMI patching routines.
21242 @@ -94,7 +99,7 @@ struct vmi_timer_ops vmi_timer_ops;
21243 static inline void patch_offset(void *insnbuf,
21244 unsigned long ip, unsigned long dest)
21245 {
21246 - *(unsigned long *)(insnbuf+1) = dest-ip-5;
21247 + *(unsigned long *)(insnbuf+1) = dest-ip-5;
21248 }
21249
21250 static unsigned patch_internal(int call, unsigned len, void *insnbuf,
21251 @@ -102,6 +107,7 @@ static unsigned patch_internal(int call, unsigned len, void *insnbuf,
21252 {
21253 u64 reloc;
21254 struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;
21255 +
21256 reloc = call_vrom_long_func(vmi_rom, get_reloc, call);
21257 switch(rel->type) {
21258 case VMI_RELOCATION_CALL_REL:
21259 @@ -404,13 +410,13 @@ static void vmi_set_pud(pud_t *pudp, pud_t pudval)
21260
21261 static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
21262 {
21263 - const pte_t pte = { .pte = 0 };
21264 + const pte_t pte = __pte(0ULL);
21265 vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
21266 }
21267
21268 static void vmi_pmd_clear(pmd_t *pmd)
21269 {
21270 - const pte_t pte = { .pte = 0 };
21271 + const pte_t pte = __pte(0ULL);
21272 vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
21273 }
21274 #endif
21275 @@ -438,10 +444,10 @@ vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip,
21276 ap.ss = __KERNEL_DS;
21277 ap.esp = (unsigned long) start_esp;
21278
21279 - ap.ds = __USER_DS;
21280 - ap.es = __USER_DS;
21281 + ap.ds = __KERNEL_DS;
21282 + ap.es = __KERNEL_DS;
21283 ap.fs = __KERNEL_PERCPU;
21284 - ap.gs = __KERNEL_STACK_CANARY;
21285 + savesegment(gs, ap.gs);
21286
21287 ap.eflags = 0;
21288
21289 @@ -486,6 +492,18 @@ static void vmi_leave_lazy_mmu(void)
21290 paravirt_leave_lazy_mmu();
21291 }
21292
21293 +#ifdef CONFIG_PAX_KERNEXEC
21294 +static unsigned long vmi_pax_open_kernel(void)
21295 +{
21296 + return 0;
21297 +}
21298 +
21299 +static unsigned long vmi_pax_close_kernel(void)
21300 +{
21301 + return 0;
21302 +}
21303 +#endif
21304 +
21305 static inline int __init check_vmi_rom(struct vrom_header *rom)
21306 {
21307 struct pci_header *pci;
21308 @@ -498,6 +516,10 @@ static inline int __init check_vmi_rom(struct vrom_header *rom)
21309 return 0;
21310 if (rom->vrom_signature != VMI_SIGNATURE)
21311 return 0;
21312 + if (rom->rom_length * 512 > sizeof(*rom)) {
21313 + printk(KERN_WARNING "PAX: VMI: ROM size too big: %x\n", rom->rom_length * 512);
21314 + return 0;
21315 + }
21316 if (rom->api_version_maj != VMI_API_REV_MAJOR ||
21317 rom->api_version_min+1 < VMI_API_REV_MINOR+1) {
21318 printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n",
21319 @@ -562,7 +584,7 @@ static inline int __init probe_vmi_rom(void)
21320 struct vrom_header *romstart;
21321 romstart = (struct vrom_header *)isa_bus_to_virt(base);
21322 if (check_vmi_rom(romstart)) {
21323 - vmi_rom = romstart;
21324 + vmi_rom = *romstart;
21325 return 1;
21326 }
21327 }
21328 @@ -836,6 +858,11 @@ static inline int __init activate_vmi(void)
21329
21330 para_fill(pv_irq_ops.safe_halt, Halt);
21331
21332 +#ifdef CONFIG_PAX_KERNEXEC
21333 + pv_mmu_ops.pax_open_kernel = vmi_pax_open_kernel;
21334 + pv_mmu_ops.pax_close_kernel = vmi_pax_close_kernel;
21335 +#endif
21336 +
21337 /*
21338 * Alternative instruction rewriting doesn't happen soon enough
21339 * to convert VMI_IRET to a call instead of a jump; so we have
21340 @@ -853,16 +880,16 @@ static inline int __init activate_vmi(void)
21341
21342 void __init vmi_init(void)
21343 {
21344 - if (!vmi_rom)
21345 + if (!vmi_rom.rom_signature)
21346 probe_vmi_rom();
21347 else
21348 - check_vmi_rom(vmi_rom);
21349 + check_vmi_rom(&vmi_rom);
21350
21351 /* In case probing for or validating the ROM failed, basil */
21352 - if (!vmi_rom)
21353 + if (!vmi_rom.rom_signature)
21354 return;
21355
21356 - reserve_top_address(-vmi_rom->virtual_top);
21357 + reserve_top_address(-vmi_rom.virtual_top);
21358
21359 #ifdef CONFIG_X86_IO_APIC
21360 /* This is virtual hardware; timer routing is wired correctly */
21361 @@ -874,7 +901,7 @@ void __init vmi_activate(void)
21362 {
21363 unsigned long flags;
21364
21365 - if (!vmi_rom)
21366 + if (!vmi_rom.rom_signature)
21367 return;
21368
21369 local_irq_save(flags);
21370 diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
21371 index 3c68fe2..12c8280 100644
21372 --- a/arch/x86/kernel/vmlinux.lds.S
21373 +++ b/arch/x86/kernel/vmlinux.lds.S
21374 @@ -26,6 +26,13 @@
21375 #include <asm/page_types.h>
21376 #include <asm/cache.h>
21377 #include <asm/boot.h>
21378 +#include <asm/segment.h>
21379 +
21380 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21381 +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
21382 +#else
21383 +#define __KERNEL_TEXT_OFFSET 0
21384 +#endif
21385
21386 #undef i386 /* in case the preprocessor is a 32bit one */
21387
21388 @@ -34,40 +41,53 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT)
21389 #ifdef CONFIG_X86_32
21390 OUTPUT_ARCH(i386)
21391 ENTRY(phys_startup_32)
21392 -jiffies = jiffies_64;
21393 #else
21394 OUTPUT_ARCH(i386:x86-64)
21395 ENTRY(phys_startup_64)
21396 -jiffies_64 = jiffies;
21397 #endif
21398
21399 PHDRS {
21400 text PT_LOAD FLAGS(5); /* R_E */
21401 - data PT_LOAD FLAGS(7); /* RWE */
21402 +#ifdef CONFIG_X86_32
21403 + module PT_LOAD FLAGS(5); /* R_E */
21404 +#endif
21405 +#ifdef CONFIG_XEN
21406 + rodata PT_LOAD FLAGS(5); /* R_E */
21407 +#else
21408 + rodata PT_LOAD FLAGS(4); /* R__ */
21409 +#endif
21410 + data PT_LOAD FLAGS(6); /* RW_ */
21411 #ifdef CONFIG_X86_64
21412 user PT_LOAD FLAGS(5); /* R_E */
21413 +#endif
21414 + init.begin PT_LOAD FLAGS(6); /* RW_ */
21415 #ifdef CONFIG_SMP
21416 percpu PT_LOAD FLAGS(6); /* RW_ */
21417 #endif
21418 + text.init PT_LOAD FLAGS(5); /* R_E */
21419 + text.exit PT_LOAD FLAGS(5); /* R_E */
21420 init PT_LOAD FLAGS(7); /* RWE */
21421 -#endif
21422 note PT_NOTE FLAGS(0); /* ___ */
21423 }
21424
21425 SECTIONS
21426 {
21427 #ifdef CONFIG_X86_32
21428 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
21429 - phys_startup_32 = startup_32 - LOAD_OFFSET;
21430 + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
21431 #else
21432 - . = __START_KERNEL;
21433 - phys_startup_64 = startup_64 - LOAD_OFFSET;
21434 + . = __START_KERNEL;
21435 #endif
21436
21437 /* Text and read-only data */
21438 - .text : AT(ADDR(.text) - LOAD_OFFSET) {
21439 - _text = .;
21440 + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
21441 /* bootstrapping code */
21442 +#ifdef CONFIG_X86_32
21443 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
21444 +#else
21445 + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
21446 +#endif
21447 + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
21448 + _text = .;
21449 HEAD_TEXT
21450 #ifdef CONFIG_X86_32
21451 . = ALIGN(PAGE_SIZE);
21452 @@ -82,28 +102,71 @@ SECTIONS
21453 IRQENTRY_TEXT
21454 *(.fixup)
21455 *(.gnu.warning)
21456 - /* End of text section */
21457 - _etext = .;
21458 } :text = 0x9090
21459
21460 - NOTES :text :note
21461 + . += __KERNEL_TEXT_OFFSET;
21462
21463 - EXCEPTION_TABLE(16) :text = 0x9090
21464 +#ifdef CONFIG_X86_32
21465 + . = ALIGN(PAGE_SIZE);
21466 + .vmi.rom : AT(ADDR(.vmi.rom) - LOAD_OFFSET) {
21467 + *(.vmi.rom)
21468 + } :module
21469 +
21470 + . = ALIGN(PAGE_SIZE);
21471 + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
21472 +
21473 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
21474 + MODULES_EXEC_VADDR = .;
21475 + BYTE(0)
21476 + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
21477 + . = ALIGN(HPAGE_SIZE);
21478 + MODULES_EXEC_END = . - 1;
21479 +#endif
21480 +
21481 + } :module
21482 +#endif
21483 +
21484 + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
21485 + /* End of text section */
21486 + _etext = . - __KERNEL_TEXT_OFFSET;
21487 + }
21488 +
21489 +#ifdef CONFIG_X86_32
21490 + . = ALIGN(PAGE_SIZE);
21491 + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
21492 + *(.idt)
21493 + . = ALIGN(PAGE_SIZE);
21494 + *(.empty_zero_page)
21495 + *(.swapper_pg_fixmap)
21496 + *(.swapper_pg_pmd)
21497 + *(.swapper_pg_dir)
21498 + *(.trampoline_pg_dir)
21499 + } :rodata
21500 +#endif
21501 +
21502 + . = ALIGN(PAGE_SIZE);
21503 + NOTES :rodata :note
21504 +
21505 + EXCEPTION_TABLE(16) :rodata
21506
21507 RO_DATA(PAGE_SIZE)
21508
21509 /* Data */
21510 .data : AT(ADDR(.data) - LOAD_OFFSET) {
21511 +
21512 +#ifdef CONFIG_PAX_KERNEXEC
21513 + . = ALIGN(HPAGE_SIZE);
21514 +#else
21515 + . = ALIGN(PAGE_SIZE);
21516 +#endif
21517 +
21518 /* Start of data section */
21519 _sdata = .;
21520
21521 /* init_task */
21522 INIT_TASK_DATA(THREAD_SIZE)
21523
21524 -#ifdef CONFIG_X86_32
21525 - /* 32 bit has nosave before _edata */
21526 NOSAVE_DATA
21527 -#endif
21528
21529 PAGE_ALIGNED_DATA(PAGE_SIZE)
21530
21531 @@ -112,6 +175,8 @@ SECTIONS
21532 DATA_DATA
21533 CONSTRUCTORS
21534
21535 + jiffies = jiffies_64;
21536 +
21537 /* rarely changed data like cpu maps */
21538 READ_MOSTLY_DATA(CONFIG_X86_INTERNODE_CACHE_BYTES)
21539
21540 @@ -166,12 +231,6 @@ SECTIONS
21541 }
21542 vgetcpu_mode = VVIRT(.vgetcpu_mode);
21543
21544 - . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
21545 - .jiffies : AT(VLOAD(.jiffies)) {
21546 - *(.jiffies)
21547 - }
21548 - jiffies = VVIRT(.jiffies);
21549 -
21550 .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
21551 *(.vsyscall_3)
21552 }
21553 @@ -187,12 +246,19 @@ SECTIONS
21554 #endif /* CONFIG_X86_64 */
21555
21556 /* Init code and data - will be freed after init */
21557 - . = ALIGN(PAGE_SIZE);
21558 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
21559 + BYTE(0)
21560 +
21561 +#ifdef CONFIG_PAX_KERNEXEC
21562 + . = ALIGN(HPAGE_SIZE);
21563 +#else
21564 + . = ALIGN(PAGE_SIZE);
21565 +#endif
21566 +
21567 __init_begin = .; /* paired with __init_end */
21568 - }
21569 + } :init.begin
21570
21571 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
21572 +#ifdef CONFIG_SMP
21573 /*
21574 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
21575 * output PHDR, so the next output section - .init.text - should
21576 @@ -201,12 +267,27 @@ SECTIONS
21577 PERCPU_VADDR(0, :percpu)
21578 #endif
21579
21580 - INIT_TEXT_SECTION(PAGE_SIZE)
21581 -#ifdef CONFIG_X86_64
21582 - :init
21583 -#endif
21584 + . = ALIGN(PAGE_SIZE);
21585 + init_begin = .;
21586 + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
21587 + VMLINUX_SYMBOL(_sinittext) = .;
21588 + INIT_TEXT
21589 + VMLINUX_SYMBOL(_einittext) = .;
21590 + . = ALIGN(PAGE_SIZE);
21591 + } :text.init
21592
21593 - INIT_DATA_SECTION(16)
21594 + /*
21595 + * .exit.text is discard at runtime, not link time, to deal with
21596 + * references from .altinstructions and .eh_frame
21597 + */
21598 + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
21599 + EXIT_TEXT
21600 + . = ALIGN(16);
21601 + } :text.exit
21602 + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
21603 +
21604 + . = ALIGN(PAGE_SIZE);
21605 + INIT_DATA_SECTION(16) :init
21606
21607 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
21608 __x86_cpu_dev_start = .;
21609 @@ -232,19 +313,11 @@ SECTIONS
21610 *(.altinstr_replacement)
21611 }
21612
21613 - /*
21614 - * .exit.text is discard at runtime, not link time, to deal with
21615 - * references from .altinstructions and .eh_frame
21616 - */
21617 - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
21618 - EXIT_TEXT
21619 - }
21620 -
21621 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
21622 EXIT_DATA
21623 }
21624
21625 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
21626 +#ifndef CONFIG_SMP
21627 PERCPU(PAGE_SIZE)
21628 #endif
21629
21630 @@ -267,12 +340,6 @@ SECTIONS
21631 . = ALIGN(PAGE_SIZE);
21632 }
21633
21634 -#ifdef CONFIG_X86_64
21635 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
21636 - NOSAVE_DATA
21637 - }
21638 -#endif
21639 -
21640 /* BSS */
21641 . = ALIGN(PAGE_SIZE);
21642 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
21643 @@ -288,6 +355,7 @@ SECTIONS
21644 __brk_base = .;
21645 . += 64 * 1024; /* 64k alignment slop space */
21646 *(.brk_reservation) /* areas brk users have reserved */
21647 + . = ALIGN(HPAGE_SIZE);
21648 __brk_limit = .;
21649 }
21650
21651 @@ -316,13 +384,12 @@ SECTIONS
21652 * for the boot processor.
21653 */
21654 #define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
21655 -INIT_PER_CPU(gdt_page);
21656 INIT_PER_CPU(irq_stack_union);
21657
21658 /*
21659 * Build-time check on the image size:
21660 */
21661 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
21662 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
21663 "kernel image bigger than KERNEL_IMAGE_SIZE");
21664
21665 #ifdef CONFIG_SMP
21666 diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
21667 index 62f39d7..3bc46a1 100644
21668 --- a/arch/x86/kernel/vsyscall_64.c
21669 +++ b/arch/x86/kernel/vsyscall_64.c
21670 @@ -80,6 +80,7 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
21671
21672 write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
21673 /* copy vsyscall data */
21674 + strlcpy(vsyscall_gtod_data.clock.name, clock->name, sizeof vsyscall_gtod_data.clock.name);
21675 vsyscall_gtod_data.clock.vread = clock->vread;
21676 vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
21677 vsyscall_gtod_data.clock.mask = clock->mask;
21678 @@ -203,7 +204,7 @@ vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
21679 We do this here because otherwise user space would do it on
21680 its own in a likely inferior way (no access to jiffies).
21681 If you don't like it pass NULL. */
21682 - if (tcache && tcache->blob[0] == (j = __jiffies)) {
21683 + if (tcache && tcache->blob[0] == (j = jiffies)) {
21684 p = tcache->blob[1];
21685 } else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
21686 /* Load per CPU data from RDTSCP */
21687 diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
21688 index 3909e3b..5433a97 100644
21689 --- a/arch/x86/kernel/x8664_ksyms_64.c
21690 +++ b/arch/x86/kernel/x8664_ksyms_64.c
21691 @@ -30,8 +30,6 @@ EXPORT_SYMBOL(__put_user_8);
21692
21693 EXPORT_SYMBOL(copy_user_generic);
21694 EXPORT_SYMBOL(__copy_user_nocache);
21695 -EXPORT_SYMBOL(copy_from_user);
21696 -EXPORT_SYMBOL(copy_to_user);
21697 EXPORT_SYMBOL(__copy_from_user_inatomic);
21698
21699 EXPORT_SYMBOL(copy_page);
21700 diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
21701 index c5ee17e..d63218f 100644
21702 --- a/arch/x86/kernel/xsave.c
21703 +++ b/arch/x86/kernel/xsave.c
21704 @@ -54,7 +54,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
21705 fx_sw_user->xstate_size > fx_sw_user->extended_size)
21706 return -1;
21707
21708 - err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
21709 + err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
21710 fx_sw_user->extended_size -
21711 FP_XSTATE_MAGIC2_SIZE));
21712 /*
21713 @@ -196,7 +196,7 @@ fx_only:
21714 * the other extended state.
21715 */
21716 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
21717 - return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
21718 + return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
21719 }
21720
21721 /*
21722 @@ -228,7 +228,7 @@ int restore_i387_xstate(void __user *buf)
21723 if (task_thread_info(tsk)->status & TS_XSAVE)
21724 err = restore_user_xstate(buf);
21725 else
21726 - err = fxrstor_checking((__force struct i387_fxsave_struct *)
21727 + err = fxrstor_checking((struct i387_fxsave_struct __user *)
21728 buf);
21729 if (unlikely(err)) {
21730 /*
21731 diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
21732 index 1350e43..a94b011 100644
21733 --- a/arch/x86/kvm/emulate.c
21734 +++ b/arch/x86/kvm/emulate.c
21735 @@ -81,8 +81,8 @@
21736 #define Src2CL (1<<29)
21737 #define Src2ImmByte (2<<29)
21738 #define Src2One (3<<29)
21739 -#define Src2Imm16 (4<<29)
21740 -#define Src2Mask (7<<29)
21741 +#define Src2Imm16 (4U<<29)
21742 +#define Src2Mask (7U<<29)
21743
21744 enum {
21745 Group1_80, Group1_81, Group1_82, Group1_83,
21746 @@ -411,6 +411,7 @@ static u32 group2_table[] = {
21747
21748 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix) \
21749 do { \
21750 + unsigned long _tmp; \
21751 __asm__ __volatile__ ( \
21752 _PRE_EFLAGS("0", "4", "2") \
21753 _op _suffix " %"_x"3,%1; " \
21754 @@ -424,8 +425,6 @@ static u32 group2_table[] = {
21755 /* Raw emulation: instruction has two explicit operands. */
21756 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
21757 do { \
21758 - unsigned long _tmp; \
21759 - \
21760 switch ((_dst).bytes) { \
21761 case 2: \
21762 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \
21763 @@ -441,7 +440,6 @@ static u32 group2_table[] = {
21764
21765 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
21766 do { \
21767 - unsigned long _tmp; \
21768 switch ((_dst).bytes) { \
21769 case 1: \
21770 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \
21771 diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
21772 index 8dfeaaa..4daa395 100644
21773 --- a/arch/x86/kvm/lapic.c
21774 +++ b/arch/x86/kvm/lapic.c
21775 @@ -52,7 +52,7 @@
21776 #define APIC_BUS_CYCLE_NS 1
21777
21778 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
21779 -#define apic_debug(fmt, arg...)
21780 +#define apic_debug(fmt, arg...) do {} while (0)
21781
21782 #define APIC_LVT_NUM 6
21783 /* 14 is the version for Xeon and Pentium 8.4.8*/
21784 diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
21785 index 3bc2707..dd157e2 100644
21786 --- a/arch/x86/kvm/paging_tmpl.h
21787 +++ b/arch/x86/kvm/paging_tmpl.h
21788 @@ -416,6 +416,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
21789 int level = PT_PAGE_TABLE_LEVEL;
21790 unsigned long mmu_seq;
21791
21792 + pax_track_stack();
21793 +
21794 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
21795 kvm_mmu_audit(vcpu, "pre page fault");
21796
21797 @@ -461,6 +463,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
21798 kvm_mmu_free_some_pages(vcpu);
21799 sptep = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
21800 level, &write_pt, pfn);
21801 + (void)sptep;
21802 pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __func__,
21803 sptep, *sptep, write_pt);
21804
21805 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
21806 index 7c6e63e..c5d92c1 100644
21807 --- a/arch/x86/kvm/svm.c
21808 +++ b/arch/x86/kvm/svm.c
21809 @@ -2486,7 +2486,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
21810 int cpu = raw_smp_processor_id();
21811
21812 struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
21813 +
21814 + pax_open_kernel();
21815 svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
21816 + pax_close_kernel();
21817 +
21818 load_TR_desc();
21819 }
21820
21821 @@ -2947,7 +2951,7 @@ static bool svm_gb_page_enable(void)
21822 return true;
21823 }
21824
21825 -static struct kvm_x86_ops svm_x86_ops = {
21826 +static const struct kvm_x86_ops svm_x86_ops = {
21827 .cpu_has_kvm_support = has_svm,
21828 .disabled_by_bios = is_disabled,
21829 .hardware_setup = svm_hardware_setup,
21830 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
21831 index e6d925f..e7a4af8 100644
21832 --- a/arch/x86/kvm/vmx.c
21833 +++ b/arch/x86/kvm/vmx.c
21834 @@ -570,7 +570,11 @@ static void reload_tss(void)
21835
21836 kvm_get_gdt(&gdt);
21837 descs = (void *)gdt.base;
21838 +
21839 + pax_open_kernel();
21840 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
21841 + pax_close_kernel();
21842 +
21843 load_TR_desc();
21844 }
21845
21846 @@ -1410,8 +1414,11 @@ static __init int hardware_setup(void)
21847 if (!cpu_has_vmx_flexpriority())
21848 flexpriority_enabled = 0;
21849
21850 - if (!cpu_has_vmx_tpr_shadow())
21851 - kvm_x86_ops->update_cr8_intercept = NULL;
21852 + if (!cpu_has_vmx_tpr_shadow()) {
21853 + pax_open_kernel();
21854 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
21855 + pax_close_kernel();
21856 + }
21857
21858 if (enable_ept && !cpu_has_vmx_ept_2m_page())
21859 kvm_disable_largepages();
21860 @@ -2362,7 +2369,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
21861 vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */
21862
21863 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
21864 - vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
21865 + vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
21866 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
21867 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
21868 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
21869 @@ -3718,6 +3725,12 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
21870 "jmp .Lkvm_vmx_return \n\t"
21871 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
21872 ".Lkvm_vmx_return: "
21873 +
21874 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21875 + "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
21876 + ".Lkvm_vmx_return2: "
21877 +#endif
21878 +
21879 /* Save guest registers, load host registers, keep flags */
21880 "xchg %0, (%%"R"sp) \n\t"
21881 "mov %%"R"ax, %c[rax](%0) \n\t"
21882 @@ -3764,8 +3777,13 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
21883 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
21884 #endif
21885 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2))
21886 +
21887 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21888 + ,[cs]"i"(__KERNEL_CS)
21889 +#endif
21890 +
21891 : "cc", "memory"
21892 - , R"bx", R"di", R"si"
21893 + , R"ax", R"bx", R"di", R"si"
21894 #ifdef CONFIG_X86_64
21895 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
21896 #endif
21897 @@ -3782,7 +3800,16 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
21898 if (vmx->rmode.irq.pending)
21899 fixup_rmode_irq(vmx);
21900
21901 - asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
21902 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
21903 +
21904 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21905 + loadsegment(fs, __KERNEL_PERCPU);
21906 +#endif
21907 +
21908 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
21909 + __set_fs(current_thread_info()->addr_limit);
21910 +#endif
21911 +
21912 vmx->launched = 1;
21913
21914 vmx_complete_interrupts(vmx);
21915 @@ -3957,7 +3984,7 @@ static bool vmx_gb_page_enable(void)
21916 return false;
21917 }
21918
21919 -static struct kvm_x86_ops vmx_x86_ops = {
21920 +static const struct kvm_x86_ops vmx_x86_ops = {
21921 .cpu_has_kvm_support = cpu_has_kvm_support,
21922 .disabled_by_bios = vmx_disabled_by_bios,
21923 .hardware_setup = hardware_setup,
21924 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
21925 index df1cefb..5e882ad 100644
21926 --- a/arch/x86/kvm/x86.c
21927 +++ b/arch/x86/kvm/x86.c
21928 @@ -82,7 +82,7 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu);
21929 static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
21930 struct kvm_cpuid_entry2 __user *entries);
21931
21932 -struct kvm_x86_ops *kvm_x86_ops;
21933 +const struct kvm_x86_ops *kvm_x86_ops;
21934 EXPORT_SYMBOL_GPL(kvm_x86_ops);
21935
21936 int ignore_msrs = 0;
21937 @@ -1430,15 +1430,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
21938 struct kvm_cpuid2 *cpuid,
21939 struct kvm_cpuid_entry2 __user *entries)
21940 {
21941 - int r;
21942 + int r, i;
21943
21944 r = -E2BIG;
21945 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
21946 goto out;
21947 r = -EFAULT;
21948 - if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
21949 - cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
21950 + if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
21951 goto out;
21952 + for (i = 0; i < cpuid->nent; ++i) {
21953 + struct kvm_cpuid_entry2 cpuid_entry;
21954 + if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
21955 + goto out;
21956 + vcpu->arch.cpuid_entries[i] = cpuid_entry;
21957 + }
21958 vcpu->arch.cpuid_nent = cpuid->nent;
21959 kvm_apic_set_version(vcpu);
21960 return 0;
21961 @@ -1451,16 +1456,20 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
21962 struct kvm_cpuid2 *cpuid,
21963 struct kvm_cpuid_entry2 __user *entries)
21964 {
21965 - int r;
21966 + int r, i;
21967
21968 vcpu_load(vcpu);
21969 r = -E2BIG;
21970 if (cpuid->nent < vcpu->arch.cpuid_nent)
21971 goto out;
21972 r = -EFAULT;
21973 - if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
21974 - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
21975 + if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
21976 goto out;
21977 + for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
21978 + struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
21979 + if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
21980 + goto out;
21981 + }
21982 return 0;
21983
21984 out:
21985 @@ -1678,7 +1687,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
21986 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
21987 struct kvm_interrupt *irq)
21988 {
21989 - if (irq->irq < 0 || irq->irq >= 256)
21990 + if (irq->irq >= 256)
21991 return -EINVAL;
21992 if (irqchip_in_kernel(vcpu->kvm))
21993 return -ENXIO;
21994 @@ -3260,10 +3269,10 @@ static struct notifier_block kvmclock_cpufreq_notifier_block = {
21995 .notifier_call = kvmclock_cpufreq_notifier
21996 };
21997
21998 -int kvm_arch_init(void *opaque)
21999 +int kvm_arch_init(const void *opaque)
22000 {
22001 int r, cpu;
22002 - struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
22003 + const struct kvm_x86_ops *ops = (const struct kvm_x86_ops *)opaque;
22004
22005 if (kvm_x86_ops) {
22006 printk(KERN_ERR "kvm: already loaded the other module\n");
22007 diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
22008 index 7e59dc1..b88c98f 100644
22009 --- a/arch/x86/lguest/boot.c
22010 +++ b/arch/x86/lguest/boot.c
22011 @@ -1172,9 +1172,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
22012 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
22013 * Launcher to reboot us.
22014 */
22015 -static void lguest_restart(char *reason)
22016 +static __noreturn void lguest_restart(char *reason)
22017 {
22018 kvm_hypercall2(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART);
22019 + BUG();
22020 }
22021
22022 /*G:050
22023 diff --git a/arch/x86/lib/atomic64_32.c b/arch/x86/lib/atomic64_32.c
22024 index 824fa0b..c619e96 100644
22025 --- a/arch/x86/lib/atomic64_32.c
22026 +++ b/arch/x86/lib/atomic64_32.c
22027 @@ -25,6 +25,12 @@ u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val)
22028 }
22029 EXPORT_SYMBOL(atomic64_cmpxchg);
22030
22031 +u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val)
22032 +{
22033 + return cmpxchg8b(&ptr->counter, old_val, new_val);
22034 +}
22035 +EXPORT_SYMBOL(atomic64_cmpxchg_unchecked);
22036 +
22037 /**
22038 * atomic64_xchg - xchg atomic64 variable
22039 * @ptr: pointer to type atomic64_t
22040 @@ -56,6 +62,36 @@ u64 atomic64_xchg(atomic64_t *ptr, u64 new_val)
22041 EXPORT_SYMBOL(atomic64_xchg);
22042
22043 /**
22044 + * atomic64_xchg_unchecked - xchg atomic64 variable
22045 + * @ptr: pointer to type atomic64_unchecked_t
22046 + * @new_val: value to assign
22047 + *
22048 + * Atomically xchgs the value of @ptr to @new_val and returns
22049 + * the old value.
22050 + */
22051 +u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
22052 +{
22053 + /*
22054 + * Try first with a (possibly incorrect) assumption about
22055 + * what we have there. We'll do two loops most likely,
22056 + * but we'll get an ownership MESI transaction straight away
22057 + * instead of a read transaction followed by a
22058 + * flush-for-ownership transaction:
22059 + */
22060 + u64 old_val, real_val = 0;
22061 +
22062 + do {
22063 + old_val = real_val;
22064 +
22065 + real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
22066 +
22067 + } while (real_val != old_val);
22068 +
22069 + return old_val;
22070 +}
22071 +EXPORT_SYMBOL(atomic64_xchg_unchecked);
22072 +
22073 +/**
22074 * atomic64_set - set atomic64 variable
22075 * @ptr: pointer to type atomic64_t
22076 * @new_val: value to assign
22077 @@ -69,7 +105,19 @@ void atomic64_set(atomic64_t *ptr, u64 new_val)
22078 EXPORT_SYMBOL(atomic64_set);
22079
22080 /**
22081 -EXPORT_SYMBOL(atomic64_read);
22082 + * atomic64_unchecked_set - set atomic64 variable
22083 + * @ptr: pointer to type atomic64_unchecked_t
22084 + * @new_val: value to assign
22085 + *
22086 + * Atomically sets the value of @ptr to @new_val.
22087 + */
22088 +void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
22089 +{
22090 + atomic64_xchg_unchecked(ptr, new_val);
22091 +}
22092 +EXPORT_SYMBOL(atomic64_set_unchecked);
22093 +
22094 +/**
22095 * atomic64_add_return - add and return
22096 * @delta: integer value to add
22097 * @ptr: pointer to type atomic64_t
22098 @@ -99,24 +147,72 @@ noinline u64 atomic64_add_return(u64 delta, atomic64_t *ptr)
22099 }
22100 EXPORT_SYMBOL(atomic64_add_return);
22101
22102 +/**
22103 + * atomic64_add_return_unchecked - add and return
22104 + * @delta: integer value to add
22105 + * @ptr: pointer to type atomic64_unchecked_t
22106 + *
22107 + * Atomically adds @delta to @ptr and returns @delta + *@ptr
22108 + */
22109 +noinline u64 atomic64_add_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
22110 +{
22111 + /*
22112 + * Try first with a (possibly incorrect) assumption about
22113 + * what we have there. We'll do two loops most likely,
22114 + * but we'll get an ownership MESI transaction straight away
22115 + * instead of a read transaction followed by a
22116 + * flush-for-ownership transaction:
22117 + */
22118 + u64 old_val, new_val, real_val = 0;
22119 +
22120 + do {
22121 + old_val = real_val;
22122 + new_val = old_val + delta;
22123 +
22124 + real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
22125 +
22126 + } while (real_val != old_val);
22127 +
22128 + return new_val;
22129 +}
22130 +EXPORT_SYMBOL(atomic64_add_return_unchecked);
22131 +
22132 u64 atomic64_sub_return(u64 delta, atomic64_t *ptr)
22133 {
22134 return atomic64_add_return(-delta, ptr);
22135 }
22136 EXPORT_SYMBOL(atomic64_sub_return);
22137
22138 +u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
22139 +{
22140 + return atomic64_add_return_unchecked(-delta, ptr);
22141 +}
22142 +EXPORT_SYMBOL(atomic64_sub_return_unchecked);
22143 +
22144 u64 atomic64_inc_return(atomic64_t *ptr)
22145 {
22146 return atomic64_add_return(1, ptr);
22147 }
22148 EXPORT_SYMBOL(atomic64_inc_return);
22149
22150 +u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr)
22151 +{
22152 + return atomic64_add_return_unchecked(1, ptr);
22153 +}
22154 +EXPORT_SYMBOL(atomic64_inc_return_unchecked);
22155 +
22156 u64 atomic64_dec_return(atomic64_t *ptr)
22157 {
22158 return atomic64_sub_return(1, ptr);
22159 }
22160 EXPORT_SYMBOL(atomic64_dec_return);
22161
22162 +u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr)
22163 +{
22164 + return atomic64_sub_return_unchecked(1, ptr);
22165 +}
22166 +EXPORT_SYMBOL(atomic64_dec_return_unchecked);
22167 +
22168 /**
22169 * atomic64_add - add integer to atomic64 variable
22170 * @delta: integer value to add
22171 @@ -131,6 +227,19 @@ void atomic64_add(u64 delta, atomic64_t *ptr)
22172 EXPORT_SYMBOL(atomic64_add);
22173
22174 /**
22175 + * atomic64_add_unchecked - add integer to atomic64 variable
22176 + * @delta: integer value to add
22177 + * @ptr: pointer to type atomic64_unchecked_t
22178 + *
22179 + * Atomically adds @delta to @ptr.
22180 + */
22181 +void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr)
22182 +{
22183 + atomic64_add_return_unchecked(delta, ptr);
22184 +}
22185 +EXPORT_SYMBOL(atomic64_add_unchecked);
22186 +
22187 +/**
22188 * atomic64_sub - subtract the atomic64 variable
22189 * @delta: integer value to subtract
22190 * @ptr: pointer to type atomic64_t
22191 @@ -144,6 +253,19 @@ void atomic64_sub(u64 delta, atomic64_t *ptr)
22192 EXPORT_SYMBOL(atomic64_sub);
22193
22194 /**
22195 + * atomic64_sub_unchecked - subtract the atomic64 variable
22196 + * @delta: integer value to subtract
22197 + * @ptr: pointer to type atomic64_unchecked_t
22198 + *
22199 + * Atomically subtracts @delta from @ptr.
22200 + */
22201 +void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr)
22202 +{
22203 + atomic64_add_unchecked(-delta, ptr);
22204 +}
22205 +EXPORT_SYMBOL(atomic64_sub_unchecked);
22206 +
22207 +/**
22208 * atomic64_sub_and_test - subtract value from variable and test result
22209 * @delta: integer value to subtract
22210 * @ptr: pointer to type atomic64_t
22211 @@ -173,6 +295,18 @@ void atomic64_inc(atomic64_t *ptr)
22212 EXPORT_SYMBOL(atomic64_inc);
22213
22214 /**
22215 + * atomic64_inc_unchecked - increment atomic64 variable
22216 + * @ptr: pointer to type atomic64_unchecked_t
22217 + *
22218 + * Atomically increments @ptr by 1.
22219 + */
22220 +void atomic64_inc_unchecked(atomic64_unchecked_t *ptr)
22221 +{
22222 + atomic64_add_unchecked(1, ptr);
22223 +}
22224 +EXPORT_SYMBOL(atomic64_inc_unchecked);
22225 +
22226 +/**
22227 * atomic64_dec - decrement atomic64 variable
22228 * @ptr: pointer to type atomic64_t
22229 *
22230 @@ -185,6 +319,18 @@ void atomic64_dec(atomic64_t *ptr)
22231 EXPORT_SYMBOL(atomic64_dec);
22232
22233 /**
22234 + * atomic64_dec_unchecked - decrement atomic64 variable
22235 + * @ptr: pointer to type atomic64_unchecked_t
22236 + *
22237 + * Atomically decrements @ptr by 1.
22238 + */
22239 +void atomic64_dec_unchecked(atomic64_unchecked_t *ptr)
22240 +{
22241 + atomic64_sub_unchecked(1, ptr);
22242 +}
22243 +EXPORT_SYMBOL(atomic64_dec_unchecked);
22244 +
22245 +/**
22246 * atomic64_dec_and_test - decrement and test
22247 * @ptr: pointer to type atomic64_t
22248 *
22249 diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
22250 index adbccd0..98f96c8 100644
22251 --- a/arch/x86/lib/checksum_32.S
22252 +++ b/arch/x86/lib/checksum_32.S
22253 @@ -28,7 +28,8 @@
22254 #include <linux/linkage.h>
22255 #include <asm/dwarf2.h>
22256 #include <asm/errno.h>
22257 -
22258 +#include <asm/segment.h>
22259 +
22260 /*
22261 * computes a partial checksum, e.g. for TCP/UDP fragments
22262 */
22263 @@ -304,9 +305,28 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
22264
22265 #define ARGBASE 16
22266 #define FP 12
22267 -
22268 -ENTRY(csum_partial_copy_generic)
22269 +
22270 +ENTRY(csum_partial_copy_generic_to_user)
22271 CFI_STARTPROC
22272 +
22273 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22274 + pushl %gs
22275 + CFI_ADJUST_CFA_OFFSET 4
22276 + popl %es
22277 + CFI_ADJUST_CFA_OFFSET -4
22278 + jmp csum_partial_copy_generic
22279 +#endif
22280 +
22281 +ENTRY(csum_partial_copy_generic_from_user)
22282 +
22283 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22284 + pushl %gs
22285 + CFI_ADJUST_CFA_OFFSET 4
22286 + popl %ds
22287 + CFI_ADJUST_CFA_OFFSET -4
22288 +#endif
22289 +
22290 +ENTRY(csum_partial_copy_generic)
22291 subl $4,%esp
22292 CFI_ADJUST_CFA_OFFSET 4
22293 pushl %edi
22294 @@ -331,7 +351,7 @@ ENTRY(csum_partial_copy_generic)
22295 jmp 4f
22296 SRC(1: movw (%esi), %bx )
22297 addl $2, %esi
22298 -DST( movw %bx, (%edi) )
22299 +DST( movw %bx, %es:(%edi) )
22300 addl $2, %edi
22301 addw %bx, %ax
22302 adcl $0, %eax
22303 @@ -343,30 +363,30 @@ DST( movw %bx, (%edi) )
22304 SRC(1: movl (%esi), %ebx )
22305 SRC( movl 4(%esi), %edx )
22306 adcl %ebx, %eax
22307 -DST( movl %ebx, (%edi) )
22308 +DST( movl %ebx, %es:(%edi) )
22309 adcl %edx, %eax
22310 -DST( movl %edx, 4(%edi) )
22311 +DST( movl %edx, %es:4(%edi) )
22312
22313 SRC( movl 8(%esi), %ebx )
22314 SRC( movl 12(%esi), %edx )
22315 adcl %ebx, %eax
22316 -DST( movl %ebx, 8(%edi) )
22317 +DST( movl %ebx, %es:8(%edi) )
22318 adcl %edx, %eax
22319 -DST( movl %edx, 12(%edi) )
22320 +DST( movl %edx, %es:12(%edi) )
22321
22322 SRC( movl 16(%esi), %ebx )
22323 SRC( movl 20(%esi), %edx )
22324 adcl %ebx, %eax
22325 -DST( movl %ebx, 16(%edi) )
22326 +DST( movl %ebx, %es:16(%edi) )
22327 adcl %edx, %eax
22328 -DST( movl %edx, 20(%edi) )
22329 +DST( movl %edx, %es:20(%edi) )
22330
22331 SRC( movl 24(%esi), %ebx )
22332 SRC( movl 28(%esi), %edx )
22333 adcl %ebx, %eax
22334 -DST( movl %ebx, 24(%edi) )
22335 +DST( movl %ebx, %es:24(%edi) )
22336 adcl %edx, %eax
22337 -DST( movl %edx, 28(%edi) )
22338 +DST( movl %edx, %es:28(%edi) )
22339
22340 lea 32(%esi), %esi
22341 lea 32(%edi), %edi
22342 @@ -380,7 +400,7 @@ DST( movl %edx, 28(%edi) )
22343 shrl $2, %edx # This clears CF
22344 SRC(3: movl (%esi), %ebx )
22345 adcl %ebx, %eax
22346 -DST( movl %ebx, (%edi) )
22347 +DST( movl %ebx, %es:(%edi) )
22348 lea 4(%esi), %esi
22349 lea 4(%edi), %edi
22350 dec %edx
22351 @@ -392,12 +412,12 @@ DST( movl %ebx, (%edi) )
22352 jb 5f
22353 SRC( movw (%esi), %cx )
22354 leal 2(%esi), %esi
22355 -DST( movw %cx, (%edi) )
22356 +DST( movw %cx, %es:(%edi) )
22357 leal 2(%edi), %edi
22358 je 6f
22359 shll $16,%ecx
22360 SRC(5: movb (%esi), %cl )
22361 -DST( movb %cl, (%edi) )
22362 +DST( movb %cl, %es:(%edi) )
22363 6: addl %ecx, %eax
22364 adcl $0, %eax
22365 7:
22366 @@ -408,7 +428,7 @@ DST( movb %cl, (%edi) )
22367
22368 6001:
22369 movl ARGBASE+20(%esp), %ebx # src_err_ptr
22370 - movl $-EFAULT, (%ebx)
22371 + movl $-EFAULT, %ss:(%ebx)
22372
22373 # zero the complete destination - computing the rest
22374 # is too much work
22375 @@ -421,11 +441,19 @@ DST( movb %cl, (%edi) )
22376
22377 6002:
22378 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
22379 - movl $-EFAULT,(%ebx)
22380 + movl $-EFAULT,%ss:(%ebx)
22381 jmp 5000b
22382
22383 .previous
22384
22385 + pushl %ss
22386 + CFI_ADJUST_CFA_OFFSET 4
22387 + popl %ds
22388 + CFI_ADJUST_CFA_OFFSET -4
22389 + pushl %ss
22390 + CFI_ADJUST_CFA_OFFSET 4
22391 + popl %es
22392 + CFI_ADJUST_CFA_OFFSET -4
22393 popl %ebx
22394 CFI_ADJUST_CFA_OFFSET -4
22395 CFI_RESTORE ebx
22396 @@ -439,26 +467,47 @@ DST( movb %cl, (%edi) )
22397 CFI_ADJUST_CFA_OFFSET -4
22398 ret
22399 CFI_ENDPROC
22400 -ENDPROC(csum_partial_copy_generic)
22401 +ENDPROC(csum_partial_copy_generic_to_user)
22402
22403 #else
22404
22405 /* Version for PentiumII/PPro */
22406
22407 #define ROUND1(x) \
22408 + nop; nop; nop; \
22409 SRC(movl x(%esi), %ebx ) ; \
22410 addl %ebx, %eax ; \
22411 - DST(movl %ebx, x(%edi) ) ;
22412 + DST(movl %ebx, %es:x(%edi)) ;
22413
22414 #define ROUND(x) \
22415 + nop; nop; nop; \
22416 SRC(movl x(%esi), %ebx ) ; \
22417 adcl %ebx, %eax ; \
22418 - DST(movl %ebx, x(%edi) ) ;
22419 + DST(movl %ebx, %es:x(%edi)) ;
22420
22421 #define ARGBASE 12
22422 -
22423 -ENTRY(csum_partial_copy_generic)
22424 +
22425 +ENTRY(csum_partial_copy_generic_to_user)
22426 CFI_STARTPROC
22427 +
22428 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22429 + pushl %gs
22430 + CFI_ADJUST_CFA_OFFSET 4
22431 + popl %es
22432 + CFI_ADJUST_CFA_OFFSET -4
22433 + jmp csum_partial_copy_generic
22434 +#endif
22435 +
22436 +ENTRY(csum_partial_copy_generic_from_user)
22437 +
22438 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22439 + pushl %gs
22440 + CFI_ADJUST_CFA_OFFSET 4
22441 + popl %ds
22442 + CFI_ADJUST_CFA_OFFSET -4
22443 +#endif
22444 +
22445 +ENTRY(csum_partial_copy_generic)
22446 pushl %ebx
22447 CFI_ADJUST_CFA_OFFSET 4
22448 CFI_REL_OFFSET ebx, 0
22449 @@ -482,7 +531,7 @@ ENTRY(csum_partial_copy_generic)
22450 subl %ebx, %edi
22451 lea -1(%esi),%edx
22452 andl $-32,%edx
22453 - lea 3f(%ebx,%ebx), %ebx
22454 + lea 3f(%ebx,%ebx,2), %ebx
22455 testl %esi, %esi
22456 jmp *%ebx
22457 1: addl $64,%esi
22458 @@ -503,19 +552,19 @@ ENTRY(csum_partial_copy_generic)
22459 jb 5f
22460 SRC( movw (%esi), %dx )
22461 leal 2(%esi), %esi
22462 -DST( movw %dx, (%edi) )
22463 +DST( movw %dx, %es:(%edi) )
22464 leal 2(%edi), %edi
22465 je 6f
22466 shll $16,%edx
22467 5:
22468 SRC( movb (%esi), %dl )
22469 -DST( movb %dl, (%edi) )
22470 +DST( movb %dl, %es:(%edi) )
22471 6: addl %edx, %eax
22472 adcl $0, %eax
22473 7:
22474 .section .fixup, "ax"
22475 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
22476 - movl $-EFAULT, (%ebx)
22477 + movl $-EFAULT, %ss:(%ebx)
22478 # zero the complete destination (computing the rest is too much work)
22479 movl ARGBASE+8(%esp),%edi # dst
22480 movl ARGBASE+12(%esp),%ecx # len
22481 @@ -523,10 +572,21 @@ DST( movb %dl, (%edi) )
22482 rep; stosb
22483 jmp 7b
22484 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
22485 - movl $-EFAULT, (%ebx)
22486 + movl $-EFAULT, %ss:(%ebx)
22487 jmp 7b
22488 .previous
22489
22490 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22491 + pushl %ss
22492 + CFI_ADJUST_CFA_OFFSET 4
22493 + popl %ds
22494 + CFI_ADJUST_CFA_OFFSET -4
22495 + pushl %ss
22496 + CFI_ADJUST_CFA_OFFSET 4
22497 + popl %es
22498 + CFI_ADJUST_CFA_OFFSET -4
22499 +#endif
22500 +
22501 popl %esi
22502 CFI_ADJUST_CFA_OFFSET -4
22503 CFI_RESTORE esi
22504 @@ -538,7 +598,7 @@ DST( movb %dl, (%edi) )
22505 CFI_RESTORE ebx
22506 ret
22507 CFI_ENDPROC
22508 -ENDPROC(csum_partial_copy_generic)
22509 +ENDPROC(csum_partial_copy_generic_to_user)
22510
22511 #undef ROUND
22512 #undef ROUND1
22513 diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
22514 index ebeafcc..1e3a402 100644
22515 --- a/arch/x86/lib/clear_page_64.S
22516 +++ b/arch/x86/lib/clear_page_64.S
22517 @@ -1,5 +1,6 @@
22518 #include <linux/linkage.h>
22519 #include <asm/dwarf2.h>
22520 +#include <asm/alternative-asm.h>
22521
22522 /*
22523 * Zero a page.
22524 @@ -10,6 +11,7 @@ ENTRY(clear_page_c)
22525 movl $4096/8,%ecx
22526 xorl %eax,%eax
22527 rep stosq
22528 + pax_force_retaddr
22529 ret
22530 CFI_ENDPROC
22531 ENDPROC(clear_page_c)
22532 @@ -33,6 +35,7 @@ ENTRY(clear_page)
22533 leaq 64(%rdi),%rdi
22534 jnz .Lloop
22535 nop
22536 + pax_force_retaddr
22537 ret
22538 CFI_ENDPROC
22539 .Lclear_page_end:
22540 @@ -43,7 +46,7 @@ ENDPROC(clear_page)
22541
22542 #include <asm/cpufeature.h>
22543
22544 - .section .altinstr_replacement,"ax"
22545 + .section .altinstr_replacement,"a"
22546 1: .byte 0xeb /* jmp <disp8> */
22547 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
22548 2:
22549 diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
22550 index 727a5d4..333818a 100644
22551 --- a/arch/x86/lib/copy_page_64.S
22552 +++ b/arch/x86/lib/copy_page_64.S
22553 @@ -2,12 +2,14 @@
22554
22555 #include <linux/linkage.h>
22556 #include <asm/dwarf2.h>
22557 +#include <asm/alternative-asm.h>
22558
22559 ALIGN
22560 copy_page_c:
22561 CFI_STARTPROC
22562 movl $4096/8,%ecx
22563 rep movsq
22564 + pax_force_retaddr
22565 ret
22566 CFI_ENDPROC
22567 ENDPROC(copy_page_c)
22568 @@ -38,7 +40,7 @@ ENTRY(copy_page)
22569 movq 16 (%rsi), %rdx
22570 movq 24 (%rsi), %r8
22571 movq 32 (%rsi), %r9
22572 - movq 40 (%rsi), %r10
22573 + movq 40 (%rsi), %r13
22574 movq 48 (%rsi), %r11
22575 movq 56 (%rsi), %r12
22576
22577 @@ -49,7 +51,7 @@ ENTRY(copy_page)
22578 movq %rdx, 16 (%rdi)
22579 movq %r8, 24 (%rdi)
22580 movq %r9, 32 (%rdi)
22581 - movq %r10, 40 (%rdi)
22582 + movq %r13, 40 (%rdi)
22583 movq %r11, 48 (%rdi)
22584 movq %r12, 56 (%rdi)
22585
22586 @@ -68,7 +70,7 @@ ENTRY(copy_page)
22587 movq 16 (%rsi), %rdx
22588 movq 24 (%rsi), %r8
22589 movq 32 (%rsi), %r9
22590 - movq 40 (%rsi), %r10
22591 + movq 40 (%rsi), %r13
22592 movq 48 (%rsi), %r11
22593 movq 56 (%rsi), %r12
22594
22595 @@ -77,7 +79,7 @@ ENTRY(copy_page)
22596 movq %rdx, 16 (%rdi)
22597 movq %r8, 24 (%rdi)
22598 movq %r9, 32 (%rdi)
22599 - movq %r10, 40 (%rdi)
22600 + movq %r13, 40 (%rdi)
22601 movq %r11, 48 (%rdi)
22602 movq %r12, 56 (%rdi)
22603
22604 @@ -94,6 +96,7 @@ ENTRY(copy_page)
22605 CFI_RESTORE r13
22606 addq $3*8,%rsp
22607 CFI_ADJUST_CFA_OFFSET -3*8
22608 + pax_force_retaddr
22609 ret
22610 .Lcopy_page_end:
22611 CFI_ENDPROC
22612 @@ -104,7 +107,7 @@ ENDPROC(copy_page)
22613
22614 #include <asm/cpufeature.h>
22615
22616 - .section .altinstr_replacement,"ax"
22617 + .section .altinstr_replacement,"a"
22618 1: .byte 0xeb /* jmp <disp8> */
22619 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
22620 2:
22621 diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
22622 index af8debd..40c75f3 100644
22623 --- a/arch/x86/lib/copy_user_64.S
22624 +++ b/arch/x86/lib/copy_user_64.S
22625 @@ -15,13 +15,15 @@
22626 #include <asm/asm-offsets.h>
22627 #include <asm/thread_info.h>
22628 #include <asm/cpufeature.h>
22629 +#include <asm/pgtable.h>
22630 +#include <asm/alternative-asm.h>
22631
22632 .macro ALTERNATIVE_JUMP feature,orig,alt
22633 0:
22634 .byte 0xe9 /* 32bit jump */
22635 .long \orig-1f /* by default jump to orig */
22636 1:
22637 - .section .altinstr_replacement,"ax"
22638 + .section .altinstr_replacement,"a"
22639 2: .byte 0xe9 /* near jump with 32bit immediate */
22640 .long \alt-1b /* offset */ /* or alternatively to alt */
22641 .previous
22642 @@ -64,55 +66,26 @@
22643 #endif
22644 .endm
22645
22646 -/* Standard copy_to_user with segment limit checking */
22647 -ENTRY(copy_to_user)
22648 - CFI_STARTPROC
22649 - GET_THREAD_INFO(%rax)
22650 - movq %rdi,%rcx
22651 - addq %rdx,%rcx
22652 - jc bad_to_user
22653 - cmpq TI_addr_limit(%rax),%rcx
22654 - ja bad_to_user
22655 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
22656 - CFI_ENDPROC
22657 -ENDPROC(copy_to_user)
22658 -
22659 -/* Standard copy_from_user with segment limit checking */
22660 -ENTRY(copy_from_user)
22661 - CFI_STARTPROC
22662 - GET_THREAD_INFO(%rax)
22663 - movq %rsi,%rcx
22664 - addq %rdx,%rcx
22665 - jc bad_from_user
22666 - cmpq TI_addr_limit(%rax),%rcx
22667 - ja bad_from_user
22668 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
22669 - CFI_ENDPROC
22670 -ENDPROC(copy_from_user)
22671 -
22672 ENTRY(copy_user_generic)
22673 CFI_STARTPROC
22674 ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
22675 CFI_ENDPROC
22676 ENDPROC(copy_user_generic)
22677
22678 -ENTRY(__copy_from_user_inatomic)
22679 - CFI_STARTPROC
22680 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
22681 - CFI_ENDPROC
22682 -ENDPROC(__copy_from_user_inatomic)
22683 -
22684 .section .fixup,"ax"
22685 /* must zero dest */
22686 ENTRY(bad_from_user)
22687 bad_from_user:
22688 CFI_STARTPROC
22689 + testl %edx,%edx
22690 + js bad_to_user
22691 movl %edx,%ecx
22692 xorl %eax,%eax
22693 rep
22694 stosb
22695 bad_to_user:
22696 movl %edx,%eax
22697 + pax_force_retaddr
22698 ret
22699 CFI_ENDPROC
22700 ENDPROC(bad_from_user)
22701 @@ -142,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
22702 jz 17f
22703 1: movq (%rsi),%r8
22704 2: movq 1*8(%rsi),%r9
22705 -3: movq 2*8(%rsi),%r10
22706 +3: movq 2*8(%rsi),%rax
22707 4: movq 3*8(%rsi),%r11
22708 5: movq %r8,(%rdi)
22709 6: movq %r9,1*8(%rdi)
22710 -7: movq %r10,2*8(%rdi)
22711 +7: movq %rax,2*8(%rdi)
22712 8: movq %r11,3*8(%rdi)
22713 9: movq 4*8(%rsi),%r8
22714 10: movq 5*8(%rsi),%r9
22715 -11: movq 6*8(%rsi),%r10
22716 +11: movq 6*8(%rsi),%rax
22717 12: movq 7*8(%rsi),%r11
22718 13: movq %r8,4*8(%rdi)
22719 14: movq %r9,5*8(%rdi)
22720 -15: movq %r10,6*8(%rdi)
22721 +15: movq %rax,6*8(%rdi)
22722 16: movq %r11,7*8(%rdi)
22723 leaq 64(%rsi),%rsi
22724 leaq 64(%rdi),%rdi
22725 @@ -180,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
22726 decl %ecx
22727 jnz 21b
22728 23: xor %eax,%eax
22729 + pax_force_retaddr
22730 ret
22731
22732 .section .fixup,"ax"
22733 @@ -252,6 +226,7 @@ ENTRY(copy_user_generic_string)
22734 3: rep
22735 movsb
22736 4: xorl %eax,%eax
22737 + pax_force_retaddr
22738 ret
22739
22740 .section .fixup,"ax"
22741 diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
22742 index cb0c112..e3a6895 100644
22743 --- a/arch/x86/lib/copy_user_nocache_64.S
22744 +++ b/arch/x86/lib/copy_user_nocache_64.S
22745 @@ -8,12 +8,14 @@
22746
22747 #include <linux/linkage.h>
22748 #include <asm/dwarf2.h>
22749 +#include <asm/alternative-asm.h>
22750
22751 #define FIX_ALIGNMENT 1
22752
22753 #include <asm/current.h>
22754 #include <asm/asm-offsets.h>
22755 #include <asm/thread_info.h>
22756 +#include <asm/pgtable.h>
22757
22758 .macro ALIGN_DESTINATION
22759 #ifdef FIX_ALIGNMENT
22760 @@ -50,6 +52,15 @@
22761 */
22762 ENTRY(__copy_user_nocache)
22763 CFI_STARTPROC
22764 +
22765 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22766 + mov $PAX_USER_SHADOW_BASE,%rcx
22767 + cmp %rcx,%rsi
22768 + jae 1f
22769 + add %rcx,%rsi
22770 +1:
22771 +#endif
22772 +
22773 cmpl $8,%edx
22774 jb 20f /* less then 8 bytes, go to byte copy loop */
22775 ALIGN_DESTINATION
22776 @@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
22777 jz 17f
22778 1: movq (%rsi),%r8
22779 2: movq 1*8(%rsi),%r9
22780 -3: movq 2*8(%rsi),%r10
22781 +3: movq 2*8(%rsi),%rax
22782 4: movq 3*8(%rsi),%r11
22783 5: movnti %r8,(%rdi)
22784 6: movnti %r9,1*8(%rdi)
22785 -7: movnti %r10,2*8(%rdi)
22786 +7: movnti %rax,2*8(%rdi)
22787 8: movnti %r11,3*8(%rdi)
22788 9: movq 4*8(%rsi),%r8
22789 10: movq 5*8(%rsi),%r9
22790 -11: movq 6*8(%rsi),%r10
22791 +11: movq 6*8(%rsi),%rax
22792 12: movq 7*8(%rsi),%r11
22793 13: movnti %r8,4*8(%rdi)
22794 14: movnti %r9,5*8(%rdi)
22795 -15: movnti %r10,6*8(%rdi)
22796 +15: movnti %rax,6*8(%rdi)
22797 16: movnti %r11,7*8(%rdi)
22798 leaq 64(%rsi),%rsi
22799 leaq 64(%rdi),%rdi
22800 @@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
22801 jnz 21b
22802 23: xorl %eax,%eax
22803 sfence
22804 + pax_force_retaddr
22805 ret
22806
22807 .section .fixup,"ax"
22808 diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
22809 index f0dba36..48cb4d6 100644
22810 --- a/arch/x86/lib/csum-copy_64.S
22811 +++ b/arch/x86/lib/csum-copy_64.S
22812 @@ -8,6 +8,7 @@
22813 #include <linux/linkage.h>
22814 #include <asm/dwarf2.h>
22815 #include <asm/errno.h>
22816 +#include <asm/alternative-asm.h>
22817
22818 /*
22819 * Checksum copy with exception handling.
22820 @@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
22821 CFI_RESTORE rbp
22822 addq $7*8,%rsp
22823 CFI_ADJUST_CFA_OFFSET -7*8
22824 + pax_force_retaddr 0, 1
22825 ret
22826 CFI_RESTORE_STATE
22827
22828 diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
22829 index 459b58a..9570bc7 100644
22830 --- a/arch/x86/lib/csum-wrappers_64.c
22831 +++ b/arch/x86/lib/csum-wrappers_64.c
22832 @@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
22833 len -= 2;
22834 }
22835 }
22836 - isum = csum_partial_copy_generic((__force const void *)src,
22837 +
22838 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22839 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
22840 + src += PAX_USER_SHADOW_BASE;
22841 +#endif
22842 +
22843 + isum = csum_partial_copy_generic((const void __force_kernel *)src,
22844 dst, len, isum, errp, NULL);
22845 if (unlikely(*errp))
22846 goto out_err;
22847 @@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
22848 }
22849
22850 *errp = 0;
22851 - return csum_partial_copy_generic(src, (void __force *)dst,
22852 +
22853 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22854 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
22855 + dst += PAX_USER_SHADOW_BASE;
22856 +#endif
22857 +
22858 + return csum_partial_copy_generic(src, (void __force_kernel *)dst,
22859 len, isum, NULL, errp);
22860 }
22861 EXPORT_SYMBOL(csum_partial_copy_to_user);
22862 diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
22863 index 51f1504..ddac4c1 100644
22864 --- a/arch/x86/lib/getuser.S
22865 +++ b/arch/x86/lib/getuser.S
22866 @@ -33,15 +33,38 @@
22867 #include <asm/asm-offsets.h>
22868 #include <asm/thread_info.h>
22869 #include <asm/asm.h>
22870 +#include <asm/segment.h>
22871 +#include <asm/pgtable.h>
22872 +#include <asm/alternative-asm.h>
22873 +
22874 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
22875 +#define __copyuser_seg gs;
22876 +#else
22877 +#define __copyuser_seg
22878 +#endif
22879
22880 .text
22881 ENTRY(__get_user_1)
22882 CFI_STARTPROC
22883 +
22884 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22885 GET_THREAD_INFO(%_ASM_DX)
22886 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22887 jae bad_get_user
22888 -1: movzb (%_ASM_AX),%edx
22889 +
22890 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22891 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22892 + cmp %_ASM_DX,%_ASM_AX
22893 + jae 1234f
22894 + add %_ASM_DX,%_ASM_AX
22895 +1234:
22896 +#endif
22897 +
22898 +#endif
22899 +
22900 +1: __copyuser_seg movzb (%_ASM_AX),%edx
22901 xor %eax,%eax
22902 + pax_force_retaddr
22903 ret
22904 CFI_ENDPROC
22905 ENDPROC(__get_user_1)
22906 @@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
22907 ENTRY(__get_user_2)
22908 CFI_STARTPROC
22909 add $1,%_ASM_AX
22910 +
22911 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22912 jc bad_get_user
22913 GET_THREAD_INFO(%_ASM_DX)
22914 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22915 jae bad_get_user
22916 -2: movzwl -1(%_ASM_AX),%edx
22917 +
22918 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22919 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22920 + cmp %_ASM_DX,%_ASM_AX
22921 + jae 1234f
22922 + add %_ASM_DX,%_ASM_AX
22923 +1234:
22924 +#endif
22925 +
22926 +#endif
22927 +
22928 +2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
22929 xor %eax,%eax
22930 + pax_force_retaddr
22931 ret
22932 CFI_ENDPROC
22933 ENDPROC(__get_user_2)
22934 @@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
22935 ENTRY(__get_user_4)
22936 CFI_STARTPROC
22937 add $3,%_ASM_AX
22938 +
22939 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22940 jc bad_get_user
22941 GET_THREAD_INFO(%_ASM_DX)
22942 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22943 jae bad_get_user
22944 -3: mov -3(%_ASM_AX),%edx
22945 +
22946 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22947 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22948 + cmp %_ASM_DX,%_ASM_AX
22949 + jae 1234f
22950 + add %_ASM_DX,%_ASM_AX
22951 +1234:
22952 +#endif
22953 +
22954 +#endif
22955 +
22956 +3: __copyuser_seg mov -3(%_ASM_AX),%edx
22957 xor %eax,%eax
22958 + pax_force_retaddr
22959 ret
22960 CFI_ENDPROC
22961 ENDPROC(__get_user_4)
22962 @@ -80,8 +131,18 @@ ENTRY(__get_user_8)
22963 GET_THREAD_INFO(%_ASM_DX)
22964 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22965 jae bad_get_user
22966 +
22967 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22968 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22969 + cmp %_ASM_DX,%_ASM_AX
22970 + jae 1234f
22971 + add %_ASM_DX,%_ASM_AX
22972 +1234:
22973 +#endif
22974 +
22975 4: movq -7(%_ASM_AX),%_ASM_DX
22976 xor %eax,%eax
22977 + pax_force_retaddr
22978 ret
22979 CFI_ENDPROC
22980 ENDPROC(__get_user_8)
22981 @@ -91,6 +152,7 @@ bad_get_user:
22982 CFI_STARTPROC
22983 xor %edx,%edx
22984 mov $(-EFAULT),%_ASM_AX
22985 + pax_force_retaddr
22986 ret
22987 CFI_ENDPROC
22988 END(bad_get_user)
22989 diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
22990 index 05a95e7..326f2fa 100644
22991 --- a/arch/x86/lib/iomap_copy_64.S
22992 +++ b/arch/x86/lib/iomap_copy_64.S
22993 @@ -17,6 +17,7 @@
22994
22995 #include <linux/linkage.h>
22996 #include <asm/dwarf2.h>
22997 +#include <asm/alternative-asm.h>
22998
22999 /*
23000 * override generic version in lib/iomap_copy.c
23001 @@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
23002 CFI_STARTPROC
23003 movl %edx,%ecx
23004 rep movsd
23005 + pax_force_retaddr
23006 ret
23007 CFI_ENDPROC
23008 ENDPROC(__iowrite32_copy)
23009 diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
23010 index ad5441e..610e351 100644
23011 --- a/arch/x86/lib/memcpy_64.S
23012 +++ b/arch/x86/lib/memcpy_64.S
23013 @@ -4,6 +4,7 @@
23014
23015 #include <asm/cpufeature.h>
23016 #include <asm/dwarf2.h>
23017 +#include <asm/alternative-asm.h>
23018
23019 /*
23020 * memcpy - Copy a memory block.
23021 @@ -34,6 +35,7 @@ memcpy_c:
23022 rep movsq
23023 movl %edx, %ecx
23024 rep movsb
23025 + pax_force_retaddr
23026 ret
23027 CFI_ENDPROC
23028 ENDPROC(memcpy_c)
23029 @@ -118,6 +120,7 @@ ENTRY(memcpy)
23030 jnz .Lloop_1
23031
23032 .Lend:
23033 + pax_force_retaddr 0, 1
23034 ret
23035 CFI_ENDPROC
23036 ENDPROC(memcpy)
23037 @@ -128,7 +131,7 @@ ENDPROC(__memcpy)
23038 * It is also a lot simpler. Use this when possible:
23039 */
23040
23041 - .section .altinstr_replacement, "ax"
23042 + .section .altinstr_replacement, "a"
23043 1: .byte 0xeb /* jmp <disp8> */
23044 .byte (memcpy_c - memcpy) - (2f - 1b) /* offset */
23045 2:
23046 diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
23047 index 2c59481..7e9ba4e 100644
23048 --- a/arch/x86/lib/memset_64.S
23049 +++ b/arch/x86/lib/memset_64.S
23050 @@ -2,6 +2,7 @@
23051
23052 #include <linux/linkage.h>
23053 #include <asm/dwarf2.h>
23054 +#include <asm/alternative-asm.h>
23055
23056 /*
23057 * ISO C memset - set a memory block to a byte value.
23058 @@ -28,6 +29,7 @@ memset_c:
23059 movl %r8d,%ecx
23060 rep stosb
23061 movq %r9,%rax
23062 + pax_force_retaddr
23063 ret
23064 CFI_ENDPROC
23065 ENDPROC(memset_c)
23066 @@ -35,13 +37,13 @@ ENDPROC(memset_c)
23067 ENTRY(memset)
23068 ENTRY(__memset)
23069 CFI_STARTPROC
23070 - movq %rdi,%r10
23071 movq %rdx,%r11
23072
23073 /* expand byte value */
23074 movzbl %sil,%ecx
23075 movabs $0x0101010101010101,%rax
23076 mul %rcx /* with rax, clobbers rdx */
23077 + movq %rdi,%rdx
23078
23079 /* align dst */
23080 movl %edi,%r9d
23081 @@ -95,7 +97,8 @@ ENTRY(__memset)
23082 jnz .Lloop_1
23083
23084 .Lende:
23085 - movq %r10,%rax
23086 + movq %rdx,%rax
23087 + pax_force_retaddr
23088 ret
23089
23090 CFI_RESTORE_STATE
23091 @@ -118,7 +121,7 @@ ENDPROC(__memset)
23092
23093 #include <asm/cpufeature.h>
23094
23095 - .section .altinstr_replacement,"ax"
23096 + .section .altinstr_replacement,"a"
23097 1: .byte 0xeb /* jmp <disp8> */
23098 .byte (memset_c - memset) - (2f - 1b) /* offset */
23099 2:
23100 diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
23101 index c9f2d9b..e7fd2c0 100644
23102 --- a/arch/x86/lib/mmx_32.c
23103 +++ b/arch/x86/lib/mmx_32.c
23104 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
23105 {
23106 void *p;
23107 int i;
23108 + unsigned long cr0;
23109
23110 if (unlikely(in_interrupt()))
23111 return __memcpy(to, from, len);
23112 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
23113 kernel_fpu_begin();
23114
23115 __asm__ __volatile__ (
23116 - "1: prefetch (%0)\n" /* This set is 28 bytes */
23117 - " prefetch 64(%0)\n"
23118 - " prefetch 128(%0)\n"
23119 - " prefetch 192(%0)\n"
23120 - " prefetch 256(%0)\n"
23121 + "1: prefetch (%1)\n" /* This set is 28 bytes */
23122 + " prefetch 64(%1)\n"
23123 + " prefetch 128(%1)\n"
23124 + " prefetch 192(%1)\n"
23125 + " prefetch 256(%1)\n"
23126 "2: \n"
23127 ".section .fixup, \"ax\"\n"
23128 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
23129 + "3: \n"
23130 +
23131 +#ifdef CONFIG_PAX_KERNEXEC
23132 + " movl %%cr0, %0\n"
23133 + " movl %0, %%eax\n"
23134 + " andl $0xFFFEFFFF, %%eax\n"
23135 + " movl %%eax, %%cr0\n"
23136 +#endif
23137 +
23138 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
23139 +
23140 +#ifdef CONFIG_PAX_KERNEXEC
23141 + " movl %0, %%cr0\n"
23142 +#endif
23143 +
23144 " jmp 2b\n"
23145 ".previous\n"
23146 _ASM_EXTABLE(1b, 3b)
23147 - : : "r" (from));
23148 + : "=&r" (cr0) : "r" (from) : "ax");
23149
23150 for ( ; i > 5; i--) {
23151 __asm__ __volatile__ (
23152 - "1: prefetch 320(%0)\n"
23153 - "2: movq (%0), %%mm0\n"
23154 - " movq 8(%0), %%mm1\n"
23155 - " movq 16(%0), %%mm2\n"
23156 - " movq 24(%0), %%mm3\n"
23157 - " movq %%mm0, (%1)\n"
23158 - " movq %%mm1, 8(%1)\n"
23159 - " movq %%mm2, 16(%1)\n"
23160 - " movq %%mm3, 24(%1)\n"
23161 - " movq 32(%0), %%mm0\n"
23162 - " movq 40(%0), %%mm1\n"
23163 - " movq 48(%0), %%mm2\n"
23164 - " movq 56(%0), %%mm3\n"
23165 - " movq %%mm0, 32(%1)\n"
23166 - " movq %%mm1, 40(%1)\n"
23167 - " movq %%mm2, 48(%1)\n"
23168 - " movq %%mm3, 56(%1)\n"
23169 + "1: prefetch 320(%1)\n"
23170 + "2: movq (%1), %%mm0\n"
23171 + " movq 8(%1), %%mm1\n"
23172 + " movq 16(%1), %%mm2\n"
23173 + " movq 24(%1), %%mm3\n"
23174 + " movq %%mm0, (%2)\n"
23175 + " movq %%mm1, 8(%2)\n"
23176 + " movq %%mm2, 16(%2)\n"
23177 + " movq %%mm3, 24(%2)\n"
23178 + " movq 32(%1), %%mm0\n"
23179 + " movq 40(%1), %%mm1\n"
23180 + " movq 48(%1), %%mm2\n"
23181 + " movq 56(%1), %%mm3\n"
23182 + " movq %%mm0, 32(%2)\n"
23183 + " movq %%mm1, 40(%2)\n"
23184 + " movq %%mm2, 48(%2)\n"
23185 + " movq %%mm3, 56(%2)\n"
23186 ".section .fixup, \"ax\"\n"
23187 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
23188 + "3:\n"
23189 +
23190 +#ifdef CONFIG_PAX_KERNEXEC
23191 + " movl %%cr0, %0\n"
23192 + " movl %0, %%eax\n"
23193 + " andl $0xFFFEFFFF, %%eax\n"
23194 + " movl %%eax, %%cr0\n"
23195 +#endif
23196 +
23197 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
23198 +
23199 +#ifdef CONFIG_PAX_KERNEXEC
23200 + " movl %0, %%cr0\n"
23201 +#endif
23202 +
23203 " jmp 2b\n"
23204 ".previous\n"
23205 _ASM_EXTABLE(1b, 3b)
23206 - : : "r" (from), "r" (to) : "memory");
23207 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
23208
23209 from += 64;
23210 to += 64;
23211 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
23212 static void fast_copy_page(void *to, void *from)
23213 {
23214 int i;
23215 + unsigned long cr0;
23216
23217 kernel_fpu_begin();
23218
23219 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
23220 * but that is for later. -AV
23221 */
23222 __asm__ __volatile__(
23223 - "1: prefetch (%0)\n"
23224 - " prefetch 64(%0)\n"
23225 - " prefetch 128(%0)\n"
23226 - " prefetch 192(%0)\n"
23227 - " prefetch 256(%0)\n"
23228 + "1: prefetch (%1)\n"
23229 + " prefetch 64(%1)\n"
23230 + " prefetch 128(%1)\n"
23231 + " prefetch 192(%1)\n"
23232 + " prefetch 256(%1)\n"
23233 "2: \n"
23234 ".section .fixup, \"ax\"\n"
23235 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
23236 + "3: \n"
23237 +
23238 +#ifdef CONFIG_PAX_KERNEXEC
23239 + " movl %%cr0, %0\n"
23240 + " movl %0, %%eax\n"
23241 + " andl $0xFFFEFFFF, %%eax\n"
23242 + " movl %%eax, %%cr0\n"
23243 +#endif
23244 +
23245 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
23246 +
23247 +#ifdef CONFIG_PAX_KERNEXEC
23248 + " movl %0, %%cr0\n"
23249 +#endif
23250 +
23251 " jmp 2b\n"
23252 ".previous\n"
23253 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
23254 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
23255
23256 for (i = 0; i < (4096-320)/64; i++) {
23257 __asm__ __volatile__ (
23258 - "1: prefetch 320(%0)\n"
23259 - "2: movq (%0), %%mm0\n"
23260 - " movntq %%mm0, (%1)\n"
23261 - " movq 8(%0), %%mm1\n"
23262 - " movntq %%mm1, 8(%1)\n"
23263 - " movq 16(%0), %%mm2\n"
23264 - " movntq %%mm2, 16(%1)\n"
23265 - " movq 24(%0), %%mm3\n"
23266 - " movntq %%mm3, 24(%1)\n"
23267 - " movq 32(%0), %%mm4\n"
23268 - " movntq %%mm4, 32(%1)\n"
23269 - " movq 40(%0), %%mm5\n"
23270 - " movntq %%mm5, 40(%1)\n"
23271 - " movq 48(%0), %%mm6\n"
23272 - " movntq %%mm6, 48(%1)\n"
23273 - " movq 56(%0), %%mm7\n"
23274 - " movntq %%mm7, 56(%1)\n"
23275 + "1: prefetch 320(%1)\n"
23276 + "2: movq (%1), %%mm0\n"
23277 + " movntq %%mm0, (%2)\n"
23278 + " movq 8(%1), %%mm1\n"
23279 + " movntq %%mm1, 8(%2)\n"
23280 + " movq 16(%1), %%mm2\n"
23281 + " movntq %%mm2, 16(%2)\n"
23282 + " movq 24(%1), %%mm3\n"
23283 + " movntq %%mm3, 24(%2)\n"
23284 + " movq 32(%1), %%mm4\n"
23285 + " movntq %%mm4, 32(%2)\n"
23286 + " movq 40(%1), %%mm5\n"
23287 + " movntq %%mm5, 40(%2)\n"
23288 + " movq 48(%1), %%mm6\n"
23289 + " movntq %%mm6, 48(%2)\n"
23290 + " movq 56(%1), %%mm7\n"
23291 + " movntq %%mm7, 56(%2)\n"
23292 ".section .fixup, \"ax\"\n"
23293 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
23294 + "3:\n"
23295 +
23296 +#ifdef CONFIG_PAX_KERNEXEC
23297 + " movl %%cr0, %0\n"
23298 + " movl %0, %%eax\n"
23299 + " andl $0xFFFEFFFF, %%eax\n"
23300 + " movl %%eax, %%cr0\n"
23301 +#endif
23302 +
23303 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
23304 +
23305 +#ifdef CONFIG_PAX_KERNEXEC
23306 + " movl %0, %%cr0\n"
23307 +#endif
23308 +
23309 " jmp 2b\n"
23310 ".previous\n"
23311 - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
23312 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
23313
23314 from += 64;
23315 to += 64;
23316 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
23317 static void fast_copy_page(void *to, void *from)
23318 {
23319 int i;
23320 + unsigned long cr0;
23321
23322 kernel_fpu_begin();
23323
23324 __asm__ __volatile__ (
23325 - "1: prefetch (%0)\n"
23326 - " prefetch 64(%0)\n"
23327 - " prefetch 128(%0)\n"
23328 - " prefetch 192(%0)\n"
23329 - " prefetch 256(%0)\n"
23330 + "1: prefetch (%1)\n"
23331 + " prefetch 64(%1)\n"
23332 + " prefetch 128(%1)\n"
23333 + " prefetch 192(%1)\n"
23334 + " prefetch 256(%1)\n"
23335 "2: \n"
23336 ".section .fixup, \"ax\"\n"
23337 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
23338 + "3: \n"
23339 +
23340 +#ifdef CONFIG_PAX_KERNEXEC
23341 + " movl %%cr0, %0\n"
23342 + " movl %0, %%eax\n"
23343 + " andl $0xFFFEFFFF, %%eax\n"
23344 + " movl %%eax, %%cr0\n"
23345 +#endif
23346 +
23347 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
23348 +
23349 +#ifdef CONFIG_PAX_KERNEXEC
23350 + " movl %0, %%cr0\n"
23351 +#endif
23352 +
23353 " jmp 2b\n"
23354 ".previous\n"
23355 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
23356 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
23357
23358 for (i = 0; i < 4096/64; i++) {
23359 __asm__ __volatile__ (
23360 - "1: prefetch 320(%0)\n"
23361 - "2: movq (%0), %%mm0\n"
23362 - " movq 8(%0), %%mm1\n"
23363 - " movq 16(%0), %%mm2\n"
23364 - " movq 24(%0), %%mm3\n"
23365 - " movq %%mm0, (%1)\n"
23366 - " movq %%mm1, 8(%1)\n"
23367 - " movq %%mm2, 16(%1)\n"
23368 - " movq %%mm3, 24(%1)\n"
23369 - " movq 32(%0), %%mm0\n"
23370 - " movq 40(%0), %%mm1\n"
23371 - " movq 48(%0), %%mm2\n"
23372 - " movq 56(%0), %%mm3\n"
23373 - " movq %%mm0, 32(%1)\n"
23374 - " movq %%mm1, 40(%1)\n"
23375 - " movq %%mm2, 48(%1)\n"
23376 - " movq %%mm3, 56(%1)\n"
23377 + "1: prefetch 320(%1)\n"
23378 + "2: movq (%1), %%mm0\n"
23379 + " movq 8(%1), %%mm1\n"
23380 + " movq 16(%1), %%mm2\n"
23381 + " movq 24(%1), %%mm3\n"
23382 + " movq %%mm0, (%2)\n"
23383 + " movq %%mm1, 8(%2)\n"
23384 + " movq %%mm2, 16(%2)\n"
23385 + " movq %%mm3, 24(%2)\n"
23386 + " movq 32(%1), %%mm0\n"
23387 + " movq 40(%1), %%mm1\n"
23388 + " movq 48(%1), %%mm2\n"
23389 + " movq 56(%1), %%mm3\n"
23390 + " movq %%mm0, 32(%2)\n"
23391 + " movq %%mm1, 40(%2)\n"
23392 + " movq %%mm2, 48(%2)\n"
23393 + " movq %%mm3, 56(%2)\n"
23394 ".section .fixup, \"ax\"\n"
23395 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
23396 + "3:\n"
23397 +
23398 +#ifdef CONFIG_PAX_KERNEXEC
23399 + " movl %%cr0, %0\n"
23400 + " movl %0, %%eax\n"
23401 + " andl $0xFFFEFFFF, %%eax\n"
23402 + " movl %%eax, %%cr0\n"
23403 +#endif
23404 +
23405 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
23406 +
23407 +#ifdef CONFIG_PAX_KERNEXEC
23408 + " movl %0, %%cr0\n"
23409 +#endif
23410 +
23411 " jmp 2b\n"
23412 ".previous\n"
23413 _ASM_EXTABLE(1b, 3b)
23414 - : : "r" (from), "r" (to) : "memory");
23415 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
23416
23417 from += 64;
23418 to += 64;
23419 diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
23420 index 69fa106..adda88b 100644
23421 --- a/arch/x86/lib/msr-reg.S
23422 +++ b/arch/x86/lib/msr-reg.S
23423 @@ -3,6 +3,7 @@
23424 #include <asm/dwarf2.h>
23425 #include <asm/asm.h>
23426 #include <asm/msr.h>
23427 +#include <asm/alternative-asm.h>
23428
23429 #ifdef CONFIG_X86_64
23430 /*
23431 @@ -16,7 +17,7 @@ ENTRY(native_\op\()_safe_regs)
23432 CFI_STARTPROC
23433 pushq_cfi %rbx
23434 pushq_cfi %rbp
23435 - movq %rdi, %r10 /* Save pointer */
23436 + movq %rdi, %r9 /* Save pointer */
23437 xorl %r11d, %r11d /* Return value */
23438 movl (%rdi), %eax
23439 movl 4(%rdi), %ecx
23440 @@ -27,16 +28,17 @@ ENTRY(native_\op\()_safe_regs)
23441 movl 28(%rdi), %edi
23442 CFI_REMEMBER_STATE
23443 1: \op
23444 -2: movl %eax, (%r10)
23445 +2: movl %eax, (%r9)
23446 movl %r11d, %eax /* Return value */
23447 - movl %ecx, 4(%r10)
23448 - movl %edx, 8(%r10)
23449 - movl %ebx, 12(%r10)
23450 - movl %ebp, 20(%r10)
23451 - movl %esi, 24(%r10)
23452 - movl %edi, 28(%r10)
23453 + movl %ecx, 4(%r9)
23454 + movl %edx, 8(%r9)
23455 + movl %ebx, 12(%r9)
23456 + movl %ebp, 20(%r9)
23457 + movl %esi, 24(%r9)
23458 + movl %edi, 28(%r9)
23459 popq_cfi %rbp
23460 popq_cfi %rbx
23461 + pax_force_retaddr
23462 ret
23463 3:
23464 CFI_RESTORE_STATE
23465 diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
23466 index 36b0d15..d381858 100644
23467 --- a/arch/x86/lib/putuser.S
23468 +++ b/arch/x86/lib/putuser.S
23469 @@ -15,7 +15,9 @@
23470 #include <asm/thread_info.h>
23471 #include <asm/errno.h>
23472 #include <asm/asm.h>
23473 -
23474 +#include <asm/segment.h>
23475 +#include <asm/pgtable.h>
23476 +#include <asm/alternative-asm.h>
23477
23478 /*
23479 * __put_user_X
23480 @@ -29,52 +31,119 @@
23481 * as they get called from within inline assembly.
23482 */
23483
23484 -#define ENTER CFI_STARTPROC ; \
23485 - GET_THREAD_INFO(%_ASM_BX)
23486 -#define EXIT ret ; \
23487 +#define ENTER CFI_STARTPROC
23488 +#define EXIT pax_force_retaddr; ret ; \
23489 CFI_ENDPROC
23490
23491 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23492 +#define _DEST %_ASM_CX,%_ASM_BX
23493 +#else
23494 +#define _DEST %_ASM_CX
23495 +#endif
23496 +
23497 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
23498 +#define __copyuser_seg gs;
23499 +#else
23500 +#define __copyuser_seg
23501 +#endif
23502 +
23503 .text
23504 ENTRY(__put_user_1)
23505 ENTER
23506 +
23507 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
23508 + GET_THREAD_INFO(%_ASM_BX)
23509 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
23510 jae bad_put_user
23511 -1: movb %al,(%_ASM_CX)
23512 +
23513 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23514 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
23515 + cmp %_ASM_BX,%_ASM_CX
23516 + jb 1234f
23517 + xor %ebx,%ebx
23518 +1234:
23519 +#endif
23520 +
23521 +#endif
23522 +
23523 +1: __copyuser_seg movb %al,(_DEST)
23524 xor %eax,%eax
23525 EXIT
23526 ENDPROC(__put_user_1)
23527
23528 ENTRY(__put_user_2)
23529 ENTER
23530 +
23531 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
23532 + GET_THREAD_INFO(%_ASM_BX)
23533 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
23534 sub $1,%_ASM_BX
23535 cmp %_ASM_BX,%_ASM_CX
23536 jae bad_put_user
23537 -2: movw %ax,(%_ASM_CX)
23538 +
23539 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23540 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
23541 + cmp %_ASM_BX,%_ASM_CX
23542 + jb 1234f
23543 + xor %ebx,%ebx
23544 +1234:
23545 +#endif
23546 +
23547 +#endif
23548 +
23549 +2: __copyuser_seg movw %ax,(_DEST)
23550 xor %eax,%eax
23551 EXIT
23552 ENDPROC(__put_user_2)
23553
23554 ENTRY(__put_user_4)
23555 ENTER
23556 +
23557 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
23558 + GET_THREAD_INFO(%_ASM_BX)
23559 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
23560 sub $3,%_ASM_BX
23561 cmp %_ASM_BX,%_ASM_CX
23562 jae bad_put_user
23563 -3: movl %eax,(%_ASM_CX)
23564 +
23565 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23566 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
23567 + cmp %_ASM_BX,%_ASM_CX
23568 + jb 1234f
23569 + xor %ebx,%ebx
23570 +1234:
23571 +#endif
23572 +
23573 +#endif
23574 +
23575 +3: __copyuser_seg movl %eax,(_DEST)
23576 xor %eax,%eax
23577 EXIT
23578 ENDPROC(__put_user_4)
23579
23580 ENTRY(__put_user_8)
23581 ENTER
23582 +
23583 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
23584 + GET_THREAD_INFO(%_ASM_BX)
23585 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
23586 sub $7,%_ASM_BX
23587 cmp %_ASM_BX,%_ASM_CX
23588 jae bad_put_user
23589 -4: mov %_ASM_AX,(%_ASM_CX)
23590 +
23591 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23592 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
23593 + cmp %_ASM_BX,%_ASM_CX
23594 + jb 1234f
23595 + xor %ebx,%ebx
23596 +1234:
23597 +#endif
23598 +
23599 +#endif
23600 +
23601 +4: __copyuser_seg mov %_ASM_AX,(_DEST)
23602 #ifdef CONFIG_X86_32
23603 -5: movl %edx,4(%_ASM_CX)
23604 +5: __copyuser_seg movl %edx,4(_DEST)
23605 #endif
23606 xor %eax,%eax
23607 EXIT
23608 diff --git a/arch/x86/lib/rwlock_64.S b/arch/x86/lib/rwlock_64.S
23609 index 05ea55f..6345b9a 100644
23610 --- a/arch/x86/lib/rwlock_64.S
23611 +++ b/arch/x86/lib/rwlock_64.S
23612 @@ -2,6 +2,7 @@
23613
23614 #include <linux/linkage.h>
23615 #include <asm/rwlock.h>
23616 +#include <asm/asm.h>
23617 #include <asm/alternative-asm.h>
23618 #include <asm/dwarf2.h>
23619
23620 @@ -10,13 +11,34 @@ ENTRY(__write_lock_failed)
23621 CFI_STARTPROC
23622 LOCK_PREFIX
23623 addl $RW_LOCK_BIAS,(%rdi)
23624 +
23625 +#ifdef CONFIG_PAX_REFCOUNT
23626 + jno 1234f
23627 + LOCK_PREFIX
23628 + subl $RW_LOCK_BIAS,(%rdi)
23629 + int $4
23630 +1234:
23631 + _ASM_EXTABLE(1234b, 1234b)
23632 +#endif
23633 +
23634 1: rep
23635 nop
23636 cmpl $RW_LOCK_BIAS,(%rdi)
23637 jne 1b
23638 LOCK_PREFIX
23639 subl $RW_LOCK_BIAS,(%rdi)
23640 +
23641 +#ifdef CONFIG_PAX_REFCOUNT
23642 + jno 1234f
23643 + LOCK_PREFIX
23644 + addl $RW_LOCK_BIAS,(%rdi)
23645 + int $4
23646 +1234:
23647 + _ASM_EXTABLE(1234b, 1234b)
23648 +#endif
23649 +
23650 jnz __write_lock_failed
23651 + pax_force_retaddr
23652 ret
23653 CFI_ENDPROC
23654 END(__write_lock_failed)
23655 @@ -26,13 +48,34 @@ ENTRY(__read_lock_failed)
23656 CFI_STARTPROC
23657 LOCK_PREFIX
23658 incl (%rdi)
23659 +
23660 +#ifdef CONFIG_PAX_REFCOUNT
23661 + jno 1234f
23662 + LOCK_PREFIX
23663 + decl (%rdi)
23664 + int $4
23665 +1234:
23666 + _ASM_EXTABLE(1234b, 1234b)
23667 +#endif
23668 +
23669 1: rep
23670 nop
23671 cmpl $1,(%rdi)
23672 js 1b
23673 LOCK_PREFIX
23674 decl (%rdi)
23675 +
23676 +#ifdef CONFIG_PAX_REFCOUNT
23677 + jno 1234f
23678 + LOCK_PREFIX
23679 + incl (%rdi)
23680 + int $4
23681 +1234:
23682 + _ASM_EXTABLE(1234b, 1234b)
23683 +#endif
23684 +
23685 js __read_lock_failed
23686 + pax_force_retaddr
23687 ret
23688 CFI_ENDPROC
23689 END(__read_lock_failed)
23690 diff --git a/arch/x86/lib/rwsem_64.S b/arch/x86/lib/rwsem_64.S
23691 index 15acecf..f768b10 100644
23692 --- a/arch/x86/lib/rwsem_64.S
23693 +++ b/arch/x86/lib/rwsem_64.S
23694 @@ -48,6 +48,7 @@ ENTRY(call_rwsem_down_read_failed)
23695 call rwsem_down_read_failed
23696 popq %rdx
23697 restore_common_regs
23698 + pax_force_retaddr
23699 ret
23700 ENDPROC(call_rwsem_down_read_failed)
23701
23702 @@ -56,6 +57,7 @@ ENTRY(call_rwsem_down_write_failed)
23703 movq %rax,%rdi
23704 call rwsem_down_write_failed
23705 restore_common_regs
23706 + pax_force_retaddr
23707 ret
23708 ENDPROC(call_rwsem_down_write_failed)
23709
23710 @@ -66,7 +68,8 @@ ENTRY(call_rwsem_wake)
23711 movq %rax,%rdi
23712 call rwsem_wake
23713 restore_common_regs
23714 -1: ret
23715 +1: pax_force_retaddr
23716 + ret
23717 ENDPROC(call_rwsem_wake)
23718
23719 /* Fix up special calling conventions */
23720 @@ -77,5 +80,6 @@ ENTRY(call_rwsem_downgrade_wake)
23721 call rwsem_downgrade_wake
23722 popq %rdx
23723 restore_common_regs
23724 + pax_force_retaddr
23725 ret
23726 ENDPROC(call_rwsem_downgrade_wake)
23727 diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
23728 index bf9a7d5..fb06ab5 100644
23729 --- a/arch/x86/lib/thunk_64.S
23730 +++ b/arch/x86/lib/thunk_64.S
23731 @@ -10,7 +10,8 @@
23732 #include <asm/dwarf2.h>
23733 #include <asm/calling.h>
23734 #include <asm/rwlock.h>
23735 -
23736 + #include <asm/alternative-asm.h>
23737 +
23738 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
23739 .macro thunk name,func
23740 .globl \name
23741 @@ -70,6 +71,7 @@
23742 SAVE_ARGS
23743 restore:
23744 RESTORE_ARGS
23745 + pax_force_retaddr
23746 ret
23747 CFI_ENDPROC
23748
23749 @@ -77,5 +79,6 @@ restore:
23750 SAVE_ARGS
23751 restore_norax:
23752 RESTORE_ARGS 1
23753 + pax_force_retaddr
23754 ret
23755 CFI_ENDPROC
23756 diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
23757 index 1f118d4..ec4a953 100644
23758 --- a/arch/x86/lib/usercopy_32.c
23759 +++ b/arch/x86/lib/usercopy_32.c
23760 @@ -43,7 +43,7 @@ do { \
23761 __asm__ __volatile__( \
23762 " testl %1,%1\n" \
23763 " jz 2f\n" \
23764 - "0: lodsb\n" \
23765 + "0: "__copyuser_seg"lodsb\n" \
23766 " stosb\n" \
23767 " testb %%al,%%al\n" \
23768 " jz 1f\n" \
23769 @@ -128,10 +128,12 @@ do { \
23770 int __d0; \
23771 might_fault(); \
23772 __asm__ __volatile__( \
23773 + __COPYUSER_SET_ES \
23774 "0: rep; stosl\n" \
23775 " movl %2,%0\n" \
23776 "1: rep; stosb\n" \
23777 "2:\n" \
23778 + __COPYUSER_RESTORE_ES \
23779 ".section .fixup,\"ax\"\n" \
23780 "3: lea 0(%2,%0,4),%0\n" \
23781 " jmp 2b\n" \
23782 @@ -200,6 +202,7 @@ long strnlen_user(const char __user *s, long n)
23783 might_fault();
23784
23785 __asm__ __volatile__(
23786 + __COPYUSER_SET_ES
23787 " testl %0, %0\n"
23788 " jz 3f\n"
23789 " andl %0,%%ecx\n"
23790 @@ -208,6 +211,7 @@ long strnlen_user(const char __user *s, long n)
23791 " subl %%ecx,%0\n"
23792 " addl %0,%%eax\n"
23793 "1:\n"
23794 + __COPYUSER_RESTORE_ES
23795 ".section .fixup,\"ax\"\n"
23796 "2: xorl %%eax,%%eax\n"
23797 " jmp 1b\n"
23798 @@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
23799
23800 #ifdef CONFIG_X86_INTEL_USERCOPY
23801 static unsigned long
23802 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
23803 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
23804 {
23805 int d0, d1;
23806 __asm__ __volatile__(
23807 @@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
23808 " .align 2,0x90\n"
23809 "3: movl 0(%4), %%eax\n"
23810 "4: movl 4(%4), %%edx\n"
23811 - "5: movl %%eax, 0(%3)\n"
23812 - "6: movl %%edx, 4(%3)\n"
23813 + "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
23814 + "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
23815 "7: movl 8(%4), %%eax\n"
23816 "8: movl 12(%4),%%edx\n"
23817 - "9: movl %%eax, 8(%3)\n"
23818 - "10: movl %%edx, 12(%3)\n"
23819 + "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
23820 + "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
23821 "11: movl 16(%4), %%eax\n"
23822 "12: movl 20(%4), %%edx\n"
23823 - "13: movl %%eax, 16(%3)\n"
23824 - "14: movl %%edx, 20(%3)\n"
23825 + "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
23826 + "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
23827 "15: movl 24(%4), %%eax\n"
23828 "16: movl 28(%4), %%edx\n"
23829 - "17: movl %%eax, 24(%3)\n"
23830 - "18: movl %%edx, 28(%3)\n"
23831 + "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
23832 + "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
23833 "19: movl 32(%4), %%eax\n"
23834 "20: movl 36(%4), %%edx\n"
23835 - "21: movl %%eax, 32(%3)\n"
23836 - "22: movl %%edx, 36(%3)\n"
23837 + "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
23838 + "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
23839 "23: movl 40(%4), %%eax\n"
23840 "24: movl 44(%4), %%edx\n"
23841 - "25: movl %%eax, 40(%3)\n"
23842 - "26: movl %%edx, 44(%3)\n"
23843 + "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
23844 + "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
23845 "27: movl 48(%4), %%eax\n"
23846 "28: movl 52(%4), %%edx\n"
23847 - "29: movl %%eax, 48(%3)\n"
23848 - "30: movl %%edx, 52(%3)\n"
23849 + "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
23850 + "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
23851 "31: movl 56(%4), %%eax\n"
23852 "32: movl 60(%4), %%edx\n"
23853 - "33: movl %%eax, 56(%3)\n"
23854 - "34: movl %%edx, 60(%3)\n"
23855 + "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
23856 + "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
23857 " addl $-64, %0\n"
23858 " addl $64, %4\n"
23859 " addl $64, %3\n"
23860 @@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
23861 " shrl $2, %0\n"
23862 " andl $3, %%eax\n"
23863 " cld\n"
23864 + __COPYUSER_SET_ES
23865 "99: rep; movsl\n"
23866 "36: movl %%eax, %0\n"
23867 "37: rep; movsb\n"
23868 "100:\n"
23869 + __COPYUSER_RESTORE_ES
23870 + ".section .fixup,\"ax\"\n"
23871 + "101: lea 0(%%eax,%0,4),%0\n"
23872 + " jmp 100b\n"
23873 + ".previous\n"
23874 + ".section __ex_table,\"a\"\n"
23875 + " .align 4\n"
23876 + " .long 1b,100b\n"
23877 + " .long 2b,100b\n"
23878 + " .long 3b,100b\n"
23879 + " .long 4b,100b\n"
23880 + " .long 5b,100b\n"
23881 + " .long 6b,100b\n"
23882 + " .long 7b,100b\n"
23883 + " .long 8b,100b\n"
23884 + " .long 9b,100b\n"
23885 + " .long 10b,100b\n"
23886 + " .long 11b,100b\n"
23887 + " .long 12b,100b\n"
23888 + " .long 13b,100b\n"
23889 + " .long 14b,100b\n"
23890 + " .long 15b,100b\n"
23891 + " .long 16b,100b\n"
23892 + " .long 17b,100b\n"
23893 + " .long 18b,100b\n"
23894 + " .long 19b,100b\n"
23895 + " .long 20b,100b\n"
23896 + " .long 21b,100b\n"
23897 + " .long 22b,100b\n"
23898 + " .long 23b,100b\n"
23899 + " .long 24b,100b\n"
23900 + " .long 25b,100b\n"
23901 + " .long 26b,100b\n"
23902 + " .long 27b,100b\n"
23903 + " .long 28b,100b\n"
23904 + " .long 29b,100b\n"
23905 + " .long 30b,100b\n"
23906 + " .long 31b,100b\n"
23907 + " .long 32b,100b\n"
23908 + " .long 33b,100b\n"
23909 + " .long 34b,100b\n"
23910 + " .long 35b,100b\n"
23911 + " .long 36b,100b\n"
23912 + " .long 37b,100b\n"
23913 + " .long 99b,101b\n"
23914 + ".previous"
23915 + : "=&c"(size), "=&D" (d0), "=&S" (d1)
23916 + : "1"(to), "2"(from), "0"(size)
23917 + : "eax", "edx", "memory");
23918 + return size;
23919 +}
23920 +
23921 +static unsigned long
23922 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
23923 +{
23924 + int d0, d1;
23925 + __asm__ __volatile__(
23926 + " .align 2,0x90\n"
23927 + "1: "__copyuser_seg" movl 32(%4), %%eax\n"
23928 + " cmpl $67, %0\n"
23929 + " jbe 3f\n"
23930 + "2: "__copyuser_seg" movl 64(%4), %%eax\n"
23931 + " .align 2,0x90\n"
23932 + "3: "__copyuser_seg" movl 0(%4), %%eax\n"
23933 + "4: "__copyuser_seg" movl 4(%4), %%edx\n"
23934 + "5: movl %%eax, 0(%3)\n"
23935 + "6: movl %%edx, 4(%3)\n"
23936 + "7: "__copyuser_seg" movl 8(%4), %%eax\n"
23937 + "8: "__copyuser_seg" movl 12(%4),%%edx\n"
23938 + "9: movl %%eax, 8(%3)\n"
23939 + "10: movl %%edx, 12(%3)\n"
23940 + "11: "__copyuser_seg" movl 16(%4), %%eax\n"
23941 + "12: "__copyuser_seg" movl 20(%4), %%edx\n"
23942 + "13: movl %%eax, 16(%3)\n"
23943 + "14: movl %%edx, 20(%3)\n"
23944 + "15: "__copyuser_seg" movl 24(%4), %%eax\n"
23945 + "16: "__copyuser_seg" movl 28(%4), %%edx\n"
23946 + "17: movl %%eax, 24(%3)\n"
23947 + "18: movl %%edx, 28(%3)\n"
23948 + "19: "__copyuser_seg" movl 32(%4), %%eax\n"
23949 + "20: "__copyuser_seg" movl 36(%4), %%edx\n"
23950 + "21: movl %%eax, 32(%3)\n"
23951 + "22: movl %%edx, 36(%3)\n"
23952 + "23: "__copyuser_seg" movl 40(%4), %%eax\n"
23953 + "24: "__copyuser_seg" movl 44(%4), %%edx\n"
23954 + "25: movl %%eax, 40(%3)\n"
23955 + "26: movl %%edx, 44(%3)\n"
23956 + "27: "__copyuser_seg" movl 48(%4), %%eax\n"
23957 + "28: "__copyuser_seg" movl 52(%4), %%edx\n"
23958 + "29: movl %%eax, 48(%3)\n"
23959 + "30: movl %%edx, 52(%3)\n"
23960 + "31: "__copyuser_seg" movl 56(%4), %%eax\n"
23961 + "32: "__copyuser_seg" movl 60(%4), %%edx\n"
23962 + "33: movl %%eax, 56(%3)\n"
23963 + "34: movl %%edx, 60(%3)\n"
23964 + " addl $-64, %0\n"
23965 + " addl $64, %4\n"
23966 + " addl $64, %3\n"
23967 + " cmpl $63, %0\n"
23968 + " ja 1b\n"
23969 + "35: movl %0, %%eax\n"
23970 + " shrl $2, %0\n"
23971 + " andl $3, %%eax\n"
23972 + " cld\n"
23973 + "99: rep; "__copyuser_seg" movsl\n"
23974 + "36: movl %%eax, %0\n"
23975 + "37: rep; "__copyuser_seg" movsb\n"
23976 + "100:\n"
23977 ".section .fixup,\"ax\"\n"
23978 "101: lea 0(%%eax,%0,4),%0\n"
23979 " jmp 100b\n"
23980 @@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23981 int d0, d1;
23982 __asm__ __volatile__(
23983 " .align 2,0x90\n"
23984 - "0: movl 32(%4), %%eax\n"
23985 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23986 " cmpl $67, %0\n"
23987 " jbe 2f\n"
23988 - "1: movl 64(%4), %%eax\n"
23989 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23990 " .align 2,0x90\n"
23991 - "2: movl 0(%4), %%eax\n"
23992 - "21: movl 4(%4), %%edx\n"
23993 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23994 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23995 " movl %%eax, 0(%3)\n"
23996 " movl %%edx, 4(%3)\n"
23997 - "3: movl 8(%4), %%eax\n"
23998 - "31: movl 12(%4),%%edx\n"
23999 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
24000 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
24001 " movl %%eax, 8(%3)\n"
24002 " movl %%edx, 12(%3)\n"
24003 - "4: movl 16(%4), %%eax\n"
24004 - "41: movl 20(%4), %%edx\n"
24005 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
24006 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
24007 " movl %%eax, 16(%3)\n"
24008 " movl %%edx, 20(%3)\n"
24009 - "10: movl 24(%4), %%eax\n"
24010 - "51: movl 28(%4), %%edx\n"
24011 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
24012 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
24013 " movl %%eax, 24(%3)\n"
24014 " movl %%edx, 28(%3)\n"
24015 - "11: movl 32(%4), %%eax\n"
24016 - "61: movl 36(%4), %%edx\n"
24017 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
24018 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
24019 " movl %%eax, 32(%3)\n"
24020 " movl %%edx, 36(%3)\n"
24021 - "12: movl 40(%4), %%eax\n"
24022 - "71: movl 44(%4), %%edx\n"
24023 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
24024 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
24025 " movl %%eax, 40(%3)\n"
24026 " movl %%edx, 44(%3)\n"
24027 - "13: movl 48(%4), %%eax\n"
24028 - "81: movl 52(%4), %%edx\n"
24029 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
24030 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
24031 " movl %%eax, 48(%3)\n"
24032 " movl %%edx, 52(%3)\n"
24033 - "14: movl 56(%4), %%eax\n"
24034 - "91: movl 60(%4), %%edx\n"
24035 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
24036 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
24037 " movl %%eax, 56(%3)\n"
24038 " movl %%edx, 60(%3)\n"
24039 " addl $-64, %0\n"
24040 @@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
24041 " shrl $2, %0\n"
24042 " andl $3, %%eax\n"
24043 " cld\n"
24044 - "6: rep; movsl\n"
24045 + "6: rep; "__copyuser_seg" movsl\n"
24046 " movl %%eax,%0\n"
24047 - "7: rep; movsb\n"
24048 + "7: rep; "__copyuser_seg" movsb\n"
24049 "8:\n"
24050 ".section .fixup,\"ax\"\n"
24051 "9: lea 0(%%eax,%0,4),%0\n"
24052 @@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
24053
24054 __asm__ __volatile__(
24055 " .align 2,0x90\n"
24056 - "0: movl 32(%4), %%eax\n"
24057 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
24058 " cmpl $67, %0\n"
24059 " jbe 2f\n"
24060 - "1: movl 64(%4), %%eax\n"
24061 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
24062 " .align 2,0x90\n"
24063 - "2: movl 0(%4), %%eax\n"
24064 - "21: movl 4(%4), %%edx\n"
24065 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
24066 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
24067 " movnti %%eax, 0(%3)\n"
24068 " movnti %%edx, 4(%3)\n"
24069 - "3: movl 8(%4), %%eax\n"
24070 - "31: movl 12(%4),%%edx\n"
24071 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
24072 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
24073 " movnti %%eax, 8(%3)\n"
24074 " movnti %%edx, 12(%3)\n"
24075 - "4: movl 16(%4), %%eax\n"
24076 - "41: movl 20(%4), %%edx\n"
24077 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
24078 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
24079 " movnti %%eax, 16(%3)\n"
24080 " movnti %%edx, 20(%3)\n"
24081 - "10: movl 24(%4), %%eax\n"
24082 - "51: movl 28(%4), %%edx\n"
24083 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
24084 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
24085 " movnti %%eax, 24(%3)\n"
24086 " movnti %%edx, 28(%3)\n"
24087 - "11: movl 32(%4), %%eax\n"
24088 - "61: movl 36(%4), %%edx\n"
24089 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
24090 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
24091 " movnti %%eax, 32(%3)\n"
24092 " movnti %%edx, 36(%3)\n"
24093 - "12: movl 40(%4), %%eax\n"
24094 - "71: movl 44(%4), %%edx\n"
24095 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
24096 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
24097 " movnti %%eax, 40(%3)\n"
24098 " movnti %%edx, 44(%3)\n"
24099 - "13: movl 48(%4), %%eax\n"
24100 - "81: movl 52(%4), %%edx\n"
24101 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
24102 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
24103 " movnti %%eax, 48(%3)\n"
24104 " movnti %%edx, 52(%3)\n"
24105 - "14: movl 56(%4), %%eax\n"
24106 - "91: movl 60(%4), %%edx\n"
24107 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
24108 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
24109 " movnti %%eax, 56(%3)\n"
24110 " movnti %%edx, 60(%3)\n"
24111 " addl $-64, %0\n"
24112 @@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
24113 " shrl $2, %0\n"
24114 " andl $3, %%eax\n"
24115 " cld\n"
24116 - "6: rep; movsl\n"
24117 + "6: rep; "__copyuser_seg" movsl\n"
24118 " movl %%eax,%0\n"
24119 - "7: rep; movsb\n"
24120 + "7: rep; "__copyuser_seg" movsb\n"
24121 "8:\n"
24122 ".section .fixup,\"ax\"\n"
24123 "9: lea 0(%%eax,%0,4),%0\n"
24124 @@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
24125
24126 __asm__ __volatile__(
24127 " .align 2,0x90\n"
24128 - "0: movl 32(%4), %%eax\n"
24129 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
24130 " cmpl $67, %0\n"
24131 " jbe 2f\n"
24132 - "1: movl 64(%4), %%eax\n"
24133 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
24134 " .align 2,0x90\n"
24135 - "2: movl 0(%4), %%eax\n"
24136 - "21: movl 4(%4), %%edx\n"
24137 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
24138 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
24139 " movnti %%eax, 0(%3)\n"
24140 " movnti %%edx, 4(%3)\n"
24141 - "3: movl 8(%4), %%eax\n"
24142 - "31: movl 12(%4),%%edx\n"
24143 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
24144 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
24145 " movnti %%eax, 8(%3)\n"
24146 " movnti %%edx, 12(%3)\n"
24147 - "4: movl 16(%4), %%eax\n"
24148 - "41: movl 20(%4), %%edx\n"
24149 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
24150 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
24151 " movnti %%eax, 16(%3)\n"
24152 " movnti %%edx, 20(%3)\n"
24153 - "10: movl 24(%4), %%eax\n"
24154 - "51: movl 28(%4), %%edx\n"
24155 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
24156 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
24157 " movnti %%eax, 24(%3)\n"
24158 " movnti %%edx, 28(%3)\n"
24159 - "11: movl 32(%4), %%eax\n"
24160 - "61: movl 36(%4), %%edx\n"
24161 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
24162 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
24163 " movnti %%eax, 32(%3)\n"
24164 " movnti %%edx, 36(%3)\n"
24165 - "12: movl 40(%4), %%eax\n"
24166 - "71: movl 44(%4), %%edx\n"
24167 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
24168 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
24169 " movnti %%eax, 40(%3)\n"
24170 " movnti %%edx, 44(%3)\n"
24171 - "13: movl 48(%4), %%eax\n"
24172 - "81: movl 52(%4), %%edx\n"
24173 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
24174 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
24175 " movnti %%eax, 48(%3)\n"
24176 " movnti %%edx, 52(%3)\n"
24177 - "14: movl 56(%4), %%eax\n"
24178 - "91: movl 60(%4), %%edx\n"
24179 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
24180 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
24181 " movnti %%eax, 56(%3)\n"
24182 " movnti %%edx, 60(%3)\n"
24183 " addl $-64, %0\n"
24184 @@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
24185 " shrl $2, %0\n"
24186 " andl $3, %%eax\n"
24187 " cld\n"
24188 - "6: rep; movsl\n"
24189 + "6: rep; "__copyuser_seg" movsl\n"
24190 " movl %%eax,%0\n"
24191 - "7: rep; movsb\n"
24192 + "7: rep; "__copyuser_seg" movsb\n"
24193 "8:\n"
24194 ".section .fixup,\"ax\"\n"
24195 "9: lea 0(%%eax,%0,4),%0\n"
24196 @@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
24197 */
24198 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
24199 unsigned long size);
24200 -unsigned long __copy_user_intel(void __user *to, const void *from,
24201 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
24202 + unsigned long size);
24203 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
24204 unsigned long size);
24205 unsigned long __copy_user_zeroing_intel_nocache(void *to,
24206 const void __user *from, unsigned long size);
24207 #endif /* CONFIG_X86_INTEL_USERCOPY */
24208
24209 /* Generic arbitrary sized copy. */
24210 -#define __copy_user(to, from, size) \
24211 +#define __copy_user(to, from, size, prefix, set, restore) \
24212 do { \
24213 int __d0, __d1, __d2; \
24214 __asm__ __volatile__( \
24215 + set \
24216 " cmp $7,%0\n" \
24217 " jbe 1f\n" \
24218 " movl %1,%0\n" \
24219 " negl %0\n" \
24220 " andl $7,%0\n" \
24221 " subl %0,%3\n" \
24222 - "4: rep; movsb\n" \
24223 + "4: rep; "prefix"movsb\n" \
24224 " movl %3,%0\n" \
24225 " shrl $2,%0\n" \
24226 " andl $3,%3\n" \
24227 " .align 2,0x90\n" \
24228 - "0: rep; movsl\n" \
24229 + "0: rep; "prefix"movsl\n" \
24230 " movl %3,%0\n" \
24231 - "1: rep; movsb\n" \
24232 + "1: rep; "prefix"movsb\n" \
24233 "2:\n" \
24234 + restore \
24235 ".section .fixup,\"ax\"\n" \
24236 "5: addl %3,%0\n" \
24237 " jmp 2b\n" \
24238 @@ -682,14 +799,14 @@ do { \
24239 " negl %0\n" \
24240 " andl $7,%0\n" \
24241 " subl %0,%3\n" \
24242 - "4: rep; movsb\n" \
24243 + "4: rep; "__copyuser_seg"movsb\n" \
24244 " movl %3,%0\n" \
24245 " shrl $2,%0\n" \
24246 " andl $3,%3\n" \
24247 " .align 2,0x90\n" \
24248 - "0: rep; movsl\n" \
24249 + "0: rep; "__copyuser_seg"movsl\n" \
24250 " movl %3,%0\n" \
24251 - "1: rep; movsb\n" \
24252 + "1: rep; "__copyuser_seg"movsb\n" \
24253 "2:\n" \
24254 ".section .fixup,\"ax\"\n" \
24255 "5: addl %3,%0\n" \
24256 @@ -775,9 +892,9 @@ survive:
24257 }
24258 #endif
24259 if (movsl_is_ok(to, from, n))
24260 - __copy_user(to, from, n);
24261 + __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
24262 else
24263 - n = __copy_user_intel(to, from, n);
24264 + n = __generic_copy_to_user_intel(to, from, n);
24265 return n;
24266 }
24267 EXPORT_SYMBOL(__copy_to_user_ll);
24268 @@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
24269 unsigned long n)
24270 {
24271 if (movsl_is_ok(to, from, n))
24272 - __copy_user(to, from, n);
24273 + __copy_user(to, from, n, __copyuser_seg, "", "");
24274 else
24275 - n = __copy_user_intel((void __user *)to,
24276 - (const void *)from, n);
24277 + n = __generic_copy_from_user_intel(to, from, n);
24278 return n;
24279 }
24280 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
24281 @@ -827,59 +943,38 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
24282 if (n > 64 && cpu_has_xmm2)
24283 n = __copy_user_intel_nocache(to, from, n);
24284 else
24285 - __copy_user(to, from, n);
24286 + __copy_user(to, from, n, __copyuser_seg, "", "");
24287 #else
24288 - __copy_user(to, from, n);
24289 + __copy_user(to, from, n, __copyuser_seg, "", "");
24290 #endif
24291 return n;
24292 }
24293 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
24294
24295 -/**
24296 - * copy_to_user: - Copy a block of data into user space.
24297 - * @to: Destination address, in user space.
24298 - * @from: Source address, in kernel space.
24299 - * @n: Number of bytes to copy.
24300 - *
24301 - * Context: User context only. This function may sleep.
24302 - *
24303 - * Copy data from kernel space to user space.
24304 - *
24305 - * Returns number of bytes that could not be copied.
24306 - * On success, this will be zero.
24307 - */
24308 -unsigned long
24309 -copy_to_user(void __user *to, const void *from, unsigned long n)
24310 +#ifdef CONFIG_PAX_MEMORY_UDEREF
24311 +void __set_fs(mm_segment_t x)
24312 {
24313 - if (access_ok(VERIFY_WRITE, to, n))
24314 - n = __copy_to_user(to, from, n);
24315 - return n;
24316 + switch (x.seg) {
24317 + case 0:
24318 + loadsegment(gs, 0);
24319 + break;
24320 + case TASK_SIZE_MAX:
24321 + loadsegment(gs, __USER_DS);
24322 + break;
24323 + case -1UL:
24324 + loadsegment(gs, __KERNEL_DS);
24325 + break;
24326 + default:
24327 + BUG();
24328 + }
24329 + return;
24330 }
24331 -EXPORT_SYMBOL(copy_to_user);
24332 +EXPORT_SYMBOL(__set_fs);
24333
24334 -/**
24335 - * copy_from_user: - Copy a block of data from user space.
24336 - * @to: Destination address, in kernel space.
24337 - * @from: Source address, in user space.
24338 - * @n: Number of bytes to copy.
24339 - *
24340 - * Context: User context only. This function may sleep.
24341 - *
24342 - * Copy data from user space to kernel space.
24343 - *
24344 - * Returns number of bytes that could not be copied.
24345 - * On success, this will be zero.
24346 - *
24347 - * If some data could not be copied, this function will pad the copied
24348 - * data to the requested size using zero bytes.
24349 - */
24350 -unsigned long
24351 -copy_from_user(void *to, const void __user *from, unsigned long n)
24352 +void set_fs(mm_segment_t x)
24353 {
24354 - if (access_ok(VERIFY_READ, from, n))
24355 - n = __copy_from_user(to, from, n);
24356 - else
24357 - memset(to, 0, n);
24358 - return n;
24359 + current_thread_info()->addr_limit = x;
24360 + __set_fs(x);
24361 }
24362 -EXPORT_SYMBOL(copy_from_user);
24363 +EXPORT_SYMBOL(set_fs);
24364 +#endif
24365 diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
24366 index b7c2849..8633ad8 100644
24367 --- a/arch/x86/lib/usercopy_64.c
24368 +++ b/arch/x86/lib/usercopy_64.c
24369 @@ -42,6 +42,12 @@ long
24370 __strncpy_from_user(char *dst, const char __user *src, long count)
24371 {
24372 long res;
24373 +
24374 +#ifdef CONFIG_PAX_MEMORY_UDEREF
24375 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
24376 + src += PAX_USER_SHADOW_BASE;
24377 +#endif
24378 +
24379 __do_strncpy_from_user(dst, src, count, res);
24380 return res;
24381 }
24382 @@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
24383 {
24384 long __d0;
24385 might_fault();
24386 +
24387 +#ifdef CONFIG_PAX_MEMORY_UDEREF
24388 + if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
24389 + addr += PAX_USER_SHADOW_BASE;
24390 +#endif
24391 +
24392 /* no memory constraint because it doesn't change any memory gcc knows
24393 about */
24394 asm volatile(
24395 @@ -149,12 +161,20 @@ long strlen_user(const char __user *s)
24396 }
24397 EXPORT_SYMBOL(strlen_user);
24398
24399 -unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
24400 +unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
24401 {
24402 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
24403 - return copy_user_generic((__force void *)to, (__force void *)from, len);
24404 - }
24405 - return len;
24406 + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
24407 +
24408 +#ifdef CONFIG_PAX_MEMORY_UDEREF
24409 + if ((unsigned long)to < PAX_USER_SHADOW_BASE)
24410 + to += PAX_USER_SHADOW_BASE;
24411 + if ((unsigned long)from < PAX_USER_SHADOW_BASE)
24412 + from += PAX_USER_SHADOW_BASE;
24413 +#endif
24414 +
24415 + return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
24416 + }
24417 + return len;
24418 }
24419 EXPORT_SYMBOL(copy_in_user);
24420
24421 @@ -164,7 +184,7 @@ EXPORT_SYMBOL(copy_in_user);
24422 * it is not necessary to optimize tail handling.
24423 */
24424 unsigned long
24425 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
24426 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
24427 {
24428 char c;
24429 unsigned zero_len;
24430 diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
24431 index 61b41ca..5fef66a 100644
24432 --- a/arch/x86/mm/extable.c
24433 +++ b/arch/x86/mm/extable.c
24434 @@ -1,14 +1,71 @@
24435 #include <linux/module.h>
24436 #include <linux/spinlock.h>
24437 +#include <linux/sort.h>
24438 #include <asm/uaccess.h>
24439 +#include <asm/pgtable.h>
24440
24441 +/*
24442 + * The exception table needs to be sorted so that the binary
24443 + * search that we use to find entries in it works properly.
24444 + * This is used both for the kernel exception table and for
24445 + * the exception tables of modules that get loaded.
24446 + */
24447 +static int cmp_ex(const void *a, const void *b)
24448 +{
24449 + const struct exception_table_entry *x = a, *y = b;
24450 +
24451 + /* avoid overflow */
24452 + if (x->insn > y->insn)
24453 + return 1;
24454 + if (x->insn < y->insn)
24455 + return -1;
24456 + return 0;
24457 +}
24458 +
24459 +static void swap_ex(void *a, void *b, int size)
24460 +{
24461 + struct exception_table_entry t, *x = a, *y = b;
24462 +
24463 + t = *x;
24464 +
24465 + pax_open_kernel();
24466 + *x = *y;
24467 + *y = t;
24468 + pax_close_kernel();
24469 +}
24470 +
24471 +void sort_extable(struct exception_table_entry *start,
24472 + struct exception_table_entry *finish)
24473 +{
24474 + sort(start, finish - start, sizeof(struct exception_table_entry),
24475 + cmp_ex, swap_ex);
24476 +}
24477 +
24478 +#ifdef CONFIG_MODULES
24479 +/*
24480 + * If the exception table is sorted, any referring to the module init
24481 + * will be at the beginning or the end.
24482 + */
24483 +void trim_init_extable(struct module *m)
24484 +{
24485 + /*trim the beginning*/
24486 + while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
24487 + m->extable++;
24488 + m->num_exentries--;
24489 + }
24490 + /*trim the end*/
24491 + while (m->num_exentries &&
24492 + within_module_init(m->extable[m->num_exentries-1].insn, m))
24493 + m->num_exentries--;
24494 +}
24495 +#endif /* CONFIG_MODULES */
24496
24497 int fixup_exception(struct pt_regs *regs)
24498 {
24499 const struct exception_table_entry *fixup;
24500
24501 #ifdef CONFIG_PNPBIOS
24502 - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
24503 + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
24504 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
24505 extern u32 pnp_bios_is_utter_crap;
24506 pnp_bios_is_utter_crap = 1;
24507 diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
24508 index 8ac0d76..ca501e2 100644
24509 --- a/arch/x86/mm/fault.c
24510 +++ b/arch/x86/mm/fault.c
24511 @@ -11,10 +11,19 @@
24512 #include <linux/kprobes.h> /* __kprobes, ... */
24513 #include <linux/mmiotrace.h> /* kmmio_handler, ... */
24514 #include <linux/perf_event.h> /* perf_sw_event */
24515 +#include <linux/unistd.h>
24516 +#include <linux/compiler.h>
24517
24518 #include <asm/traps.h> /* dotraplinkage, ... */
24519 #include <asm/pgalloc.h> /* pgd_*(), ... */
24520 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
24521 +#include <asm/vsyscall.h>
24522 +#include <asm/tlbflush.h>
24523 +
24524 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24525 +#include <asm/stacktrace.h>
24526 +#include "../kernel/dumpstack.h"
24527 +#endif
24528
24529 /*
24530 * Page fault error code bits:
24531 @@ -51,7 +60,7 @@ static inline int notify_page_fault(struct pt_regs *regs)
24532 int ret = 0;
24533
24534 /* kprobe_running() needs smp_processor_id() */
24535 - if (kprobes_built_in() && !user_mode_vm(regs)) {
24536 + if (kprobes_built_in() && !user_mode(regs)) {
24537 preempt_disable();
24538 if (kprobe_running() && kprobe_fault_handler(regs, 14))
24539 ret = 1;
24540 @@ -112,7 +121,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
24541 return !instr_lo || (instr_lo>>1) == 1;
24542 case 0x00:
24543 /* Prefetch instruction is 0x0F0D or 0x0F18 */
24544 - if (probe_kernel_address(instr, opcode))
24545 + if (user_mode(regs)) {
24546 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
24547 + return 0;
24548 + } else if (probe_kernel_address(instr, opcode))
24549 return 0;
24550
24551 *prefetch = (instr_lo == 0xF) &&
24552 @@ -146,7 +158,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
24553 while (instr < max_instr) {
24554 unsigned char opcode;
24555
24556 - if (probe_kernel_address(instr, opcode))
24557 + if (user_mode(regs)) {
24558 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
24559 + break;
24560 + } else if (probe_kernel_address(instr, opcode))
24561 break;
24562
24563 instr++;
24564 @@ -172,6 +187,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
24565 force_sig_info(si_signo, &info, tsk);
24566 }
24567
24568 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24569 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
24570 +#endif
24571 +
24572 +#ifdef CONFIG_PAX_EMUTRAMP
24573 +static int pax_handle_fetch_fault(struct pt_regs *regs);
24574 +#endif
24575 +
24576 +#ifdef CONFIG_PAX_PAGEEXEC
24577 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
24578 +{
24579 + pgd_t *pgd;
24580 + pud_t *pud;
24581 + pmd_t *pmd;
24582 +
24583 + pgd = pgd_offset(mm, address);
24584 + if (!pgd_present(*pgd))
24585 + return NULL;
24586 + pud = pud_offset(pgd, address);
24587 + if (!pud_present(*pud))
24588 + return NULL;
24589 + pmd = pmd_offset(pud, address);
24590 + if (!pmd_present(*pmd))
24591 + return NULL;
24592 + return pmd;
24593 +}
24594 +#endif
24595 +
24596 DEFINE_SPINLOCK(pgd_lock);
24597 LIST_HEAD(pgd_list);
24598
24599 @@ -224,11 +267,24 @@ void vmalloc_sync_all(void)
24600 address += PMD_SIZE) {
24601
24602 unsigned long flags;
24603 +
24604 +#ifdef CONFIG_PAX_PER_CPU_PGD
24605 + unsigned long cpu;
24606 +#else
24607 struct page *page;
24608 +#endif
24609
24610 spin_lock_irqsave(&pgd_lock, flags);
24611 +
24612 +#ifdef CONFIG_PAX_PER_CPU_PGD
24613 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
24614 + pgd_t *pgd = get_cpu_pgd(cpu);
24615 +#else
24616 list_for_each_entry(page, &pgd_list, lru) {
24617 - if (!vmalloc_sync_one(page_address(page), address))
24618 + pgd_t *pgd = page_address(page);
24619 +#endif
24620 +
24621 + if (!vmalloc_sync_one(pgd, address))
24622 break;
24623 }
24624 spin_unlock_irqrestore(&pgd_lock, flags);
24625 @@ -258,6 +314,11 @@ static noinline int vmalloc_fault(unsigned long address)
24626 * an interrupt in the middle of a task switch..
24627 */
24628 pgd_paddr = read_cr3();
24629 +
24630 +#ifdef CONFIG_PAX_PER_CPU_PGD
24631 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
24632 +#endif
24633 +
24634 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
24635 if (!pmd_k)
24636 return -1;
24637 @@ -332,15 +393,27 @@ void vmalloc_sync_all(void)
24638
24639 const pgd_t *pgd_ref = pgd_offset_k(address);
24640 unsigned long flags;
24641 +
24642 +#ifdef CONFIG_PAX_PER_CPU_PGD
24643 + unsigned long cpu;
24644 +#else
24645 struct page *page;
24646 +#endif
24647
24648 if (pgd_none(*pgd_ref))
24649 continue;
24650
24651 spin_lock_irqsave(&pgd_lock, flags);
24652 +
24653 +#ifdef CONFIG_PAX_PER_CPU_PGD
24654 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
24655 + pgd_t *pgd = pgd_offset_cpu(cpu, address);
24656 +#else
24657 list_for_each_entry(page, &pgd_list, lru) {
24658 pgd_t *pgd;
24659 pgd = (pgd_t *)page_address(page) + pgd_index(address);
24660 +#endif
24661 +
24662 if (pgd_none(*pgd))
24663 set_pgd(pgd, *pgd_ref);
24664 else
24665 @@ -373,7 +446,14 @@ static noinline int vmalloc_fault(unsigned long address)
24666 * happen within a race in page table update. In the later
24667 * case just flush:
24668 */
24669 +
24670 +#ifdef CONFIG_PAX_PER_CPU_PGD
24671 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
24672 + pgd = pgd_offset_cpu(smp_processor_id(), address);
24673 +#else
24674 pgd = pgd_offset(current->active_mm, address);
24675 +#endif
24676 +
24677 pgd_ref = pgd_offset_k(address);
24678 if (pgd_none(*pgd_ref))
24679 return -1;
24680 @@ -535,7 +615,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
24681 static int is_errata100(struct pt_regs *regs, unsigned long address)
24682 {
24683 #ifdef CONFIG_X86_64
24684 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
24685 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
24686 return 1;
24687 #endif
24688 return 0;
24689 @@ -562,7 +642,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
24690 }
24691
24692 static const char nx_warning[] = KERN_CRIT
24693 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
24694 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
24695
24696 static void
24697 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
24698 @@ -571,15 +651,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
24699 if (!oops_may_print())
24700 return;
24701
24702 - if (error_code & PF_INSTR) {
24703 + if (nx_enabled && (error_code & PF_INSTR)) {
24704 unsigned int level;
24705
24706 pte_t *pte = lookup_address(address, &level);
24707
24708 if (pte && pte_present(*pte) && !pte_exec(*pte))
24709 - printk(nx_warning, current_uid());
24710 + printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
24711 }
24712
24713 +#ifdef CONFIG_PAX_KERNEXEC
24714 + if (init_mm.start_code <= address && address < init_mm.end_code) {
24715 + if (current->signal->curr_ip)
24716 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
24717 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
24718 + else
24719 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
24720 + current->comm, task_pid_nr(current), current_uid(), current_euid());
24721 + }
24722 +#endif
24723 +
24724 printk(KERN_ALERT "BUG: unable to handle kernel ");
24725 if (address < PAGE_SIZE)
24726 printk(KERN_CONT "NULL pointer dereference");
24727 @@ -705,6 +796,23 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
24728 {
24729 struct task_struct *tsk = current;
24730
24731 +#ifdef CONFIG_X86_64
24732 + struct mm_struct *mm = tsk->mm;
24733 +
24734 + if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
24735 + if (regs->ip == (unsigned long)vgettimeofday) {
24736 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_gettimeofday);
24737 + return;
24738 + } else if (regs->ip == (unsigned long)vtime) {
24739 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_time);
24740 + return;
24741 + } else if (regs->ip == (unsigned long)vgetcpu) {
24742 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, getcpu);
24743 + return;
24744 + }
24745 + }
24746 +#endif
24747 +
24748 /* User mode accesses just cause a SIGSEGV */
24749 if (error_code & PF_USER) {
24750 /*
24751 @@ -722,6 +830,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
24752 if (is_errata100(regs, address))
24753 return;
24754
24755 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24756 + if (pax_is_fetch_fault(regs, error_code, address)) {
24757 +
24758 +#ifdef CONFIG_PAX_EMUTRAMP
24759 + switch (pax_handle_fetch_fault(regs)) {
24760 + case 2:
24761 + return;
24762 + }
24763 +#endif
24764 +
24765 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
24766 + do_group_exit(SIGKILL);
24767 + }
24768 +#endif
24769 +
24770 if (unlikely(show_unhandled_signals))
24771 show_signal_msg(regs, error_code, address, tsk);
24772
24773 @@ -818,7 +941,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
24774 if (fault & VM_FAULT_HWPOISON) {
24775 printk(KERN_ERR
24776 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
24777 - tsk->comm, tsk->pid, address);
24778 + tsk->comm, task_pid_nr(tsk), address);
24779 code = BUS_MCEERR_AR;
24780 }
24781 #endif
24782 @@ -857,6 +980,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
24783 return 1;
24784 }
24785
24786 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
24787 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
24788 +{
24789 + pte_t *pte;
24790 + pmd_t *pmd;
24791 + spinlock_t *ptl;
24792 + unsigned char pte_mask;
24793 +
24794 + if (nx_enabled || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
24795 + !(mm->pax_flags & MF_PAX_PAGEEXEC))
24796 + return 0;
24797 +
24798 + /* PaX: it's our fault, let's handle it if we can */
24799 +
24800 + /* PaX: take a look at read faults before acquiring any locks */
24801 + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
24802 + /* instruction fetch attempt from a protected page in user mode */
24803 + up_read(&mm->mmap_sem);
24804 +
24805 +#ifdef CONFIG_PAX_EMUTRAMP
24806 + switch (pax_handle_fetch_fault(regs)) {
24807 + case 2:
24808 + return 1;
24809 + }
24810 +#endif
24811 +
24812 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
24813 + do_group_exit(SIGKILL);
24814 + }
24815 +
24816 + pmd = pax_get_pmd(mm, address);
24817 + if (unlikely(!pmd))
24818 + return 0;
24819 +
24820 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
24821 + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
24822 + pte_unmap_unlock(pte, ptl);
24823 + return 0;
24824 + }
24825 +
24826 + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
24827 + /* write attempt to a protected page in user mode */
24828 + pte_unmap_unlock(pte, ptl);
24829 + return 0;
24830 + }
24831 +
24832 +#ifdef CONFIG_SMP
24833 + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
24834 +#else
24835 + if (likely(address > get_limit(regs->cs)))
24836 +#endif
24837 + {
24838 + set_pte(pte, pte_mkread(*pte));
24839 + __flush_tlb_one(address);
24840 + pte_unmap_unlock(pte, ptl);
24841 + up_read(&mm->mmap_sem);
24842 + return 1;
24843 + }
24844 +
24845 + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
24846 +
24847 + /*
24848 + * PaX: fill DTLB with user rights and retry
24849 + */
24850 + __asm__ __volatile__ (
24851 + "orb %2,(%1)\n"
24852 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
24853 +/*
24854 + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
24855 + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
24856 + * page fault when examined during a TLB load attempt. this is true not only
24857 + * for PTEs holding a non-present entry but also present entries that will
24858 + * raise a page fault (such as those set up by PaX, or the copy-on-write
24859 + * mechanism). in effect it means that we do *not* need to flush the TLBs
24860 + * for our target pages since their PTEs are simply not in the TLBs at all.
24861 +
24862 + * the best thing in omitting it is that we gain around 15-20% speed in the
24863 + * fast path of the page fault handler and can get rid of tracing since we
24864 + * can no longer flush unintended entries.
24865 + */
24866 + "invlpg (%0)\n"
24867 +#endif
24868 + __copyuser_seg"testb $0,(%0)\n"
24869 + "xorb %3,(%1)\n"
24870 + :
24871 + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
24872 + : "memory", "cc");
24873 + pte_unmap_unlock(pte, ptl);
24874 + up_read(&mm->mmap_sem);
24875 + return 1;
24876 +}
24877 +#endif
24878 +
24879 /*
24880 * Handle a spurious fault caused by a stale TLB entry.
24881 *
24882 @@ -923,6 +1139,9 @@ int show_unhandled_signals = 1;
24883 static inline int
24884 access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
24885 {
24886 + if (nx_enabled && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
24887 + return 1;
24888 +
24889 if (write) {
24890 /* write, present and write, not present: */
24891 if (unlikely(!(vma->vm_flags & VM_WRITE)))
24892 @@ -956,16 +1175,30 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24893 {
24894 struct vm_area_struct *vma;
24895 struct task_struct *tsk;
24896 - unsigned long address;
24897 struct mm_struct *mm;
24898 int write;
24899 int fault;
24900
24901 - tsk = current;
24902 - mm = tsk->mm;
24903 -
24904 /* Get the faulting address: */
24905 - address = read_cr2();
24906 + unsigned long address = read_cr2();
24907 +
24908 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24909 + if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
24910 + if (!search_exception_tables(regs->ip)) {
24911 + bad_area_nosemaphore(regs, error_code, address);
24912 + return;
24913 + }
24914 + if (address < PAX_USER_SHADOW_BASE) {
24915 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
24916 + printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
24917 + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
24918 + } else
24919 + address -= PAX_USER_SHADOW_BASE;
24920 + }
24921 +#endif
24922 +
24923 + tsk = current;
24924 + mm = tsk->mm;
24925
24926 /*
24927 * Detect and handle instructions that would cause a page fault for
24928 @@ -1026,7 +1259,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24929 * User-mode registers count as a user access even for any
24930 * potential system fault or CPU buglet:
24931 */
24932 - if (user_mode_vm(regs)) {
24933 + if (user_mode(regs)) {
24934 local_irq_enable();
24935 error_code |= PF_USER;
24936 } else {
24937 @@ -1080,6 +1313,11 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24938 might_sleep();
24939 }
24940
24941 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
24942 + if (pax_handle_pageexec_fault(regs, mm, address, error_code))
24943 + return;
24944 +#endif
24945 +
24946 vma = find_vma(mm, address);
24947 if (unlikely(!vma)) {
24948 bad_area(regs, error_code, address);
24949 @@ -1091,18 +1329,24 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24950 bad_area(regs, error_code, address);
24951 return;
24952 }
24953 - if (error_code & PF_USER) {
24954 - /*
24955 - * Accessing the stack below %sp is always a bug.
24956 - * The large cushion allows instructions like enter
24957 - * and pusha to work. ("enter $65535, $31" pushes
24958 - * 32 pointers and then decrements %sp by 65535.)
24959 - */
24960 - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
24961 - bad_area(regs, error_code, address);
24962 - return;
24963 - }
24964 + /*
24965 + * Accessing the stack below %sp is always a bug.
24966 + * The large cushion allows instructions like enter
24967 + * and pusha to work. ("enter $65535, $31" pushes
24968 + * 32 pointers and then decrements %sp by 65535.)
24969 + */
24970 + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
24971 + bad_area(regs, error_code, address);
24972 + return;
24973 }
24974 +
24975 +#ifdef CONFIG_PAX_SEGMEXEC
24976 + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
24977 + bad_area(regs, error_code, address);
24978 + return;
24979 + }
24980 +#endif
24981 +
24982 if (unlikely(expand_stack(vma, address))) {
24983 bad_area(regs, error_code, address);
24984 return;
24985 @@ -1146,3 +1390,292 @@ good_area:
24986
24987 up_read(&mm->mmap_sem);
24988 }
24989 +
24990 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24991 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
24992 +{
24993 + struct mm_struct *mm = current->mm;
24994 + unsigned long ip = regs->ip;
24995 +
24996 + if (v8086_mode(regs))
24997 + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
24998 +
24999 +#ifdef CONFIG_PAX_PAGEEXEC
25000 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
25001 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
25002 + return true;
25003 + if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
25004 + return true;
25005 + return false;
25006 + }
25007 +#endif
25008 +
25009 +#ifdef CONFIG_PAX_SEGMEXEC
25010 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
25011 + if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
25012 + return true;
25013 + return false;
25014 + }
25015 +#endif
25016 +
25017 + return false;
25018 +}
25019 +#endif
25020 +
25021 +#ifdef CONFIG_PAX_EMUTRAMP
25022 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
25023 +{
25024 + int err;
25025 +
25026 + do { /* PaX: libffi trampoline emulation */
25027 + unsigned char mov, jmp;
25028 + unsigned int addr1, addr2;
25029 +
25030 +#ifdef CONFIG_X86_64
25031 + if ((regs->ip + 9) >> 32)
25032 + break;
25033 +#endif
25034 +
25035 + err = get_user(mov, (unsigned char __user *)regs->ip);
25036 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
25037 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
25038 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
25039 +
25040 + if (err)
25041 + break;
25042 +
25043 + if (mov == 0xB8 && jmp == 0xE9) {
25044 + regs->ax = addr1;
25045 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
25046 + return 2;
25047 + }
25048 + } while (0);
25049 +
25050 + do { /* PaX: gcc trampoline emulation #1 */
25051 + unsigned char mov1, mov2;
25052 + unsigned short jmp;
25053 + unsigned int addr1, addr2;
25054 +
25055 +#ifdef CONFIG_X86_64
25056 + if ((regs->ip + 11) >> 32)
25057 + break;
25058 +#endif
25059 +
25060 + err = get_user(mov1, (unsigned char __user *)regs->ip);
25061 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
25062 + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
25063 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
25064 + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
25065 +
25066 + if (err)
25067 + break;
25068 +
25069 + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
25070 + regs->cx = addr1;
25071 + regs->ax = addr2;
25072 + regs->ip = addr2;
25073 + return 2;
25074 + }
25075 + } while (0);
25076 +
25077 + do { /* PaX: gcc trampoline emulation #2 */
25078 + unsigned char mov, jmp;
25079 + unsigned int addr1, addr2;
25080 +
25081 +#ifdef CONFIG_X86_64
25082 + if ((regs->ip + 9) >> 32)
25083 + break;
25084 +#endif
25085 +
25086 + err = get_user(mov, (unsigned char __user *)regs->ip);
25087 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
25088 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
25089 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
25090 +
25091 + if (err)
25092 + break;
25093 +
25094 + if (mov == 0xB9 && jmp == 0xE9) {
25095 + regs->cx = addr1;
25096 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
25097 + return 2;
25098 + }
25099 + } while (0);
25100 +
25101 + return 1; /* PaX in action */
25102 +}
25103 +
25104 +#ifdef CONFIG_X86_64
25105 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
25106 +{
25107 + int err;
25108 +
25109 + do { /* PaX: libffi trampoline emulation */
25110 + unsigned short mov1, mov2, jmp1;
25111 + unsigned char stcclc, jmp2;
25112 + unsigned long addr1, addr2;
25113 +
25114 + err = get_user(mov1, (unsigned short __user *)regs->ip);
25115 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
25116 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
25117 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
25118 + err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
25119 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
25120 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
25121 +
25122 + if (err)
25123 + break;
25124 +
25125 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
25126 + regs->r11 = addr1;
25127 + regs->r10 = addr2;
25128 + if (stcclc == 0xF8)
25129 + regs->flags &= ~X86_EFLAGS_CF;
25130 + else
25131 + regs->flags |= X86_EFLAGS_CF;
25132 + regs->ip = addr1;
25133 + return 2;
25134 + }
25135 + } while (0);
25136 +
25137 + do { /* PaX: gcc trampoline emulation #1 */
25138 + unsigned short mov1, mov2, jmp1;
25139 + unsigned char jmp2;
25140 + unsigned int addr1;
25141 + unsigned long addr2;
25142 +
25143 + err = get_user(mov1, (unsigned short __user *)regs->ip);
25144 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
25145 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
25146 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
25147 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
25148 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
25149 +
25150 + if (err)
25151 + break;
25152 +
25153 + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
25154 + regs->r11 = addr1;
25155 + regs->r10 = addr2;
25156 + regs->ip = addr1;
25157 + return 2;
25158 + }
25159 + } while (0);
25160 +
25161 + do { /* PaX: gcc trampoline emulation #2 */
25162 + unsigned short mov1, mov2, jmp1;
25163 + unsigned char jmp2;
25164 + unsigned long addr1, addr2;
25165 +
25166 + err = get_user(mov1, (unsigned short __user *)regs->ip);
25167 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
25168 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
25169 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
25170 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
25171 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
25172 +
25173 + if (err)
25174 + break;
25175 +
25176 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
25177 + regs->r11 = addr1;
25178 + regs->r10 = addr2;
25179 + regs->ip = addr1;
25180 + return 2;
25181 + }
25182 + } while (0);
25183 +
25184 + return 1; /* PaX in action */
25185 +}
25186 +#endif
25187 +
25188 +/*
25189 + * PaX: decide what to do with offenders (regs->ip = fault address)
25190 + *
25191 + * returns 1 when task should be killed
25192 + * 2 when gcc trampoline was detected
25193 + */
25194 +static int pax_handle_fetch_fault(struct pt_regs *regs)
25195 +{
25196 + if (v8086_mode(regs))
25197 + return 1;
25198 +
25199 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
25200 + return 1;
25201 +
25202 +#ifdef CONFIG_X86_32
25203 + return pax_handle_fetch_fault_32(regs);
25204 +#else
25205 + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
25206 + return pax_handle_fetch_fault_32(regs);
25207 + else
25208 + return pax_handle_fetch_fault_64(regs);
25209 +#endif
25210 +}
25211 +#endif
25212 +
25213 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
25214 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
25215 +{
25216 + long i;
25217 +
25218 + printk(KERN_ERR "PAX: bytes at PC: ");
25219 + for (i = 0; i < 20; i++) {
25220 + unsigned char c;
25221 + if (get_user(c, (unsigned char __force_user *)pc+i))
25222 + printk(KERN_CONT "?? ");
25223 + else
25224 + printk(KERN_CONT "%02x ", c);
25225 + }
25226 + printk("\n");
25227 +
25228 + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
25229 + for (i = -1; i < 80 / (long)sizeof(long); i++) {
25230 + unsigned long c;
25231 + if (get_user(c, (unsigned long __force_user *)sp+i)) {
25232 +#ifdef CONFIG_X86_32
25233 + printk(KERN_CONT "???????? ");
25234 +#else
25235 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
25236 + printk(KERN_CONT "???????? ???????? ");
25237 + else
25238 + printk(KERN_CONT "???????????????? ");
25239 +#endif
25240 + } else {
25241 +#ifdef CONFIG_X86_64
25242 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
25243 + printk(KERN_CONT "%08x ", (unsigned int)c);
25244 + printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
25245 + } else
25246 +#endif
25247 + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
25248 + }
25249 + }
25250 + printk("\n");
25251 +}
25252 +#endif
25253 +
25254 +/**
25255 + * probe_kernel_write(): safely attempt to write to a location
25256 + * @dst: address to write to
25257 + * @src: pointer to the data that shall be written
25258 + * @size: size of the data chunk
25259 + *
25260 + * Safely write to address @dst from the buffer at @src. If a kernel fault
25261 + * happens, handle that and return -EFAULT.
25262 + */
25263 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
25264 +{
25265 + long ret;
25266 + mm_segment_t old_fs = get_fs();
25267 +
25268 + set_fs(KERNEL_DS);
25269 + pagefault_disable();
25270 + pax_open_kernel();
25271 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
25272 + pax_close_kernel();
25273 + pagefault_enable();
25274 + set_fs(old_fs);
25275 +
25276 + return ret ? -EFAULT : 0;
25277 +}
25278 diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
25279 index 71da1bc..7a16bf4 100644
25280 --- a/arch/x86/mm/gup.c
25281 +++ b/arch/x86/mm/gup.c
25282 @@ -237,7 +237,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
25283 addr = start;
25284 len = (unsigned long) nr_pages << PAGE_SHIFT;
25285 end = start + len;
25286 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
25287 + if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
25288 (void __user *)start, len)))
25289 return 0;
25290
25291 diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
25292 index 63a6ba6..79abd7a 100644
25293 --- a/arch/x86/mm/highmem_32.c
25294 +++ b/arch/x86/mm/highmem_32.c
25295 @@ -43,7 +43,10 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
25296 idx = type + KM_TYPE_NR*smp_processor_id();
25297 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
25298 BUG_ON(!pte_none(*(kmap_pte-idx)));
25299 +
25300 + pax_open_kernel();
25301 set_pte(kmap_pte-idx, mk_pte(page, prot));
25302 + pax_close_kernel();
25303
25304 return (void *)vaddr;
25305 }
25306 diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
25307 index f46c3407..6ff9a26 100644
25308 --- a/arch/x86/mm/hugetlbpage.c
25309 +++ b/arch/x86/mm/hugetlbpage.c
25310 @@ -267,13 +267,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
25311 struct hstate *h = hstate_file(file);
25312 struct mm_struct *mm = current->mm;
25313 struct vm_area_struct *vma;
25314 - unsigned long start_addr;
25315 + unsigned long start_addr, pax_task_size = TASK_SIZE;
25316 +
25317 +#ifdef CONFIG_PAX_SEGMEXEC
25318 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
25319 + pax_task_size = SEGMEXEC_TASK_SIZE;
25320 +#endif
25321 +
25322 + pax_task_size -= PAGE_SIZE;
25323
25324 if (len > mm->cached_hole_size) {
25325 - start_addr = mm->free_area_cache;
25326 + start_addr = mm->free_area_cache;
25327 } else {
25328 - start_addr = TASK_UNMAPPED_BASE;
25329 - mm->cached_hole_size = 0;
25330 + start_addr = mm->mmap_base;
25331 + mm->cached_hole_size = 0;
25332 }
25333
25334 full_search:
25335 @@ -281,26 +288,27 @@ full_search:
25336
25337 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
25338 /* At this point: (!vma || addr < vma->vm_end). */
25339 - if (TASK_SIZE - len < addr) {
25340 + if (pax_task_size - len < addr) {
25341 /*
25342 * Start a new search - just in case we missed
25343 * some holes.
25344 */
25345 - if (start_addr != TASK_UNMAPPED_BASE) {
25346 - start_addr = TASK_UNMAPPED_BASE;
25347 + if (start_addr != mm->mmap_base) {
25348 + start_addr = mm->mmap_base;
25349 mm->cached_hole_size = 0;
25350 goto full_search;
25351 }
25352 return -ENOMEM;
25353 }
25354 - if (!vma || addr + len <= vma->vm_start) {
25355 - mm->free_area_cache = addr + len;
25356 - return addr;
25357 - }
25358 + if (check_heap_stack_gap(vma, addr, len))
25359 + break;
25360 if (addr + mm->cached_hole_size < vma->vm_start)
25361 mm->cached_hole_size = vma->vm_start - addr;
25362 addr = ALIGN(vma->vm_end, huge_page_size(h));
25363 }
25364 +
25365 + mm->free_area_cache = addr + len;
25366 + return addr;
25367 }
25368
25369 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
25370 @@ -309,10 +317,9 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
25371 {
25372 struct hstate *h = hstate_file(file);
25373 struct mm_struct *mm = current->mm;
25374 - struct vm_area_struct *vma, *prev_vma;
25375 - unsigned long base = mm->mmap_base, addr = addr0;
25376 + struct vm_area_struct *vma;
25377 + unsigned long base = mm->mmap_base, addr;
25378 unsigned long largest_hole = mm->cached_hole_size;
25379 - int first_time = 1;
25380
25381 /* don't allow allocations above current base */
25382 if (mm->free_area_cache > base)
25383 @@ -322,64 +329,63 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
25384 largest_hole = 0;
25385 mm->free_area_cache = base;
25386 }
25387 -try_again:
25388 +
25389 /* make sure it can fit in the remaining address space */
25390 if (mm->free_area_cache < len)
25391 goto fail;
25392
25393 /* either no address requested or cant fit in requested address hole */
25394 - addr = (mm->free_area_cache - len) & huge_page_mask(h);
25395 + addr = (mm->free_area_cache - len);
25396 do {
25397 + addr &= huge_page_mask(h);
25398 + vma = find_vma(mm, addr);
25399 /*
25400 * Lookup failure means no vma is above this address,
25401 * i.e. return with success:
25402 - */
25403 - if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
25404 - return addr;
25405 -
25406 - /*
25407 * new region fits between prev_vma->vm_end and
25408 * vma->vm_start, use it:
25409 */
25410 - if (addr + len <= vma->vm_start &&
25411 - (!prev_vma || (addr >= prev_vma->vm_end))) {
25412 + if (check_heap_stack_gap(vma, addr, len)) {
25413 /* remember the address as a hint for next time */
25414 - mm->cached_hole_size = largest_hole;
25415 - return (mm->free_area_cache = addr);
25416 - } else {
25417 - /* pull free_area_cache down to the first hole */
25418 - if (mm->free_area_cache == vma->vm_end) {
25419 - mm->free_area_cache = vma->vm_start;
25420 - mm->cached_hole_size = largest_hole;
25421 - }
25422 + mm->cached_hole_size = largest_hole;
25423 + return (mm->free_area_cache = addr);
25424 + }
25425 + /* pull free_area_cache down to the first hole */
25426 + if (mm->free_area_cache == vma->vm_end) {
25427 + mm->free_area_cache = vma->vm_start;
25428 + mm->cached_hole_size = largest_hole;
25429 }
25430
25431 /* remember the largest hole we saw so far */
25432 if (addr + largest_hole < vma->vm_start)
25433 - largest_hole = vma->vm_start - addr;
25434 + largest_hole = vma->vm_start - addr;
25435
25436 /* try just below the current vma->vm_start */
25437 - addr = (vma->vm_start - len) & huge_page_mask(h);
25438 - } while (len <= vma->vm_start);
25439 + addr = skip_heap_stack_gap(vma, len);
25440 + } while (!IS_ERR_VALUE(addr));
25441
25442 fail:
25443 /*
25444 - * if hint left us with no space for the requested
25445 - * mapping then try again:
25446 - */
25447 - if (first_time) {
25448 - mm->free_area_cache = base;
25449 - largest_hole = 0;
25450 - first_time = 0;
25451 - goto try_again;
25452 - }
25453 - /*
25454 * A failed mmap() very likely causes application failure,
25455 * so fall back to the bottom-up function here. This scenario
25456 * can happen with large stack limits and large mmap()
25457 * allocations.
25458 */
25459 - mm->free_area_cache = TASK_UNMAPPED_BASE;
25460 +
25461 +#ifdef CONFIG_PAX_SEGMEXEC
25462 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
25463 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
25464 + else
25465 +#endif
25466 +
25467 + mm->mmap_base = TASK_UNMAPPED_BASE;
25468 +
25469 +#ifdef CONFIG_PAX_RANDMMAP
25470 + if (mm->pax_flags & MF_PAX_RANDMMAP)
25471 + mm->mmap_base += mm->delta_mmap;
25472 +#endif
25473 +
25474 + mm->free_area_cache = mm->mmap_base;
25475 mm->cached_hole_size = ~0UL;
25476 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
25477 len, pgoff, flags);
25478 @@ -387,6 +393,7 @@ fail:
25479 /*
25480 * Restore the topdown base:
25481 */
25482 + mm->mmap_base = base;
25483 mm->free_area_cache = base;
25484 mm->cached_hole_size = ~0UL;
25485
25486 @@ -400,10 +407,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
25487 struct hstate *h = hstate_file(file);
25488 struct mm_struct *mm = current->mm;
25489 struct vm_area_struct *vma;
25490 + unsigned long pax_task_size = TASK_SIZE;
25491
25492 if (len & ~huge_page_mask(h))
25493 return -EINVAL;
25494 - if (len > TASK_SIZE)
25495 +
25496 +#ifdef CONFIG_PAX_SEGMEXEC
25497 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
25498 + pax_task_size = SEGMEXEC_TASK_SIZE;
25499 +#endif
25500 +
25501 + pax_task_size -= PAGE_SIZE;
25502 +
25503 + if (len > pax_task_size)
25504 return -ENOMEM;
25505
25506 if (flags & MAP_FIXED) {
25507 @@ -415,8 +431,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
25508 if (addr) {
25509 addr = ALIGN(addr, huge_page_size(h));
25510 vma = find_vma(mm, addr);
25511 - if (TASK_SIZE - len >= addr &&
25512 - (!vma || addr + len <= vma->vm_start))
25513 + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
25514 return addr;
25515 }
25516 if (mm->get_unmapped_area == arch_get_unmapped_area)
25517 diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
25518 index 73ffd55..f61c2a7 100644
25519 --- a/arch/x86/mm/init.c
25520 +++ b/arch/x86/mm/init.c
25521 @@ -69,11 +69,7 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
25522 * cause a hotspot and fill up ZONE_DMA. The page tables
25523 * need roughly 0.5KB per GB.
25524 */
25525 -#ifdef CONFIG_X86_32
25526 - start = 0x7000;
25527 -#else
25528 - start = 0x8000;
25529 -#endif
25530 + start = 0x100000;
25531 e820_table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
25532 tables, PAGE_SIZE);
25533 if (e820_table_start == -1UL)
25534 @@ -147,7 +143,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
25535 #endif
25536
25537 set_nx();
25538 - if (nx_enabled)
25539 + if (nx_enabled && cpu_has_nx)
25540 printk(KERN_INFO "NX (Execute Disable) protection: active\n");
25541
25542 /* Enable PSE if available */
25543 @@ -329,10 +325,27 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
25544 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
25545 * mmio resources as well as potential bios/acpi data regions.
25546 */
25547 +
25548 int devmem_is_allowed(unsigned long pagenr)
25549 {
25550 +#ifdef CONFIG_GRKERNSEC_KMEM
25551 + /* allow BDA */
25552 + if (!pagenr)
25553 + return 1;
25554 + /* allow EBDA */
25555 + if ((0x9f000 >> PAGE_SHIFT) == pagenr)
25556 + return 1;
25557 + /* allow ISA/video mem */
25558 + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
25559 + return 1;
25560 + /* throw out everything else below 1MB */
25561 + if (pagenr <= 256)
25562 + return 0;
25563 +#else
25564 if (pagenr <= 256)
25565 return 1;
25566 +#endif
25567 +
25568 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
25569 return 0;
25570 if (!page_is_ram(pagenr))
25571 @@ -379,6 +392,86 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
25572
25573 void free_initmem(void)
25574 {
25575 +
25576 +#ifdef CONFIG_PAX_KERNEXEC
25577 +#ifdef CONFIG_X86_32
25578 + /* PaX: limit KERNEL_CS to actual size */
25579 + unsigned long addr, limit;
25580 + struct desc_struct d;
25581 + int cpu;
25582 +
25583 + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
25584 + limit = (limit - 1UL) >> PAGE_SHIFT;
25585 +
25586 + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
25587 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
25588 + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
25589 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
25590 + }
25591 +
25592 + /* PaX: make KERNEL_CS read-only */
25593 + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
25594 + if (!paravirt_enabled())
25595 + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
25596 +/*
25597 + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
25598 + pgd = pgd_offset_k(addr);
25599 + pud = pud_offset(pgd, addr);
25600 + pmd = pmd_offset(pud, addr);
25601 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
25602 + }
25603 +*/
25604 +#ifdef CONFIG_X86_PAE
25605 + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
25606 +/*
25607 + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
25608 + pgd = pgd_offset_k(addr);
25609 + pud = pud_offset(pgd, addr);
25610 + pmd = pmd_offset(pud, addr);
25611 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
25612 + }
25613 +*/
25614 +#endif
25615 +
25616 +#ifdef CONFIG_MODULES
25617 + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
25618 +#endif
25619 +
25620 +#else
25621 + pgd_t *pgd;
25622 + pud_t *pud;
25623 + pmd_t *pmd;
25624 + unsigned long addr, end;
25625 +
25626 + /* PaX: make kernel code/rodata read-only, rest non-executable */
25627 + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
25628 + pgd = pgd_offset_k(addr);
25629 + pud = pud_offset(pgd, addr);
25630 + pmd = pmd_offset(pud, addr);
25631 + if (!pmd_present(*pmd))
25632 + continue;
25633 + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
25634 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
25635 + else
25636 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
25637 + }
25638 +
25639 + addr = (unsigned long)__va(__pa(__START_KERNEL_map));
25640 + end = addr + KERNEL_IMAGE_SIZE;
25641 + for (; addr < end; addr += PMD_SIZE) {
25642 + pgd = pgd_offset_k(addr);
25643 + pud = pud_offset(pgd, addr);
25644 + pmd = pmd_offset(pud, addr);
25645 + if (!pmd_present(*pmd))
25646 + continue;
25647 + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
25648 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
25649 + }
25650 +#endif
25651 +
25652 + flush_tlb_all();
25653 +#endif
25654 +
25655 free_init_pages("unused kernel memory",
25656 (unsigned long)(&__init_begin),
25657 (unsigned long)(&__init_end));
25658 diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
25659 index 30938c1..bda3d5d 100644
25660 --- a/arch/x86/mm/init_32.c
25661 +++ b/arch/x86/mm/init_32.c
25662 @@ -72,36 +72,6 @@ static __init void *alloc_low_page(void)
25663 }
25664
25665 /*
25666 - * Creates a middle page table and puts a pointer to it in the
25667 - * given global directory entry. This only returns the gd entry
25668 - * in non-PAE compilation mode, since the middle layer is folded.
25669 - */
25670 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
25671 -{
25672 - pud_t *pud;
25673 - pmd_t *pmd_table;
25674 -
25675 -#ifdef CONFIG_X86_PAE
25676 - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
25677 - if (after_bootmem)
25678 - pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
25679 - else
25680 - pmd_table = (pmd_t *)alloc_low_page();
25681 - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
25682 - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
25683 - pud = pud_offset(pgd, 0);
25684 - BUG_ON(pmd_table != pmd_offset(pud, 0));
25685 -
25686 - return pmd_table;
25687 - }
25688 -#endif
25689 - pud = pud_offset(pgd, 0);
25690 - pmd_table = pmd_offset(pud, 0);
25691 -
25692 - return pmd_table;
25693 -}
25694 -
25695 -/*
25696 * Create a page table and place a pointer to it in a middle page
25697 * directory entry:
25698 */
25699 @@ -121,13 +91,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
25700 page_table = (pte_t *)alloc_low_page();
25701
25702 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
25703 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
25704 + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
25705 +#else
25706 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
25707 +#endif
25708 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
25709 }
25710
25711 return pte_offset_kernel(pmd, 0);
25712 }
25713
25714 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
25715 +{
25716 + pud_t *pud;
25717 + pmd_t *pmd_table;
25718 +
25719 + pud = pud_offset(pgd, 0);
25720 + pmd_table = pmd_offset(pud, 0);
25721 +
25722 + return pmd_table;
25723 +}
25724 +
25725 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
25726 {
25727 int pgd_idx = pgd_index(vaddr);
25728 @@ -201,6 +186,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
25729 int pgd_idx, pmd_idx;
25730 unsigned long vaddr;
25731 pgd_t *pgd;
25732 + pud_t *pud;
25733 pmd_t *pmd;
25734 pte_t *pte = NULL;
25735
25736 @@ -210,8 +196,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
25737 pgd = pgd_base + pgd_idx;
25738
25739 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
25740 - pmd = one_md_table_init(pgd);
25741 - pmd = pmd + pmd_index(vaddr);
25742 + pud = pud_offset(pgd, vaddr);
25743 + pmd = pmd_offset(pud, vaddr);
25744 +
25745 +#ifdef CONFIG_X86_PAE
25746 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
25747 +#endif
25748 +
25749 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
25750 pmd++, pmd_idx++) {
25751 pte = page_table_kmap_check(one_page_table_init(pmd),
25752 @@ -223,11 +214,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
25753 }
25754 }
25755
25756 -static inline int is_kernel_text(unsigned long addr)
25757 +static inline int is_kernel_text(unsigned long start, unsigned long end)
25758 {
25759 - if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
25760 - return 1;
25761 - return 0;
25762 + if ((start > ktla_ktva((unsigned long)_etext) ||
25763 + end <= ktla_ktva((unsigned long)_stext)) &&
25764 + (start > ktla_ktva((unsigned long)_einittext) ||
25765 + end <= ktla_ktva((unsigned long)_sinittext)) &&
25766 +
25767 +#ifdef CONFIG_ACPI_SLEEP
25768 + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
25769 +#endif
25770 +
25771 + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
25772 + return 0;
25773 + return 1;
25774 }
25775
25776 /*
25777 @@ -243,9 +243,10 @@ kernel_physical_mapping_init(unsigned long start,
25778 int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
25779 unsigned long start_pfn, end_pfn;
25780 pgd_t *pgd_base = swapper_pg_dir;
25781 - int pgd_idx, pmd_idx, pte_ofs;
25782 + unsigned int pgd_idx, pmd_idx, pte_ofs;
25783 unsigned long pfn;
25784 pgd_t *pgd;
25785 + pud_t *pud;
25786 pmd_t *pmd;
25787 pte_t *pte;
25788 unsigned pages_2m, pages_4k;
25789 @@ -278,8 +279,13 @@ repeat:
25790 pfn = start_pfn;
25791 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
25792 pgd = pgd_base + pgd_idx;
25793 - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
25794 - pmd = one_md_table_init(pgd);
25795 + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
25796 + pud = pud_offset(pgd, 0);
25797 + pmd = pmd_offset(pud, 0);
25798 +
25799 +#ifdef CONFIG_X86_PAE
25800 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
25801 +#endif
25802
25803 if (pfn >= end_pfn)
25804 continue;
25805 @@ -291,14 +297,13 @@ repeat:
25806 #endif
25807 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
25808 pmd++, pmd_idx++) {
25809 - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
25810 + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
25811
25812 /*
25813 * Map with big pages if possible, otherwise
25814 * create normal page tables:
25815 */
25816 if (use_pse) {
25817 - unsigned int addr2;
25818 pgprot_t prot = PAGE_KERNEL_LARGE;
25819 /*
25820 * first pass will use the same initial
25821 @@ -308,11 +313,7 @@ repeat:
25822 __pgprot(PTE_IDENT_ATTR |
25823 _PAGE_PSE);
25824
25825 - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
25826 - PAGE_OFFSET + PAGE_SIZE-1;
25827 -
25828 - if (is_kernel_text(addr) ||
25829 - is_kernel_text(addr2))
25830 + if (is_kernel_text(address, address + PMD_SIZE))
25831 prot = PAGE_KERNEL_LARGE_EXEC;
25832
25833 pages_2m++;
25834 @@ -329,7 +330,7 @@ repeat:
25835 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
25836 pte += pte_ofs;
25837 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
25838 - pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
25839 + pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
25840 pgprot_t prot = PAGE_KERNEL;
25841 /*
25842 * first pass will use the same initial
25843 @@ -337,7 +338,7 @@ repeat:
25844 */
25845 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
25846
25847 - if (is_kernel_text(addr))
25848 + if (is_kernel_text(address, address + PAGE_SIZE))
25849 prot = PAGE_KERNEL_EXEC;
25850
25851 pages_4k++;
25852 @@ -489,7 +490,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
25853
25854 pud = pud_offset(pgd, va);
25855 pmd = pmd_offset(pud, va);
25856 - if (!pmd_present(*pmd))
25857 + if (!pmd_present(*pmd) || pmd_huge(*pmd))
25858 break;
25859
25860 pte = pte_offset_kernel(pmd, va);
25861 @@ -541,9 +542,7 @@ void __init early_ioremap_page_table_range_init(void)
25862
25863 static void __init pagetable_init(void)
25864 {
25865 - pgd_t *pgd_base = swapper_pg_dir;
25866 -
25867 - permanent_kmaps_init(pgd_base);
25868 + permanent_kmaps_init(swapper_pg_dir);
25869 }
25870
25871 #ifdef CONFIG_ACPI_SLEEP
25872 @@ -551,12 +550,12 @@ static void __init pagetable_init(void)
25873 * ACPI suspend needs this for resume, because things like the intel-agp
25874 * driver might have split up a kernel 4MB mapping.
25875 */
25876 -char swsusp_pg_dir[PAGE_SIZE]
25877 +pgd_t swsusp_pg_dir[PTRS_PER_PGD]
25878 __attribute__ ((aligned(PAGE_SIZE)));
25879
25880 static inline void save_pg_dir(void)
25881 {
25882 - memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
25883 + clone_pgd_range(swsusp_pg_dir, swapper_pg_dir, PTRS_PER_PGD);
25884 }
25885 #else /* !CONFIG_ACPI_SLEEP */
25886 static inline void save_pg_dir(void)
25887 @@ -588,7 +587,7 @@ void zap_low_mappings(bool early)
25888 flush_tlb_all();
25889 }
25890
25891 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
25892 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
25893 EXPORT_SYMBOL_GPL(__supported_pte_mask);
25894
25895 /* user-defined highmem size */
25896 @@ -777,7 +776,7 @@ void __init setup_bootmem_allocator(void)
25897 * Initialize the boot-time allocator (with low memory only):
25898 */
25899 bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
25900 - bootmap = find_e820_area(0, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
25901 + bootmap = find_e820_area(0x100000, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
25902 PAGE_SIZE);
25903 if (bootmap == -1L)
25904 panic("Cannot find bootmem map of size %ld\n", bootmap_size);
25905 @@ -864,6 +863,12 @@ void __init mem_init(void)
25906
25907 pci_iommu_alloc();
25908
25909 +#ifdef CONFIG_PAX_PER_CPU_PGD
25910 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
25911 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
25912 + KERNEL_PGD_PTRS);
25913 +#endif
25914 +
25915 #ifdef CONFIG_FLATMEM
25916 BUG_ON(!mem_map);
25917 #endif
25918 @@ -881,7 +886,7 @@ void __init mem_init(void)
25919 set_highmem_pages_init();
25920
25921 codesize = (unsigned long) &_etext - (unsigned long) &_text;
25922 - datasize = (unsigned long) &_edata - (unsigned long) &_etext;
25923 + datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
25924 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
25925
25926 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
25927 @@ -923,10 +928,10 @@ void __init mem_init(void)
25928 ((unsigned long)&__init_end -
25929 (unsigned long)&__init_begin) >> 10,
25930
25931 - (unsigned long)&_etext, (unsigned long)&_edata,
25932 - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
25933 + (unsigned long)&_sdata, (unsigned long)&_edata,
25934 + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
25935
25936 - (unsigned long)&_text, (unsigned long)&_etext,
25937 + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
25938 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
25939
25940 /*
25941 @@ -1007,6 +1012,7 @@ void set_kernel_text_rw(void)
25942 if (!kernel_set_to_readonly)
25943 return;
25944
25945 + start = ktla_ktva(start);
25946 pr_debug("Set kernel text: %lx - %lx for read write\n",
25947 start, start+size);
25948
25949 @@ -1021,6 +1027,7 @@ void set_kernel_text_ro(void)
25950 if (!kernel_set_to_readonly)
25951 return;
25952
25953 + start = ktla_ktva(start);
25954 pr_debug("Set kernel text: %lx - %lx for read only\n",
25955 start, start+size);
25956
25957 @@ -1032,6 +1039,7 @@ void mark_rodata_ro(void)
25958 unsigned long start = PFN_ALIGN(_text);
25959 unsigned long size = PFN_ALIGN(_etext) - start;
25960
25961 + start = ktla_ktva(start);
25962 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
25963 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
25964 size >> 10);
25965 diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
25966 index 7d095ad..25d2549 100644
25967 --- a/arch/x86/mm/init_64.c
25968 +++ b/arch/x86/mm/init_64.c
25969 @@ -164,7 +164,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
25970 pmd = fill_pmd(pud, vaddr);
25971 pte = fill_pte(pmd, vaddr);
25972
25973 + pax_open_kernel();
25974 set_pte(pte, new_pte);
25975 + pax_close_kernel();
25976
25977 /*
25978 * It's enough to flush this one mapping.
25979 @@ -223,14 +225,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
25980 pgd = pgd_offset_k((unsigned long)__va(phys));
25981 if (pgd_none(*pgd)) {
25982 pud = (pud_t *) spp_getpage();
25983 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
25984 - _PAGE_USER));
25985 + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
25986 }
25987 pud = pud_offset(pgd, (unsigned long)__va(phys));
25988 if (pud_none(*pud)) {
25989 pmd = (pmd_t *) spp_getpage();
25990 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
25991 - _PAGE_USER));
25992 + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
25993 }
25994 pmd = pmd_offset(pud, phys);
25995 BUG_ON(!pmd_none(*pmd));
25996 @@ -675,6 +675,12 @@ void __init mem_init(void)
25997
25998 pci_iommu_alloc();
25999
26000 +#ifdef CONFIG_PAX_PER_CPU_PGD
26001 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
26002 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
26003 + KERNEL_PGD_PTRS);
26004 +#endif
26005 +
26006 /* clear_bss() already clear the empty_zero_page */
26007
26008 reservedpages = 0;
26009 @@ -861,8 +867,8 @@ int kern_addr_valid(unsigned long addr)
26010 static struct vm_area_struct gate_vma = {
26011 .vm_start = VSYSCALL_START,
26012 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
26013 - .vm_page_prot = PAGE_READONLY_EXEC,
26014 - .vm_flags = VM_READ | VM_EXEC
26015 + .vm_page_prot = PAGE_READONLY,
26016 + .vm_flags = VM_READ
26017 };
26018
26019 struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
26020 @@ -896,7 +902,7 @@ int in_gate_area_no_task(unsigned long addr)
26021
26022 const char *arch_vma_name(struct vm_area_struct *vma)
26023 {
26024 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
26025 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
26026 return "[vdso]";
26027 if (vma == &gate_vma)
26028 return "[vsyscall]";
26029 diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
26030 index 84e236c..69bd3f6 100644
26031 --- a/arch/x86/mm/iomap_32.c
26032 +++ b/arch/x86/mm/iomap_32.c
26033 @@ -65,7 +65,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
26034 debug_kmap_atomic(type);
26035 idx = type + KM_TYPE_NR * smp_processor_id();
26036 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
26037 +
26038 + pax_open_kernel();
26039 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
26040 + pax_close_kernel();
26041 +
26042 arch_flush_lazy_mmu_mode();
26043
26044 return (void *)vaddr;
26045 diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
26046 index 2feb9bd..ab91e7b 100644
26047 --- a/arch/x86/mm/ioremap.c
26048 +++ b/arch/x86/mm/ioremap.c
26049 @@ -41,8 +41,8 @@ int page_is_ram(unsigned long pagenr)
26050 * Second special case: Some BIOSen report the PC BIOS
26051 * area (640->1Mb) as ram even though it is not.
26052 */
26053 - if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
26054 - pagenr < (BIOS_END >> PAGE_SHIFT))
26055 + if (pagenr >= (ISA_START_ADDRESS >> PAGE_SHIFT) &&
26056 + pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
26057 return 0;
26058
26059 for (i = 0; i < e820.nr_map; i++) {
26060 @@ -137,13 +137,10 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
26061 /*
26062 * Don't allow anybody to remap normal RAM that we're using..
26063 */
26064 - for (pfn = phys_addr >> PAGE_SHIFT;
26065 - (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
26066 - pfn++) {
26067 -
26068 + for (pfn = phys_addr >> PAGE_SHIFT; ((resource_size_t)pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK); pfn++) {
26069 int is_ram = page_is_ram(pfn);
26070
26071 - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
26072 + if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
26073 return NULL;
26074 WARN_ON_ONCE(is_ram);
26075 }
26076 @@ -378,6 +375,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
26077
26078 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
26079 if (page_is_ram(start >> PAGE_SHIFT))
26080 +#ifdef CONFIG_HIGHMEM
26081 + if ((start >> PAGE_SHIFT) < max_low_pfn)
26082 +#endif
26083 return __va(phys);
26084
26085 addr = (void __force *)ioremap_default(start, PAGE_SIZE);
26086 @@ -407,7 +407,7 @@ static int __init early_ioremap_debug_setup(char *str)
26087 early_param("early_ioremap_debug", early_ioremap_debug_setup);
26088
26089 static __initdata int after_paging_init;
26090 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
26091 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
26092
26093 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
26094 {
26095 @@ -439,8 +439,7 @@ void __init early_ioremap_init(void)
26096 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
26097
26098 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
26099 - memset(bm_pte, 0, sizeof(bm_pte));
26100 - pmd_populate_kernel(&init_mm, pmd, bm_pte);
26101 + pmd_populate_user(&init_mm, pmd, bm_pte);
26102
26103 /*
26104 * The boot-ioremap range spans multiple pmds, for which
26105 diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
26106 index 8cc1833..1abbc5b 100644
26107 --- a/arch/x86/mm/kmemcheck/kmemcheck.c
26108 +++ b/arch/x86/mm/kmemcheck/kmemcheck.c
26109 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
26110 * memory (e.g. tracked pages)? For now, we need this to avoid
26111 * invoking kmemcheck for PnP BIOS calls.
26112 */
26113 - if (regs->flags & X86_VM_MASK)
26114 + if (v8086_mode(regs))
26115 return false;
26116 - if (regs->cs != __KERNEL_CS)
26117 + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
26118 return false;
26119
26120 pte = kmemcheck_pte_lookup(address);
26121 diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
26122 index c9e57af..07a321b 100644
26123 --- a/arch/x86/mm/mmap.c
26124 +++ b/arch/x86/mm/mmap.c
26125 @@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size(void)
26126 * Leave an at least ~128 MB hole with possible stack randomization.
26127 */
26128 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
26129 -#define MAX_GAP (TASK_SIZE/6*5)
26130 +#define MAX_GAP (pax_task_size/6*5)
26131
26132 /*
26133 * True on X86_32 or when emulating IA32 on X86_64
26134 @@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
26135 return rnd << PAGE_SHIFT;
26136 }
26137
26138 -static unsigned long mmap_base(void)
26139 +static unsigned long mmap_base(struct mm_struct *mm)
26140 {
26141 unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
26142 + unsigned long pax_task_size = TASK_SIZE;
26143 +
26144 +#ifdef CONFIG_PAX_SEGMEXEC
26145 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
26146 + pax_task_size = SEGMEXEC_TASK_SIZE;
26147 +#endif
26148
26149 if (gap < MIN_GAP)
26150 gap = MIN_GAP;
26151 else if (gap > MAX_GAP)
26152 gap = MAX_GAP;
26153
26154 - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
26155 + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
26156 }
26157
26158 /*
26159 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
26160 * does, but not when emulating X86_32
26161 */
26162 -static unsigned long mmap_legacy_base(void)
26163 +static unsigned long mmap_legacy_base(struct mm_struct *mm)
26164 {
26165 - if (mmap_is_ia32())
26166 + if (mmap_is_ia32()) {
26167 +
26168 +#ifdef CONFIG_PAX_SEGMEXEC
26169 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
26170 + return SEGMEXEC_TASK_UNMAPPED_BASE;
26171 + else
26172 +#endif
26173 +
26174 return TASK_UNMAPPED_BASE;
26175 - else
26176 + } else
26177 return TASK_UNMAPPED_BASE + mmap_rnd();
26178 }
26179
26180 @@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(void)
26181 void arch_pick_mmap_layout(struct mm_struct *mm)
26182 {
26183 if (mmap_is_legacy()) {
26184 - mm->mmap_base = mmap_legacy_base();
26185 + mm->mmap_base = mmap_legacy_base(mm);
26186 +
26187 +#ifdef CONFIG_PAX_RANDMMAP
26188 + if (mm->pax_flags & MF_PAX_RANDMMAP)
26189 + mm->mmap_base += mm->delta_mmap;
26190 +#endif
26191 +
26192 mm->get_unmapped_area = arch_get_unmapped_area;
26193 mm->unmap_area = arch_unmap_area;
26194 } else {
26195 - mm->mmap_base = mmap_base();
26196 + mm->mmap_base = mmap_base(mm);
26197 +
26198 +#ifdef CONFIG_PAX_RANDMMAP
26199 + if (mm->pax_flags & MF_PAX_RANDMMAP)
26200 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
26201 +#endif
26202 +
26203 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
26204 mm->unmap_area = arch_unmap_area_topdown;
26205 }
26206 diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
26207 index 132772a..b961f11 100644
26208 --- a/arch/x86/mm/mmio-mod.c
26209 +++ b/arch/x86/mm/mmio-mod.c
26210 @@ -193,7 +193,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
26211 break;
26212 default:
26213 {
26214 - unsigned char *ip = (unsigned char *)instptr;
26215 + unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
26216 my_trace->opcode = MMIO_UNKNOWN_OP;
26217 my_trace->width = 0;
26218 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
26219 @@ -233,7 +233,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
26220 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
26221 void __iomem *addr)
26222 {
26223 - static atomic_t next_id;
26224 + static atomic_unchecked_t next_id;
26225 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
26226 /* These are page-unaligned. */
26227 struct mmiotrace_map map = {
26228 @@ -257,7 +257,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
26229 .private = trace
26230 },
26231 .phys = offset,
26232 - .id = atomic_inc_return(&next_id)
26233 + .id = atomic_inc_return_unchecked(&next_id)
26234 };
26235 map.map_id = trace->id;
26236
26237 diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
26238 index d253006..e56dd6a 100644
26239 --- a/arch/x86/mm/numa_32.c
26240 +++ b/arch/x86/mm/numa_32.c
26241 @@ -98,7 +98,6 @@ unsigned long node_memmap_size_bytes(int nid, unsigned long start_pfn,
26242 }
26243 #endif
26244
26245 -extern unsigned long find_max_low_pfn(void);
26246 extern unsigned long highend_pfn, highstart_pfn;
26247
26248 #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
26249 diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
26250 index e1d1069..2251ff3 100644
26251 --- a/arch/x86/mm/pageattr-test.c
26252 +++ b/arch/x86/mm/pageattr-test.c
26253 @@ -36,7 +36,7 @@ enum {
26254
26255 static int pte_testbit(pte_t pte)
26256 {
26257 - return pte_flags(pte) & _PAGE_UNUSED1;
26258 + return pte_flags(pte) & _PAGE_CPA_TEST;
26259 }
26260
26261 struct split_state {
26262 diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
26263 index dd38bfb..b72c63e 100644
26264 --- a/arch/x86/mm/pageattr.c
26265 +++ b/arch/x86/mm/pageattr.c
26266 @@ -261,16 +261,17 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
26267 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
26268 */
26269 if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
26270 - pgprot_val(forbidden) |= _PAGE_NX;
26271 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
26272
26273 /*
26274 * The kernel text needs to be executable for obvious reasons
26275 * Does not cover __inittext since that is gone later on. On
26276 * 64bit we do not enforce !NX on the low mapping
26277 */
26278 - if (within(address, (unsigned long)_text, (unsigned long)_etext))
26279 - pgprot_val(forbidden) |= _PAGE_NX;
26280 + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
26281 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
26282
26283 +#ifdef CONFIG_DEBUG_RODATA
26284 /*
26285 * The .rodata section needs to be read-only. Using the pfn
26286 * catches all aliases.
26287 @@ -278,6 +279,14 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
26288 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
26289 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
26290 pgprot_val(forbidden) |= _PAGE_RW;
26291 +#endif
26292 +
26293 +#ifdef CONFIG_PAX_KERNEXEC
26294 + if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
26295 + pgprot_val(forbidden) |= _PAGE_RW;
26296 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
26297 + }
26298 +#endif
26299
26300 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
26301
26302 @@ -331,23 +340,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
26303 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
26304 {
26305 /* change init_mm */
26306 + pax_open_kernel();
26307 set_pte_atomic(kpte, pte);
26308 +
26309 #ifdef CONFIG_X86_32
26310 if (!SHARED_KERNEL_PMD) {
26311 +
26312 +#ifdef CONFIG_PAX_PER_CPU_PGD
26313 + unsigned long cpu;
26314 +#else
26315 struct page *page;
26316 +#endif
26317
26318 +#ifdef CONFIG_PAX_PER_CPU_PGD
26319 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
26320 + pgd_t *pgd = get_cpu_pgd(cpu);
26321 +#else
26322 list_for_each_entry(page, &pgd_list, lru) {
26323 - pgd_t *pgd;
26324 + pgd_t *pgd = (pgd_t *)page_address(page);
26325 +#endif
26326 +
26327 pud_t *pud;
26328 pmd_t *pmd;
26329
26330 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
26331 + pgd += pgd_index(address);
26332 pud = pud_offset(pgd, address);
26333 pmd = pmd_offset(pud, address);
26334 set_pte_atomic((pte_t *)pmd, pte);
26335 }
26336 }
26337 #endif
26338 + pax_close_kernel();
26339 }
26340
26341 static int
26342 diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
26343 index e78cd0e..de0a817 100644
26344 --- a/arch/x86/mm/pat.c
26345 +++ b/arch/x86/mm/pat.c
26346 @@ -258,7 +258,7 @@ chk_conflict(struct memtype *new, struct memtype *entry, unsigned long *type)
26347
26348 conflict:
26349 printk(KERN_INFO "%s:%d conflicting memory types "
26350 - "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
26351 + "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), new->start,
26352 new->end, cattr_name(new->type), cattr_name(entry->type));
26353 return -EBUSY;
26354 }
26355 @@ -559,7 +559,7 @@ unlock_ret:
26356
26357 if (err) {
26358 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
26359 - current->comm, current->pid, start, end);
26360 + current->comm, task_pid_nr(current), start, end);
26361 }
26362
26363 dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
26364 @@ -689,8 +689,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
26365 while (cursor < to) {
26366 if (!devmem_is_allowed(pfn)) {
26367 printk(KERN_INFO
26368 - "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
26369 - current->comm, from, to);
26370 + "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
26371 + current->comm, from, to, cursor);
26372 return 0;
26373 }
26374 cursor += PAGE_SIZE;
26375 @@ -755,7 +755,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
26376 printk(KERN_INFO
26377 "%s:%d ioremap_change_attr failed %s "
26378 "for %Lx-%Lx\n",
26379 - current->comm, current->pid,
26380 + current->comm, task_pid_nr(current),
26381 cattr_name(flags),
26382 base, (unsigned long long)(base + size));
26383 return -EINVAL;
26384 @@ -813,7 +813,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
26385 free_memtype(paddr, paddr + size);
26386 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
26387 " for %Lx-%Lx, got %s\n",
26388 - current->comm, current->pid,
26389 + current->comm, task_pid_nr(current),
26390 cattr_name(want_flags),
26391 (unsigned long long)paddr,
26392 (unsigned long long)(paddr + size),
26393 diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
26394 index df3d5c8..c2223e1 100644
26395 --- a/arch/x86/mm/pf_in.c
26396 +++ b/arch/x86/mm/pf_in.c
26397 @@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
26398 int i;
26399 enum reason_type rv = OTHERS;
26400
26401 - p = (unsigned char *)ins_addr;
26402 + p = (unsigned char *)ktla_ktva(ins_addr);
26403 p += skip_prefix(p, &prf);
26404 p += get_opcode(p, &opcode);
26405
26406 @@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
26407 struct prefix_bits prf;
26408 int i;
26409
26410 - p = (unsigned char *)ins_addr;
26411 + p = (unsigned char *)ktla_ktva(ins_addr);
26412 p += skip_prefix(p, &prf);
26413 p += get_opcode(p, &opcode);
26414
26415 @@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
26416 struct prefix_bits prf;
26417 int i;
26418
26419 - p = (unsigned char *)ins_addr;
26420 + p = (unsigned char *)ktla_ktva(ins_addr);
26421 p += skip_prefix(p, &prf);
26422 p += get_opcode(p, &opcode);
26423
26424 @@ -417,7 +417,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
26425 int i;
26426 unsigned long rv;
26427
26428 - p = (unsigned char *)ins_addr;
26429 + p = (unsigned char *)ktla_ktva(ins_addr);
26430 p += skip_prefix(p, &prf);
26431 p += get_opcode(p, &opcode);
26432 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
26433 @@ -472,7 +472,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
26434 int i;
26435 unsigned long rv;
26436
26437 - p = (unsigned char *)ins_addr;
26438 + p = (unsigned char *)ktla_ktva(ins_addr);
26439 p += skip_prefix(p, &prf);
26440 p += get_opcode(p, &opcode);
26441 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
26442 diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
26443 index e0e6fad..c56b495 100644
26444 --- a/arch/x86/mm/pgtable.c
26445 +++ b/arch/x86/mm/pgtable.c
26446 @@ -83,9 +83,52 @@ static inline void pgd_list_del(pgd_t *pgd)
26447 list_del(&page->lru);
26448 }
26449
26450 -#define UNSHARED_PTRS_PER_PGD \
26451 - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
26452 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
26453 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
26454
26455 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
26456 +{
26457 + while (count--)
26458 + *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
26459 +}
26460 +#endif
26461 +
26462 +#ifdef CONFIG_PAX_PER_CPU_PGD
26463 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
26464 +{
26465 + while (count--)
26466 +
26467 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
26468 + *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
26469 +#else
26470 + *dst++ = *src++;
26471 +#endif
26472 +
26473 +}
26474 +#endif
26475 +
26476 +#ifdef CONFIG_X86_64
26477 +#define pxd_t pud_t
26478 +#define pyd_t pgd_t
26479 +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
26480 +#define pxd_free(mm, pud) pud_free((mm), (pud))
26481 +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
26482 +#define pyd_offset(mm, address) pgd_offset((mm), (address))
26483 +#define PYD_SIZE PGDIR_SIZE
26484 +#else
26485 +#define pxd_t pmd_t
26486 +#define pyd_t pud_t
26487 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
26488 +#define pxd_free(mm, pud) pmd_free((mm), (pud))
26489 +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
26490 +#define pyd_offset(mm, address) pud_offset((mm), (address))
26491 +#define PYD_SIZE PUD_SIZE
26492 +#endif
26493 +
26494 +#ifdef CONFIG_PAX_PER_CPU_PGD
26495 +static inline void pgd_ctor(pgd_t *pgd) {}
26496 +static inline void pgd_dtor(pgd_t *pgd) {}
26497 +#else
26498 static void pgd_ctor(pgd_t *pgd)
26499 {
26500 /* If the pgd points to a shared pagetable level (either the
26501 @@ -119,6 +162,7 @@ static void pgd_dtor(pgd_t *pgd)
26502 pgd_list_del(pgd);
26503 spin_unlock_irqrestore(&pgd_lock, flags);
26504 }
26505 +#endif
26506
26507 /*
26508 * List of all pgd's needed for non-PAE so it can invalidate entries
26509 @@ -131,7 +175,7 @@ static void pgd_dtor(pgd_t *pgd)
26510 * -- wli
26511 */
26512
26513 -#ifdef CONFIG_X86_PAE
26514 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26515 /*
26516 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
26517 * updating the top-level pagetable entries to guarantee the
26518 @@ -143,7 +187,7 @@ static void pgd_dtor(pgd_t *pgd)
26519 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
26520 * and initialize the kernel pmds here.
26521 */
26522 -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
26523 +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
26524
26525 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
26526 {
26527 @@ -161,36 +205,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
26528 */
26529 flush_tlb_mm(mm);
26530 }
26531 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
26532 +#define PREALLOCATED_PXDS USER_PGD_PTRS
26533 #else /* !CONFIG_X86_PAE */
26534
26535 /* No need to prepopulate any pagetable entries in non-PAE modes. */
26536 -#define PREALLOCATED_PMDS 0
26537 +#define PREALLOCATED_PXDS 0
26538
26539 #endif /* CONFIG_X86_PAE */
26540
26541 -static void free_pmds(pmd_t *pmds[])
26542 +static void free_pxds(pxd_t *pxds[])
26543 {
26544 int i;
26545
26546 - for(i = 0; i < PREALLOCATED_PMDS; i++)
26547 - if (pmds[i])
26548 - free_page((unsigned long)pmds[i]);
26549 + for(i = 0; i < PREALLOCATED_PXDS; i++)
26550 + if (pxds[i])
26551 + free_page((unsigned long)pxds[i]);
26552 }
26553
26554 -static int preallocate_pmds(pmd_t *pmds[])
26555 +static int preallocate_pxds(pxd_t *pxds[])
26556 {
26557 int i;
26558 bool failed = false;
26559
26560 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
26561 - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
26562 - if (pmd == NULL)
26563 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
26564 + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
26565 + if (pxd == NULL)
26566 failed = true;
26567 - pmds[i] = pmd;
26568 + pxds[i] = pxd;
26569 }
26570
26571 if (failed) {
26572 - free_pmds(pmds);
26573 + free_pxds(pxds);
26574 return -ENOMEM;
26575 }
26576
26577 @@ -203,51 +249,56 @@ static int preallocate_pmds(pmd_t *pmds[])
26578 * preallocate which never got a corresponding vma will need to be
26579 * freed manually.
26580 */
26581 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
26582 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
26583 {
26584 int i;
26585
26586 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
26587 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
26588 pgd_t pgd = pgdp[i];
26589
26590 if (pgd_val(pgd) != 0) {
26591 - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
26592 + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
26593
26594 - pgdp[i] = native_make_pgd(0);
26595 + set_pgd(pgdp + i, native_make_pgd(0));
26596
26597 - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
26598 - pmd_free(mm, pmd);
26599 + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
26600 + pxd_free(mm, pxd);
26601 }
26602 }
26603 }
26604
26605 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
26606 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
26607 {
26608 - pud_t *pud;
26609 + pyd_t *pyd;
26610 unsigned long addr;
26611 int i;
26612
26613 - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
26614 + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
26615 return;
26616
26617 - pud = pud_offset(pgd, 0);
26618 +#ifdef CONFIG_X86_64
26619 + pyd = pyd_offset(mm, 0L);
26620 +#else
26621 + pyd = pyd_offset(pgd, 0L);
26622 +#endif
26623
26624 - for (addr = i = 0; i < PREALLOCATED_PMDS;
26625 - i++, pud++, addr += PUD_SIZE) {
26626 - pmd_t *pmd = pmds[i];
26627 + for (addr = i = 0; i < PREALLOCATED_PXDS;
26628 + i++, pyd++, addr += PYD_SIZE) {
26629 + pxd_t *pxd = pxds[i];
26630
26631 if (i >= KERNEL_PGD_BOUNDARY)
26632 - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
26633 - sizeof(pmd_t) * PTRS_PER_PMD);
26634 + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
26635 + sizeof(pxd_t) * PTRS_PER_PMD);
26636
26637 - pud_populate(mm, pud, pmd);
26638 + pyd_populate(mm, pyd, pxd);
26639 }
26640 }
26641
26642 pgd_t *pgd_alloc(struct mm_struct *mm)
26643 {
26644 pgd_t *pgd;
26645 - pmd_t *pmds[PREALLOCATED_PMDS];
26646 + pxd_t *pxds[PREALLOCATED_PXDS];
26647 +
26648 unsigned long flags;
26649
26650 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
26651 @@ -257,11 +308,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
26652
26653 mm->pgd = pgd;
26654
26655 - if (preallocate_pmds(pmds) != 0)
26656 + if (preallocate_pxds(pxds) != 0)
26657 goto out_free_pgd;
26658
26659 if (paravirt_pgd_alloc(mm) != 0)
26660 - goto out_free_pmds;
26661 + goto out_free_pxds;
26662
26663 /*
26664 * Make sure that pre-populating the pmds is atomic with
26665 @@ -271,14 +322,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
26666 spin_lock_irqsave(&pgd_lock, flags);
26667
26668 pgd_ctor(pgd);
26669 - pgd_prepopulate_pmd(mm, pgd, pmds);
26670 + pgd_prepopulate_pxd(mm, pgd, pxds);
26671
26672 spin_unlock_irqrestore(&pgd_lock, flags);
26673
26674 return pgd;
26675
26676 -out_free_pmds:
26677 - free_pmds(pmds);
26678 +out_free_pxds:
26679 + free_pxds(pxds);
26680 out_free_pgd:
26681 free_page((unsigned long)pgd);
26682 out:
26683 @@ -287,7 +338,7 @@ out:
26684
26685 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
26686 {
26687 - pgd_mop_up_pmds(mm, pgd);
26688 + pgd_mop_up_pxds(mm, pgd);
26689 pgd_dtor(pgd);
26690 paravirt_pgd_free(mm, pgd);
26691 free_page((unsigned long)pgd);
26692 diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
26693 index 46c8834..fcab43d 100644
26694 --- a/arch/x86/mm/pgtable_32.c
26695 +++ b/arch/x86/mm/pgtable_32.c
26696 @@ -49,10 +49,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
26697 return;
26698 }
26699 pte = pte_offset_kernel(pmd, vaddr);
26700 +
26701 + pax_open_kernel();
26702 if (pte_val(pteval))
26703 set_pte_at(&init_mm, vaddr, pte, pteval);
26704 else
26705 pte_clear(&init_mm, vaddr, pte);
26706 + pax_close_kernel();
26707
26708 /*
26709 * It's enough to flush this one mapping.
26710 diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
26711 index 513d8ed..978c161 100644
26712 --- a/arch/x86/mm/setup_nx.c
26713 +++ b/arch/x86/mm/setup_nx.c
26714 @@ -4,11 +4,10 @@
26715
26716 #include <asm/pgtable.h>
26717
26718 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26719 int nx_enabled;
26720
26721 -#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
26722 -static int disable_nx __cpuinitdata;
26723 -
26724 +#ifndef CONFIG_PAX_PAGEEXEC
26725 /*
26726 * noexec = on|off
26727 *
26728 @@ -22,32 +21,26 @@ static int __init noexec_setup(char *str)
26729 if (!str)
26730 return -EINVAL;
26731 if (!strncmp(str, "on", 2)) {
26732 - __supported_pte_mask |= _PAGE_NX;
26733 - disable_nx = 0;
26734 + nx_enabled = 1;
26735 } else if (!strncmp(str, "off", 3)) {
26736 - disable_nx = 1;
26737 - __supported_pte_mask &= ~_PAGE_NX;
26738 + nx_enabled = 0;
26739 }
26740 return 0;
26741 }
26742 early_param("noexec", noexec_setup);
26743 #endif
26744 +#endif
26745
26746 #ifdef CONFIG_X86_PAE
26747 void __init set_nx(void)
26748 {
26749 - unsigned int v[4], l, h;
26750 + if (!nx_enabled && cpu_has_nx) {
26751 + unsigned l, h;
26752
26753 - if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
26754 - cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
26755 -
26756 - if ((v[3] & (1 << 20)) && !disable_nx) {
26757 - rdmsr(MSR_EFER, l, h);
26758 - l |= EFER_NX;
26759 - wrmsr(MSR_EFER, l, h);
26760 - nx_enabled = 1;
26761 - __supported_pte_mask |= _PAGE_NX;
26762 - }
26763 + __supported_pte_mask &= ~_PAGE_NX;
26764 + rdmsr(MSR_EFER, l, h);
26765 + l &= ~EFER_NX;
26766 + wrmsr(MSR_EFER, l, h);
26767 }
26768 }
26769 #else
26770 @@ -62,7 +55,7 @@ void __cpuinit check_efer(void)
26771 unsigned long efer;
26772
26773 rdmsrl(MSR_EFER, efer);
26774 - if (!(efer & EFER_NX) || disable_nx)
26775 + if (!(efer & EFER_NX) || !nx_enabled)
26776 __supported_pte_mask &= ~_PAGE_NX;
26777 }
26778 #endif
26779 diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
26780 index 36fe08e..b123d3a 100644
26781 --- a/arch/x86/mm/tlb.c
26782 +++ b/arch/x86/mm/tlb.c
26783 @@ -61,7 +61,11 @@ void leave_mm(int cpu)
26784 BUG();
26785 cpumask_clear_cpu(cpu,
26786 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
26787 +
26788 +#ifndef CONFIG_PAX_PER_CPU_PGD
26789 load_cr3(swapper_pg_dir);
26790 +#endif
26791 +
26792 }
26793 EXPORT_SYMBOL_GPL(leave_mm);
26794
26795 diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
26796 index 829edf0..672adb3 100644
26797 --- a/arch/x86/oprofile/backtrace.c
26798 +++ b/arch/x86/oprofile/backtrace.c
26799 @@ -115,7 +115,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
26800 {
26801 struct frame_head *head = (struct frame_head *)frame_pointer(regs);
26802
26803 - if (!user_mode_vm(regs)) {
26804 + if (!user_mode(regs)) {
26805 unsigned long stack = kernel_stack_pointer(regs);
26806 if (depth)
26807 dump_trace(NULL, regs, (unsigned long *)stack, 0,
26808 diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c
26809 index e6a160a..36deff6 100644
26810 --- a/arch/x86/oprofile/op_model_p4.c
26811 +++ b/arch/x86/oprofile/op_model_p4.c
26812 @@ -50,7 +50,7 @@ static inline void setup_num_counters(void)
26813 #endif
26814 }
26815
26816 -static int inline addr_increment(void)
26817 +static inline int addr_increment(void)
26818 {
26819 #ifdef CONFIG_SMP
26820 return smp_num_siblings == 2 ? 2 : 1;
26821 diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
26822 index 1331fcf..03901b2 100644
26823 --- a/arch/x86/pci/common.c
26824 +++ b/arch/x86/pci/common.c
26825 @@ -31,8 +31,8 @@ int noioapicreroute = 1;
26826 int pcibios_last_bus = -1;
26827 unsigned long pirq_table_addr;
26828 struct pci_bus *pci_root_bus;
26829 -struct pci_raw_ops *raw_pci_ops;
26830 -struct pci_raw_ops *raw_pci_ext_ops;
26831 +const struct pci_raw_ops *raw_pci_ops;
26832 +const struct pci_raw_ops *raw_pci_ext_ops;
26833
26834 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
26835 int reg, int len, u32 *val)
26836 diff --git a/arch/x86/pci/direct.c b/arch/x86/pci/direct.c
26837 index 347d882..4baf6b6 100644
26838 --- a/arch/x86/pci/direct.c
26839 +++ b/arch/x86/pci/direct.c
26840 @@ -79,7 +79,7 @@ static int pci_conf1_write(unsigned int seg, unsigned int bus,
26841
26842 #undef PCI_CONF1_ADDRESS
26843
26844 -struct pci_raw_ops pci_direct_conf1 = {
26845 +const struct pci_raw_ops pci_direct_conf1 = {
26846 .read = pci_conf1_read,
26847 .write = pci_conf1_write,
26848 };
26849 @@ -173,7 +173,7 @@ static int pci_conf2_write(unsigned int seg, unsigned int bus,
26850
26851 #undef PCI_CONF2_ADDRESS
26852
26853 -struct pci_raw_ops pci_direct_conf2 = {
26854 +const struct pci_raw_ops pci_direct_conf2 = {
26855 .read = pci_conf2_read,
26856 .write = pci_conf2_write,
26857 };
26858 @@ -189,7 +189,7 @@ struct pci_raw_ops pci_direct_conf2 = {
26859 * This should be close to trivial, but it isn't, because there are buggy
26860 * chipsets (yes, you guessed it, by Intel and Compaq) that have no class ID.
26861 */
26862 -static int __init pci_sanity_check(struct pci_raw_ops *o)
26863 +static int __init pci_sanity_check(const struct pci_raw_ops *o)
26864 {
26865 u32 x = 0;
26866 int year, devfn;
26867 diff --git a/arch/x86/pci/mmconfig_32.c b/arch/x86/pci/mmconfig_32.c
26868 index f10a7e9..0425342 100644
26869 --- a/arch/x86/pci/mmconfig_32.c
26870 +++ b/arch/x86/pci/mmconfig_32.c
26871 @@ -125,7 +125,7 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
26872 return 0;
26873 }
26874
26875 -static struct pci_raw_ops pci_mmcfg = {
26876 +static const struct pci_raw_ops pci_mmcfg = {
26877 .read = pci_mmcfg_read,
26878 .write = pci_mmcfg_write,
26879 };
26880 diff --git a/arch/x86/pci/mmconfig_64.c b/arch/x86/pci/mmconfig_64.c
26881 index 94349f8..41600a7 100644
26882 --- a/arch/x86/pci/mmconfig_64.c
26883 +++ b/arch/x86/pci/mmconfig_64.c
26884 @@ -104,7 +104,7 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
26885 return 0;
26886 }
26887
26888 -static struct pci_raw_ops pci_mmcfg = {
26889 +static const struct pci_raw_ops pci_mmcfg = {
26890 .read = pci_mmcfg_read,
26891 .write = pci_mmcfg_write,
26892 };
26893 diff --git a/arch/x86/pci/numaq_32.c b/arch/x86/pci/numaq_32.c
26894 index 8eb295e..86bd657 100644
26895 --- a/arch/x86/pci/numaq_32.c
26896 +++ b/arch/x86/pci/numaq_32.c
26897 @@ -112,7 +112,7 @@ static int pci_conf1_mq_write(unsigned int seg, unsigned int bus,
26898
26899 #undef PCI_CONF1_MQ_ADDRESS
26900
26901 -static struct pci_raw_ops pci_direct_conf1_mq = {
26902 +static const struct pci_raw_ops pci_direct_conf1_mq = {
26903 .read = pci_conf1_mq_read,
26904 .write = pci_conf1_mq_write
26905 };
26906 diff --git a/arch/x86/pci/olpc.c b/arch/x86/pci/olpc.c
26907 index b889d82..5a58a0a 100644
26908 --- a/arch/x86/pci/olpc.c
26909 +++ b/arch/x86/pci/olpc.c
26910 @@ -297,7 +297,7 @@ static int pci_olpc_write(unsigned int seg, unsigned int bus,
26911 return 0;
26912 }
26913
26914 -static struct pci_raw_ops pci_olpc_conf = {
26915 +static const struct pci_raw_ops pci_olpc_conf = {
26916 .read = pci_olpc_read,
26917 .write = pci_olpc_write,
26918 };
26919 diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
26920 index 1c975cc..b8e16c2 100644
26921 --- a/arch/x86/pci/pcbios.c
26922 +++ b/arch/x86/pci/pcbios.c
26923 @@ -56,50 +56,93 @@ union bios32 {
26924 static struct {
26925 unsigned long address;
26926 unsigned short segment;
26927 -} bios32_indirect = { 0, __KERNEL_CS };
26928 +} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
26929
26930 /*
26931 * Returns the entry point for the given service, NULL on error
26932 */
26933
26934 -static unsigned long bios32_service(unsigned long service)
26935 +static unsigned long __devinit bios32_service(unsigned long service)
26936 {
26937 unsigned char return_code; /* %al */
26938 unsigned long address; /* %ebx */
26939 unsigned long length; /* %ecx */
26940 unsigned long entry; /* %edx */
26941 unsigned long flags;
26942 + struct desc_struct d, *gdt;
26943
26944 local_irq_save(flags);
26945 - __asm__("lcall *(%%edi); cld"
26946 +
26947 + gdt = get_cpu_gdt_table(smp_processor_id());
26948 +
26949 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
26950 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
26951 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
26952 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
26953 +
26954 + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
26955 : "=a" (return_code),
26956 "=b" (address),
26957 "=c" (length),
26958 "=d" (entry)
26959 : "0" (service),
26960 "1" (0),
26961 - "D" (&bios32_indirect));
26962 + "D" (&bios32_indirect),
26963 + "r"(__PCIBIOS_DS)
26964 + : "memory");
26965 +
26966 + pax_open_kernel();
26967 + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
26968 + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
26969 + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
26970 + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
26971 + pax_close_kernel();
26972 +
26973 local_irq_restore(flags);
26974
26975 switch (return_code) {
26976 - case 0:
26977 - return address + entry;
26978 - case 0x80: /* Not present */
26979 - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
26980 - return 0;
26981 - default: /* Shouldn't happen */
26982 - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
26983 - service, return_code);
26984 + case 0: {
26985 + int cpu;
26986 + unsigned char flags;
26987 +
26988 + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
26989 + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
26990 + printk(KERN_WARNING "bios32_service: not valid\n");
26991 return 0;
26992 + }
26993 + address = address + PAGE_OFFSET;
26994 + length += 16UL; /* some BIOSs underreport this... */
26995 + flags = 4;
26996 + if (length >= 64*1024*1024) {
26997 + length >>= PAGE_SHIFT;
26998 + flags |= 8;
26999 + }
27000 +
27001 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
27002 + gdt = get_cpu_gdt_table(cpu);
27003 + pack_descriptor(&d, address, length, 0x9b, flags);
27004 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
27005 + pack_descriptor(&d, address, length, 0x93, flags);
27006 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
27007 + }
27008 + return entry;
27009 + }
27010 + case 0x80: /* Not present */
27011 + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
27012 + return 0;
27013 + default: /* Shouldn't happen */
27014 + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
27015 + service, return_code);
27016 + return 0;
27017 }
27018 }
27019
27020 static struct {
27021 unsigned long address;
27022 unsigned short segment;
27023 -} pci_indirect = { 0, __KERNEL_CS };
27024 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
27025
27026 -static int pci_bios_present;
27027 +static int pci_bios_present __read_only;
27028
27029 static int __devinit check_pcibios(void)
27030 {
27031 @@ -108,11 +151,13 @@ static int __devinit check_pcibios(void)
27032 unsigned long flags, pcibios_entry;
27033
27034 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
27035 - pci_indirect.address = pcibios_entry + PAGE_OFFSET;
27036 + pci_indirect.address = pcibios_entry;
27037
27038 local_irq_save(flags);
27039 - __asm__(
27040 - "lcall *(%%edi); cld\n\t"
27041 + __asm__("movw %w6, %%ds\n\t"
27042 + "lcall *%%ss:(%%edi); cld\n\t"
27043 + "push %%ss\n\t"
27044 + "pop %%ds\n\t"
27045 "jc 1f\n\t"
27046 "xor %%ah, %%ah\n"
27047 "1:"
27048 @@ -121,7 +166,8 @@ static int __devinit check_pcibios(void)
27049 "=b" (ebx),
27050 "=c" (ecx)
27051 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
27052 - "D" (&pci_indirect)
27053 + "D" (&pci_indirect),
27054 + "r" (__PCIBIOS_DS)
27055 : "memory");
27056 local_irq_restore(flags);
27057
27058 @@ -165,7 +211,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
27059
27060 switch (len) {
27061 case 1:
27062 - __asm__("lcall *(%%esi); cld\n\t"
27063 + __asm__("movw %w6, %%ds\n\t"
27064 + "lcall *%%ss:(%%esi); cld\n\t"
27065 + "push %%ss\n\t"
27066 + "pop %%ds\n\t"
27067 "jc 1f\n\t"
27068 "xor %%ah, %%ah\n"
27069 "1:"
27070 @@ -174,7 +223,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
27071 : "1" (PCIBIOS_READ_CONFIG_BYTE),
27072 "b" (bx),
27073 "D" ((long)reg),
27074 - "S" (&pci_indirect));
27075 + "S" (&pci_indirect),
27076 + "r" (__PCIBIOS_DS));
27077 /*
27078 * Zero-extend the result beyond 8 bits, do not trust the
27079 * BIOS having done it:
27080 @@ -182,7 +232,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
27081 *value &= 0xff;
27082 break;
27083 case 2:
27084 - __asm__("lcall *(%%esi); cld\n\t"
27085 + __asm__("movw %w6, %%ds\n\t"
27086 + "lcall *%%ss:(%%esi); cld\n\t"
27087 + "push %%ss\n\t"
27088 + "pop %%ds\n\t"
27089 "jc 1f\n\t"
27090 "xor %%ah, %%ah\n"
27091 "1:"
27092 @@ -191,7 +244,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
27093 : "1" (PCIBIOS_READ_CONFIG_WORD),
27094 "b" (bx),
27095 "D" ((long)reg),
27096 - "S" (&pci_indirect));
27097 + "S" (&pci_indirect),
27098 + "r" (__PCIBIOS_DS));
27099 /*
27100 * Zero-extend the result beyond 16 bits, do not trust the
27101 * BIOS having done it:
27102 @@ -199,7 +253,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
27103 *value &= 0xffff;
27104 break;
27105 case 4:
27106 - __asm__("lcall *(%%esi); cld\n\t"
27107 + __asm__("movw %w6, %%ds\n\t"
27108 + "lcall *%%ss:(%%esi); cld\n\t"
27109 + "push %%ss\n\t"
27110 + "pop %%ds\n\t"
27111 "jc 1f\n\t"
27112 "xor %%ah, %%ah\n"
27113 "1:"
27114 @@ -208,7 +265,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
27115 : "1" (PCIBIOS_READ_CONFIG_DWORD),
27116 "b" (bx),
27117 "D" ((long)reg),
27118 - "S" (&pci_indirect));
27119 + "S" (&pci_indirect),
27120 + "r" (__PCIBIOS_DS));
27121 break;
27122 }
27123
27124 @@ -231,7 +289,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
27125
27126 switch (len) {
27127 case 1:
27128 - __asm__("lcall *(%%esi); cld\n\t"
27129 + __asm__("movw %w6, %%ds\n\t"
27130 + "lcall *%%ss:(%%esi); cld\n\t"
27131 + "push %%ss\n\t"
27132 + "pop %%ds\n\t"
27133 "jc 1f\n\t"
27134 "xor %%ah, %%ah\n"
27135 "1:"
27136 @@ -240,10 +301,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
27137 "c" (value),
27138 "b" (bx),
27139 "D" ((long)reg),
27140 - "S" (&pci_indirect));
27141 + "S" (&pci_indirect),
27142 + "r" (__PCIBIOS_DS));
27143 break;
27144 case 2:
27145 - __asm__("lcall *(%%esi); cld\n\t"
27146 + __asm__("movw %w6, %%ds\n\t"
27147 + "lcall *%%ss:(%%esi); cld\n\t"
27148 + "push %%ss\n\t"
27149 + "pop %%ds\n\t"
27150 "jc 1f\n\t"
27151 "xor %%ah, %%ah\n"
27152 "1:"
27153 @@ -252,10 +317,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
27154 "c" (value),
27155 "b" (bx),
27156 "D" ((long)reg),
27157 - "S" (&pci_indirect));
27158 + "S" (&pci_indirect),
27159 + "r" (__PCIBIOS_DS));
27160 break;
27161 case 4:
27162 - __asm__("lcall *(%%esi); cld\n\t"
27163 + __asm__("movw %w6, %%ds\n\t"
27164 + "lcall *%%ss:(%%esi); cld\n\t"
27165 + "push %%ss\n\t"
27166 + "pop %%ds\n\t"
27167 "jc 1f\n\t"
27168 "xor %%ah, %%ah\n"
27169 "1:"
27170 @@ -264,7 +333,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
27171 "c" (value),
27172 "b" (bx),
27173 "D" ((long)reg),
27174 - "S" (&pci_indirect));
27175 + "S" (&pci_indirect),
27176 + "r" (__PCIBIOS_DS));
27177 break;
27178 }
27179
27180 @@ -278,7 +348,7 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
27181 * Function table for BIOS32 access
27182 */
27183
27184 -static struct pci_raw_ops pci_bios_access = {
27185 +static const struct pci_raw_ops pci_bios_access = {
27186 .read = pci_bios_read,
27187 .write = pci_bios_write
27188 };
27189 @@ -287,7 +357,7 @@ static struct pci_raw_ops pci_bios_access = {
27190 * Try to find PCI BIOS.
27191 */
27192
27193 -static struct pci_raw_ops * __devinit pci_find_bios(void)
27194 +static const struct pci_raw_ops * __devinit pci_find_bios(void)
27195 {
27196 union bios32 *check;
27197 unsigned char sum;
27198 @@ -368,10 +438,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
27199
27200 DBG("PCI: Fetching IRQ routing table... ");
27201 __asm__("push %%es\n\t"
27202 + "movw %w8, %%ds\n\t"
27203 "push %%ds\n\t"
27204 "pop %%es\n\t"
27205 - "lcall *(%%esi); cld\n\t"
27206 + "lcall *%%ss:(%%esi); cld\n\t"
27207 "pop %%es\n\t"
27208 + "push %%ss\n\t"
27209 + "pop %%ds\n"
27210 "jc 1f\n\t"
27211 "xor %%ah, %%ah\n"
27212 "1:"
27213 @@ -382,7 +455,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
27214 "1" (0),
27215 "D" ((long) &opt),
27216 "S" (&pci_indirect),
27217 - "m" (opt)
27218 + "m" (opt),
27219 + "r" (__PCIBIOS_DS)
27220 : "memory");
27221 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
27222 if (ret & 0xff00)
27223 @@ -406,7 +480,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
27224 {
27225 int ret;
27226
27227 - __asm__("lcall *(%%esi); cld\n\t"
27228 + __asm__("movw %w5, %%ds\n\t"
27229 + "lcall *%%ss:(%%esi); cld\n\t"
27230 + "push %%ss\n\t"
27231 + "pop %%ds\n"
27232 "jc 1f\n\t"
27233 "xor %%ah, %%ah\n"
27234 "1:"
27235 @@ -414,7 +491,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
27236 : "0" (PCIBIOS_SET_PCI_HW_INT),
27237 "b" ((dev->bus->number << 8) | dev->devfn),
27238 "c" ((irq << 8) | (pin + 10)),
27239 - "S" (&pci_indirect));
27240 + "S" (&pci_indirect),
27241 + "r" (__PCIBIOS_DS));
27242 return !(ret & 0xff00);
27243 }
27244 EXPORT_SYMBOL(pcibios_set_irq_routing);
27245 diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
27246 index fa0f651..9d8f3d9 100644
27247 --- a/arch/x86/power/cpu.c
27248 +++ b/arch/x86/power/cpu.c
27249 @@ -129,7 +129,7 @@ static void do_fpu_end(void)
27250 static void fix_processor_context(void)
27251 {
27252 int cpu = smp_processor_id();
27253 - struct tss_struct *t = &per_cpu(init_tss, cpu);
27254 + struct tss_struct *t = init_tss + cpu;
27255
27256 set_tss_desc(cpu, t); /*
27257 * This just modifies memory; should not be
27258 @@ -139,7 +139,9 @@ static void fix_processor_context(void)
27259 */
27260
27261 #ifdef CONFIG_X86_64
27262 + pax_open_kernel();
27263 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
27264 + pax_close_kernel();
27265
27266 syscall_init(); /* This sets MSR_*STAR and related */
27267 #endif
27268 diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
27269 index dd78ef6..f9d928d 100644
27270 --- a/arch/x86/vdso/Makefile
27271 +++ b/arch/x86/vdso/Makefile
27272 @@ -122,7 +122,7 @@ quiet_cmd_vdso = VDSO $@
27273 $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
27274 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^)
27275
27276 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
27277 +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
27278 GCOV_PROFILE := n
27279
27280 #
27281 diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
27282 index ee55754..0013b2e 100644
27283 --- a/arch/x86/vdso/vclock_gettime.c
27284 +++ b/arch/x86/vdso/vclock_gettime.c
27285 @@ -22,24 +22,48 @@
27286 #include <asm/hpet.h>
27287 #include <asm/unistd.h>
27288 #include <asm/io.h>
27289 +#include <asm/fixmap.h>
27290 #include "vextern.h"
27291
27292 #define gtod vdso_vsyscall_gtod_data
27293
27294 +notrace noinline long __vdso_fallback_time(long *t)
27295 +{
27296 + long secs;
27297 + asm volatile("syscall"
27298 + : "=a" (secs)
27299 + : "0" (__NR_time),"D" (t) : "r11", "cx", "memory");
27300 + return secs;
27301 +}
27302 +
27303 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
27304 {
27305 long ret;
27306 asm("syscall" : "=a" (ret) :
27307 - "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
27308 + "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "r11", "cx", "memory");
27309 return ret;
27310 }
27311
27312 +notrace static inline cycle_t __vdso_vread_hpet(void)
27313 +{
27314 + return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
27315 +}
27316 +
27317 +notrace static inline cycle_t __vdso_vread_tsc(void)
27318 +{
27319 + cycle_t ret = (cycle_t)vget_cycles();
27320 +
27321 + return ret >= gtod->clock.cycle_last ? ret : gtod->clock.cycle_last;
27322 +}
27323 +
27324 notrace static inline long vgetns(void)
27325 {
27326 long v;
27327 - cycles_t (*vread)(void);
27328 - vread = gtod->clock.vread;
27329 - v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask;
27330 + if (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3])
27331 + v = __vdso_vread_tsc();
27332 + else
27333 + v = __vdso_vread_hpet();
27334 + v = (v - gtod->clock.cycle_last) & gtod->clock.mask;
27335 return (v * gtod->clock.mult) >> gtod->clock.shift;
27336 }
27337
27338 @@ -113,7 +137,9 @@ notrace static noinline int do_monotonic_coarse(struct timespec *ts)
27339
27340 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
27341 {
27342 - if (likely(gtod->sysctl_enabled))
27343 + if (likely(gtod->sysctl_enabled &&
27344 + ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
27345 + (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
27346 switch (clock) {
27347 case CLOCK_REALTIME:
27348 if (likely(gtod->clock.vread))
27349 @@ -133,10 +159,20 @@ notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
27350 int clock_gettime(clockid_t, struct timespec *)
27351 __attribute__((weak, alias("__vdso_clock_gettime")));
27352
27353 +notrace noinline int __vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
27354 +{
27355 + long ret;
27356 + asm("syscall" : "=a" (ret) :
27357 + "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "r11", "cx", "memory");
27358 + return ret;
27359 +}
27360 +
27361 notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
27362 {
27363 - long ret;
27364 - if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
27365 + if (likely(gtod->sysctl_enabled &&
27366 + ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
27367 + (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
27368 + {
27369 if (likely(tv != NULL)) {
27370 BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
27371 offsetof(struct timespec, tv_nsec) ||
27372 @@ -151,9 +187,7 @@ notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
27373 }
27374 return 0;
27375 }
27376 - asm("syscall" : "=a" (ret) :
27377 - "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
27378 - return ret;
27379 + return __vdso_fallback_gettimeofday(tv, tz);
27380 }
27381 int gettimeofday(struct timeval *, struct timezone *)
27382 __attribute__((weak, alias("__vdso_gettimeofday")));
27383 diff --git a/arch/x86/vdso/vdso.lds.S b/arch/x86/vdso/vdso.lds.S
27384 index 4e5dd3b..00ba15e 100644
27385 --- a/arch/x86/vdso/vdso.lds.S
27386 +++ b/arch/x86/vdso/vdso.lds.S
27387 @@ -35,3 +35,9 @@ VDSO64_PRELINK = VDSO_PRELINK;
27388 #define VEXTERN(x) VDSO64_ ## x = vdso_ ## x;
27389 #include "vextern.h"
27390 #undef VEXTERN
27391 +
27392 +#define VEXTERN(x) VDSO64_ ## x = __vdso_ ## x;
27393 +VEXTERN(fallback_gettimeofday)
27394 +VEXTERN(fallback_time)
27395 +VEXTERN(getcpu)
27396 +#undef VEXTERN
27397 diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
27398 index 58bc00f..d53fb48 100644
27399 --- a/arch/x86/vdso/vdso32-setup.c
27400 +++ b/arch/x86/vdso/vdso32-setup.c
27401 @@ -25,6 +25,7 @@
27402 #include <asm/tlbflush.h>
27403 #include <asm/vdso.h>
27404 #include <asm/proto.h>
27405 +#include <asm/mman.h>
27406
27407 enum {
27408 VDSO_DISABLED = 0,
27409 @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
27410 void enable_sep_cpu(void)
27411 {
27412 int cpu = get_cpu();
27413 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
27414 + struct tss_struct *tss = init_tss + cpu;
27415
27416 if (!boot_cpu_has(X86_FEATURE_SEP)) {
27417 put_cpu();
27418 @@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
27419 gate_vma.vm_start = FIXADDR_USER_START;
27420 gate_vma.vm_end = FIXADDR_USER_END;
27421 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
27422 - gate_vma.vm_page_prot = __P101;
27423 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
27424 /*
27425 * Make sure the vDSO gets into every core dump.
27426 * Dumping its contents makes post-mortem fully interpretable later
27427 @@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
27428 if (compat)
27429 addr = VDSO_HIGH_BASE;
27430 else {
27431 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
27432 + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
27433 if (IS_ERR_VALUE(addr)) {
27434 ret = addr;
27435 goto up_fail;
27436 }
27437 }
27438
27439 - current->mm->context.vdso = (void *)addr;
27440 + current->mm->context.vdso = addr;
27441
27442 if (compat_uses_vma || !compat) {
27443 /*
27444 @@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
27445 }
27446
27447 current_thread_info()->sysenter_return =
27448 - VDSO32_SYMBOL(addr, SYSENTER_RETURN);
27449 + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
27450
27451 up_fail:
27452 if (ret)
27453 - current->mm->context.vdso = NULL;
27454 + current->mm->context.vdso = 0;
27455
27456 up_write(&mm->mmap_sem);
27457
27458 @@ -413,8 +414,14 @@ __initcall(ia32_binfmt_init);
27459
27460 const char *arch_vma_name(struct vm_area_struct *vma)
27461 {
27462 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
27463 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
27464 return "[vdso]";
27465 +
27466 +#ifdef CONFIG_PAX_SEGMEXEC
27467 + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
27468 + return "[vdso]";
27469 +#endif
27470 +
27471 return NULL;
27472 }
27473
27474 @@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
27475 struct mm_struct *mm = tsk->mm;
27476
27477 /* Check to see if this task was created in compat vdso mode */
27478 - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
27479 + if (mm && mm->context.vdso == VDSO_HIGH_BASE)
27480 return &gate_vma;
27481 return NULL;
27482 }
27483 diff --git a/arch/x86/vdso/vextern.h b/arch/x86/vdso/vextern.h
27484 index 1683ba2..48d07f3 100644
27485 --- a/arch/x86/vdso/vextern.h
27486 +++ b/arch/x86/vdso/vextern.h
27487 @@ -11,6 +11,5 @@
27488 put into vextern.h and be referenced as a pointer with vdso prefix.
27489 The main kernel later fills in the values. */
27490
27491 -VEXTERN(jiffies)
27492 VEXTERN(vgetcpu_mode)
27493 VEXTERN(vsyscall_gtod_data)
27494 diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
27495 index 21e1aeb..2c0b3c4 100644
27496 --- a/arch/x86/vdso/vma.c
27497 +++ b/arch/x86/vdso/vma.c
27498 @@ -17,8 +17,6 @@
27499 #include "vextern.h" /* Just for VMAGIC. */
27500 #undef VEXTERN
27501
27502 -unsigned int __read_mostly vdso_enabled = 1;
27503 -
27504 extern char vdso_start[], vdso_end[];
27505 extern unsigned short vdso_sync_cpuid;
27506
27507 @@ -27,10 +25,8 @@ static unsigned vdso_size;
27508
27509 static inline void *var_ref(void *p, char *name)
27510 {
27511 - if (*(void **)p != (void *)VMAGIC) {
27512 - printk("VDSO: variable %s broken\n", name);
27513 - vdso_enabled = 0;
27514 - }
27515 + if (*(void **)p != (void *)VMAGIC)
27516 + panic("VDSO: variable %s broken\n", name);
27517 return p;
27518 }
27519
27520 @@ -57,21 +53,18 @@ static int __init init_vdso_vars(void)
27521 if (!vbase)
27522 goto oom;
27523
27524 - if (memcmp(vbase, "\177ELF", 4)) {
27525 - printk("VDSO: I'm broken; not ELF\n");
27526 - vdso_enabled = 0;
27527 - }
27528 + if (memcmp(vbase, ELFMAG, SELFMAG))
27529 + panic("VDSO: I'm broken; not ELF\n");
27530
27531 #define VEXTERN(x) \
27532 *(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x;
27533 #include "vextern.h"
27534 #undef VEXTERN
27535 + vunmap(vbase);
27536 return 0;
27537
27538 oom:
27539 - printk("Cannot allocate vdso\n");
27540 - vdso_enabled = 0;
27541 - return -ENOMEM;
27542 + panic("Cannot allocate vdso\n");
27543 }
27544 __initcall(init_vdso_vars);
27545
27546 @@ -102,13 +95,15 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
27547 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
27548 {
27549 struct mm_struct *mm = current->mm;
27550 - unsigned long addr;
27551 + unsigned long addr = 0;
27552 int ret;
27553
27554 - if (!vdso_enabled)
27555 - return 0;
27556 -
27557 down_write(&mm->mmap_sem);
27558 +
27559 +#ifdef CONFIG_PAX_RANDMMAP
27560 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27561 +#endif
27562 +
27563 addr = vdso_addr(mm->start_stack, vdso_size);
27564 addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
27565 if (IS_ERR_VALUE(addr)) {
27566 @@ -116,7 +111,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
27567 goto up_fail;
27568 }
27569
27570 - current->mm->context.vdso = (void *)addr;
27571 + current->mm->context.vdso = addr;
27572
27573 ret = install_special_mapping(mm, addr, vdso_size,
27574 VM_READ|VM_EXEC|
27575 @@ -124,7 +119,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
27576 VM_ALWAYSDUMP,
27577 vdso_pages);
27578 if (ret) {
27579 - current->mm->context.vdso = NULL;
27580 + current->mm->context.vdso = 0;
27581 goto up_fail;
27582 }
27583
27584 @@ -132,10 +127,3 @@ up_fail:
27585 up_write(&mm->mmap_sem);
27586 return ret;
27587 }
27588 -
27589 -static __init int vdso_setup(char *s)
27590 -{
27591 - vdso_enabled = simple_strtoul(s, NULL, 0);
27592 - return 0;
27593 -}
27594 -__setup("vdso=", vdso_setup);
27595 diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
27596 index 0087b00..eecb34f 100644
27597 --- a/arch/x86/xen/enlighten.c
27598 +++ b/arch/x86/xen/enlighten.c
27599 @@ -71,8 +71,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
27600
27601 struct shared_info xen_dummy_shared_info;
27602
27603 -void *xen_initial_gdt;
27604 -
27605 /*
27606 * Point at some empty memory to start with. We map the real shared_info
27607 * page as soon as fixmap is up and running.
27608 @@ -548,7 +546,7 @@ static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g)
27609
27610 preempt_disable();
27611
27612 - start = __get_cpu_var(idt_desc).address;
27613 + start = (unsigned long)__get_cpu_var(idt_desc).address;
27614 end = start + __get_cpu_var(idt_desc).size + 1;
27615
27616 xen_mc_flush();
27617 @@ -993,7 +991,7 @@ static const struct pv_apic_ops xen_apic_ops __initdata = {
27618 #endif
27619 };
27620
27621 -static void xen_reboot(int reason)
27622 +static __noreturn void xen_reboot(int reason)
27623 {
27624 struct sched_shutdown r = { .reason = reason };
27625
27626 @@ -1001,17 +999,17 @@ static void xen_reboot(int reason)
27627 BUG();
27628 }
27629
27630 -static void xen_restart(char *msg)
27631 +static __noreturn void xen_restart(char *msg)
27632 {
27633 xen_reboot(SHUTDOWN_reboot);
27634 }
27635
27636 -static void xen_emergency_restart(void)
27637 +static __noreturn void xen_emergency_restart(void)
27638 {
27639 xen_reboot(SHUTDOWN_reboot);
27640 }
27641
27642 -static void xen_machine_halt(void)
27643 +static __noreturn void xen_machine_halt(void)
27644 {
27645 xen_reboot(SHUTDOWN_poweroff);
27646 }
27647 @@ -1095,9 +1093,20 @@ asmlinkage void __init xen_start_kernel(void)
27648 */
27649 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
27650
27651 -#ifdef CONFIG_X86_64
27652 /* Work out if we support NX */
27653 - check_efer();
27654 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
27655 + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
27656 + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
27657 + unsigned l, h;
27658 +
27659 +#ifdef CONFIG_X86_PAE
27660 + nx_enabled = 1;
27661 +#endif
27662 + __supported_pte_mask |= _PAGE_NX;
27663 + rdmsr(MSR_EFER, l, h);
27664 + l |= EFER_NX;
27665 + wrmsr(MSR_EFER, l, h);
27666 + }
27667 #endif
27668
27669 xen_setup_features();
27670 @@ -1129,13 +1138,6 @@ asmlinkage void __init xen_start_kernel(void)
27671
27672 machine_ops = xen_machine_ops;
27673
27674 - /*
27675 - * The only reliable way to retain the initial address of the
27676 - * percpu gdt_page is to remember it here, so we can go and
27677 - * mark it RW later, when the initial percpu area is freed.
27678 - */
27679 - xen_initial_gdt = &per_cpu(gdt_page, 0);
27680 -
27681 xen_smp_init();
27682
27683 pgd = (pgd_t *)xen_start_info->pt_base;
27684 diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
27685 index 3f90a2c..2c2ad84 100644
27686 --- a/arch/x86/xen/mmu.c
27687 +++ b/arch/x86/xen/mmu.c
27688 @@ -1719,6 +1719,9 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
27689 convert_pfn_mfn(init_level4_pgt);
27690 convert_pfn_mfn(level3_ident_pgt);
27691 convert_pfn_mfn(level3_kernel_pgt);
27692 + convert_pfn_mfn(level3_vmalloc_start_pgt);
27693 + convert_pfn_mfn(level3_vmalloc_end_pgt);
27694 + convert_pfn_mfn(level3_vmemmap_pgt);
27695
27696 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
27697 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
27698 @@ -1737,7 +1740,11 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
27699 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
27700 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
27701 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
27702 + set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
27703 + set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
27704 + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
27705 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
27706 + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
27707 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
27708 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
27709
27710 @@ -1860,6 +1867,7 @@ static __init void xen_post_allocator_init(void)
27711 pv_mmu_ops.set_pud = xen_set_pud;
27712 #if PAGETABLE_LEVELS == 4
27713 pv_mmu_ops.set_pgd = xen_set_pgd;
27714 + pv_mmu_ops.set_pgd_batched = xen_set_pgd;
27715 #endif
27716
27717 /* This will work as long as patching hasn't happened yet
27718 @@ -1946,6 +1954,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
27719 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
27720 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
27721 .set_pgd = xen_set_pgd_hyper,
27722 + .set_pgd_batched = xen_set_pgd_hyper,
27723
27724 .alloc_pud = xen_alloc_pmd_init,
27725 .release_pud = xen_release_pmd_init,
27726 diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
27727 index a96204a..fca9b8e 100644
27728 --- a/arch/x86/xen/smp.c
27729 +++ b/arch/x86/xen/smp.c
27730 @@ -168,11 +168,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
27731 {
27732 BUG_ON(smp_processor_id() != 0);
27733 native_smp_prepare_boot_cpu();
27734 -
27735 - /* We've switched to the "real" per-cpu gdt, so make sure the
27736 - old memory can be recycled */
27737 - make_lowmem_page_readwrite(xen_initial_gdt);
27738 -
27739 xen_setup_vcpu_info_placement();
27740 }
27741
27742 @@ -241,12 +236,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
27743 gdt = get_cpu_gdt_table(cpu);
27744
27745 ctxt->flags = VGCF_IN_KERNEL;
27746 - ctxt->user_regs.ds = __USER_DS;
27747 - ctxt->user_regs.es = __USER_DS;
27748 + ctxt->user_regs.ds = __KERNEL_DS;
27749 + ctxt->user_regs.es = __KERNEL_DS;
27750 ctxt->user_regs.ss = __KERNEL_DS;
27751 #ifdef CONFIG_X86_32
27752 ctxt->user_regs.fs = __KERNEL_PERCPU;
27753 - ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
27754 + savesegment(gs, ctxt->user_regs.gs);
27755 #else
27756 ctxt->gs_base_kernel = per_cpu_offset(cpu);
27757 #endif
27758 @@ -297,13 +292,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
27759 int rc;
27760
27761 per_cpu(current_task, cpu) = idle;
27762 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
27763 #ifdef CONFIG_X86_32
27764 irq_ctx_init(cpu);
27765 #else
27766 clear_tsk_thread_flag(idle, TIF_FORK);
27767 - per_cpu(kernel_stack, cpu) =
27768 - (unsigned long)task_stack_page(idle) -
27769 - KERNEL_STACK_OFFSET + THREAD_SIZE;
27770 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
27771 #endif
27772 xen_setup_runstate_info(cpu);
27773 xen_setup_timer(cpu);
27774 diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
27775 index 9a95a9c..4f39e774 100644
27776 --- a/arch/x86/xen/xen-asm_32.S
27777 +++ b/arch/x86/xen/xen-asm_32.S
27778 @@ -83,14 +83,14 @@ ENTRY(xen_iret)
27779 ESP_OFFSET=4 # bytes pushed onto stack
27780
27781 /*
27782 - * Store vcpu_info pointer for easy access. Do it this way to
27783 - * avoid having to reload %fs
27784 + * Store vcpu_info pointer for easy access.
27785 */
27786 #ifdef CONFIG_SMP
27787 - GET_THREAD_INFO(%eax)
27788 - movl TI_cpu(%eax), %eax
27789 - movl __per_cpu_offset(,%eax,4), %eax
27790 - mov per_cpu__xen_vcpu(%eax), %eax
27791 + push %fs
27792 + mov $(__KERNEL_PERCPU), %eax
27793 + mov %eax, %fs
27794 + mov PER_CPU_VAR(xen_vcpu), %eax
27795 + pop %fs
27796 #else
27797 movl per_cpu__xen_vcpu, %eax
27798 #endif
27799 diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
27800 index 1a5ff24..a187d40 100644
27801 --- a/arch/x86/xen/xen-head.S
27802 +++ b/arch/x86/xen/xen-head.S
27803 @@ -19,6 +19,17 @@ ENTRY(startup_xen)
27804 #ifdef CONFIG_X86_32
27805 mov %esi,xen_start_info
27806 mov $init_thread_union+THREAD_SIZE,%esp
27807 +#ifdef CONFIG_SMP
27808 + movl $cpu_gdt_table,%edi
27809 + movl $__per_cpu_load,%eax
27810 + movw %ax,__KERNEL_PERCPU + 2(%edi)
27811 + rorl $16,%eax
27812 + movb %al,__KERNEL_PERCPU + 4(%edi)
27813 + movb %ah,__KERNEL_PERCPU + 7(%edi)
27814 + movl $__per_cpu_end - 1,%eax
27815 + subl $__per_cpu_start,%eax
27816 + movw %ax,__KERNEL_PERCPU + 0(%edi)
27817 +#endif
27818 #else
27819 mov %rsi,xen_start_info
27820 mov $init_thread_union+THREAD_SIZE,%rsp
27821 diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
27822 index f9153a3..51eab3d 100644
27823 --- a/arch/x86/xen/xen-ops.h
27824 +++ b/arch/x86/xen/xen-ops.h
27825 @@ -10,8 +10,6 @@
27826 extern const char xen_hypervisor_callback[];
27827 extern const char xen_failsafe_callback[];
27828
27829 -extern void *xen_initial_gdt;
27830 -
27831 struct trap_info;
27832 void xen_copy_trap_info(struct trap_info *traps);
27833
27834 diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
27835 index 525bd3d..ef888b1 100644
27836 --- a/arch/xtensa/variants/dc232b/include/variant/core.h
27837 +++ b/arch/xtensa/variants/dc232b/include/variant/core.h
27838 @@ -119,9 +119,9 @@
27839 ----------------------------------------------------------------------*/
27840
27841 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
27842 -#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
27843 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
27844 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
27845 +#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
27846
27847 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
27848 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
27849 diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
27850 index 2f33760..835e50a 100644
27851 --- a/arch/xtensa/variants/fsf/include/variant/core.h
27852 +++ b/arch/xtensa/variants/fsf/include/variant/core.h
27853 @@ -11,6 +11,7 @@
27854 #ifndef _XTENSA_CORE_H
27855 #define _XTENSA_CORE_H
27856
27857 +#include <linux/const.h>
27858
27859 /****************************************************************************
27860 Parameters Useful for Any Code, USER or PRIVILEGED
27861 @@ -112,9 +113,9 @@
27862 ----------------------------------------------------------------------*/
27863
27864 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
27865 -#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
27866 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
27867 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
27868 +#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
27869
27870 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
27871 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
27872 diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
27873 index af00795..2bb8105 100644
27874 --- a/arch/xtensa/variants/s6000/include/variant/core.h
27875 +++ b/arch/xtensa/variants/s6000/include/variant/core.h
27876 @@ -11,6 +11,7 @@
27877 #ifndef _XTENSA_CORE_CONFIGURATION_H
27878 #define _XTENSA_CORE_CONFIGURATION_H
27879
27880 +#include <linux/const.h>
27881
27882 /****************************************************************************
27883 Parameters Useful for Any Code, USER or PRIVILEGED
27884 @@ -118,9 +119,9 @@
27885 ----------------------------------------------------------------------*/
27886
27887 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
27888 -#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
27889 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
27890 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
27891 +#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
27892
27893 #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
27894 #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
27895 diff --git a/block/blk-integrity.c b/block/blk-integrity.c
27896 index 15c6308..96e83c2 100644
27897 --- a/block/blk-integrity.c
27898 +++ b/block/blk-integrity.c
27899 @@ -278,7 +278,7 @@ static struct attribute *integrity_attrs[] = {
27900 NULL,
27901 };
27902
27903 -static struct sysfs_ops integrity_ops = {
27904 +static const struct sysfs_ops integrity_ops = {
27905 .show = &integrity_attr_show,
27906 .store = &integrity_attr_store,
27907 };
27908 diff --git a/block/blk-ioc.c b/block/blk-ioc.c
27909 index d4ed600..cbdabb0 100644
27910 --- a/block/blk-ioc.c
27911 +++ b/block/blk-ioc.c
27912 @@ -66,22 +66,22 @@ static void cfq_exit(struct io_context *ioc)
27913 }
27914
27915 /* Called by the exitting task */
27916 -void exit_io_context(void)
27917 +void exit_io_context(struct task_struct *task)
27918 {
27919 struct io_context *ioc;
27920
27921 - task_lock(current);
27922 - ioc = current->io_context;
27923 - current->io_context = NULL;
27924 - task_unlock(current);
27925 + task_lock(task);
27926 + ioc = task->io_context;
27927 + task->io_context = NULL;
27928 + task_unlock(task);
27929
27930 if (atomic_dec_and_test(&ioc->nr_tasks)) {
27931 if (ioc->aic && ioc->aic->exit)
27932 ioc->aic->exit(ioc->aic);
27933 cfq_exit(ioc);
27934
27935 - put_io_context(ioc);
27936 }
27937 + put_io_context(ioc);
27938 }
27939
27940 struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
27941 diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
27942 index ca56420..f2fc409 100644
27943 --- a/block/blk-iopoll.c
27944 +++ b/block/blk-iopoll.c
27945 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
27946 }
27947 EXPORT_SYMBOL(blk_iopoll_complete);
27948
27949 -static void blk_iopoll_softirq(struct softirq_action *h)
27950 +static void blk_iopoll_softirq(void)
27951 {
27952 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
27953 int rearm = 0, budget = blk_iopoll_budget;
27954 diff --git a/block/blk-map.c b/block/blk-map.c
27955 index 30a7e51..0aeec6a 100644
27956 --- a/block/blk-map.c
27957 +++ b/block/blk-map.c
27958 @@ -54,7 +54,7 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
27959 * direct dma. else, set up kernel bounce buffers
27960 */
27961 uaddr = (unsigned long) ubuf;
27962 - if (blk_rq_aligned(q, ubuf, len) && !map_data)
27963 + if (blk_rq_aligned(q, (__force void *)ubuf, len) && !map_data)
27964 bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
27965 else
27966 bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);
27967 @@ -201,12 +201,13 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
27968 for (i = 0; i < iov_count; i++) {
27969 unsigned long uaddr = (unsigned long)iov[i].iov_base;
27970
27971 + if (!iov[i].iov_len)
27972 + return -EINVAL;
27973 +
27974 if (uaddr & queue_dma_alignment(q)) {
27975 unaligned = 1;
27976 break;
27977 }
27978 - if (!iov[i].iov_len)
27979 - return -EINVAL;
27980 }
27981
27982 if (unaligned || (q->dma_pad_mask & len) || map_data)
27983 @@ -299,7 +300,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
27984 if (!len || !kbuf)
27985 return -EINVAL;
27986
27987 - do_copy = !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf);
27988 + do_copy = !blk_rq_aligned(q, kbuf, len) || object_starts_on_stack(kbuf);
27989 if (do_copy)
27990 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
27991 else
27992 diff --git a/block/blk-softirq.c b/block/blk-softirq.c
27993 index ee9c216..58d410a 100644
27994 --- a/block/blk-softirq.c
27995 +++ b/block/blk-softirq.c
27996 @@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
27997 * Softirq action handler - move entries to local list and loop over them
27998 * while passing them to the queue registered handler.
27999 */
28000 -static void blk_done_softirq(struct softirq_action *h)
28001 +static void blk_done_softirq(void)
28002 {
28003 struct list_head *cpu_list, local_list;
28004
28005 diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
28006 index bb9c5ea..5330d48 100644
28007 --- a/block/blk-sysfs.c
28008 +++ b/block/blk-sysfs.c
28009 @@ -414,7 +414,7 @@ static void blk_release_queue(struct kobject *kobj)
28010 kmem_cache_free(blk_requestq_cachep, q);
28011 }
28012
28013 -static struct sysfs_ops queue_sysfs_ops = {
28014 +static const struct sysfs_ops queue_sysfs_ops = {
28015 .show = queue_attr_show,
28016 .store = queue_attr_store,
28017 };
28018 diff --git a/block/bsg.c b/block/bsg.c
28019 index 7154a7a..08ac2f0 100644
28020 --- a/block/bsg.c
28021 +++ b/block/bsg.c
28022 @@ -175,16 +175,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
28023 struct sg_io_v4 *hdr, struct bsg_device *bd,
28024 fmode_t has_write_perm)
28025 {
28026 + unsigned char tmpcmd[sizeof(rq->__cmd)];
28027 + unsigned char *cmdptr;
28028 +
28029 if (hdr->request_len > BLK_MAX_CDB) {
28030 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
28031 if (!rq->cmd)
28032 return -ENOMEM;
28033 - }
28034 + cmdptr = rq->cmd;
28035 + } else
28036 + cmdptr = tmpcmd;
28037
28038 - if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
28039 + if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
28040 hdr->request_len))
28041 return -EFAULT;
28042
28043 + if (cmdptr != rq->cmd)
28044 + memcpy(rq->cmd, cmdptr, hdr->request_len);
28045 +
28046 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
28047 if (blk_verify_command(rq->cmd, has_write_perm))
28048 return -EPERM;
28049 @@ -282,7 +290,7 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
28050 rq->next_rq = next_rq;
28051 next_rq->cmd_type = rq->cmd_type;
28052
28053 - dxferp = (void*)(unsigned long)hdr->din_xferp;
28054 + dxferp = (void __user *)(unsigned long)hdr->din_xferp;
28055 ret = blk_rq_map_user(q, next_rq, NULL, dxferp,
28056 hdr->din_xfer_len, GFP_KERNEL);
28057 if (ret)
28058 @@ -291,10 +299,10 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
28059
28060 if (hdr->dout_xfer_len) {
28061 dxfer_len = hdr->dout_xfer_len;
28062 - dxferp = (void*)(unsigned long)hdr->dout_xferp;
28063 + dxferp = (void __user *)(unsigned long)hdr->dout_xferp;
28064 } else if (hdr->din_xfer_len) {
28065 dxfer_len = hdr->din_xfer_len;
28066 - dxferp = (void*)(unsigned long)hdr->din_xferp;
28067 + dxferp = (void __user *)(unsigned long)hdr->din_xferp;
28068 } else
28069 dxfer_len = 0;
28070
28071 @@ -436,7 +444,7 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
28072 int len = min_t(unsigned int, hdr->max_response_len,
28073 rq->sense_len);
28074
28075 - ret = copy_to_user((void*)(unsigned long)hdr->response,
28076 + ret = copy_to_user((void __user *)(unsigned long)hdr->response,
28077 rq->sense, len);
28078 if (!ret)
28079 hdr->response_len = len;
28080 diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
28081 index 9bd086c..ca1fc22 100644
28082 --- a/block/compat_ioctl.c
28083 +++ b/block/compat_ioctl.c
28084 @@ -354,7 +354,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
28085 err |= __get_user(f->spec1, &uf->spec1);
28086 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
28087 err |= __get_user(name, &uf->name);
28088 - f->name = compat_ptr(name);
28089 + f->name = (void __force_kernel *)compat_ptr(name);
28090 if (err) {
28091 err = -EFAULT;
28092 goto out;
28093 diff --git a/block/elevator.c b/block/elevator.c
28094 index a847046..75a1746 100644
28095 --- a/block/elevator.c
28096 +++ b/block/elevator.c
28097 @@ -889,7 +889,7 @@ elv_attr_store(struct kobject *kobj, struct attribute *attr,
28098 return error;
28099 }
28100
28101 -static struct sysfs_ops elv_sysfs_ops = {
28102 +static const struct sysfs_ops elv_sysfs_ops = {
28103 .show = elv_attr_show,
28104 .store = elv_attr_store,
28105 };
28106 diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
28107 index 2be0a97..bded3fd 100644
28108 --- a/block/scsi_ioctl.c
28109 +++ b/block/scsi_ioctl.c
28110 @@ -221,8 +221,20 @@ EXPORT_SYMBOL(blk_verify_command);
28111 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
28112 struct sg_io_hdr *hdr, fmode_t mode)
28113 {
28114 - if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
28115 + unsigned char tmpcmd[sizeof(rq->__cmd)];
28116 + unsigned char *cmdptr;
28117 +
28118 + if (rq->cmd != rq->__cmd)
28119 + cmdptr = rq->cmd;
28120 + else
28121 + cmdptr = tmpcmd;
28122 +
28123 + if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
28124 return -EFAULT;
28125 +
28126 + if (cmdptr != rq->cmd)
28127 + memcpy(rq->cmd, cmdptr, hdr->cmd_len);
28128 +
28129 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
28130 return -EPERM;
28131
28132 @@ -431,6 +443,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
28133 int err;
28134 unsigned int in_len, out_len, bytes, opcode, cmdlen;
28135 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
28136 + unsigned char tmpcmd[sizeof(rq->__cmd)];
28137 + unsigned char *cmdptr;
28138
28139 if (!sic)
28140 return -EINVAL;
28141 @@ -464,9 +478,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
28142 */
28143 err = -EFAULT;
28144 rq->cmd_len = cmdlen;
28145 - if (copy_from_user(rq->cmd, sic->data, cmdlen))
28146 +
28147 + if (rq->cmd != rq->__cmd)
28148 + cmdptr = rq->cmd;
28149 + else
28150 + cmdptr = tmpcmd;
28151 +
28152 + if (copy_from_user(cmdptr, sic->data, cmdlen))
28153 goto error;
28154
28155 + if (rq->cmd != cmdptr)
28156 + memcpy(rq->cmd, cmdptr, cmdlen);
28157 +
28158 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
28159 goto error;
28160
28161 diff --git a/crypto/cryptd.c b/crypto/cryptd.c
28162 index 3533582..f143117 100644
28163 --- a/crypto/cryptd.c
28164 +++ b/crypto/cryptd.c
28165 @@ -50,7 +50,7 @@ struct cryptd_blkcipher_ctx {
28166
28167 struct cryptd_blkcipher_request_ctx {
28168 crypto_completion_t complete;
28169 -};
28170 +} __no_const;
28171
28172 struct cryptd_hash_ctx {
28173 struct crypto_shash *child;
28174 diff --git a/crypto/gf128mul.c b/crypto/gf128mul.c
28175 index a90d260..7a9765e 100644
28176 --- a/crypto/gf128mul.c
28177 +++ b/crypto/gf128mul.c
28178 @@ -182,7 +182,7 @@ void gf128mul_lle(be128 *r, const be128 *b)
28179 for (i = 0; i < 7; ++i)
28180 gf128mul_x_lle(&p[i + 1], &p[i]);
28181
28182 - memset(r, 0, sizeof(r));
28183 + memset(r, 0, sizeof(*r));
28184 for (i = 0;;) {
28185 u8 ch = ((u8 *)b)[15 - i];
28186
28187 @@ -220,7 +220,7 @@ void gf128mul_bbe(be128 *r, const be128 *b)
28188 for (i = 0; i < 7; ++i)
28189 gf128mul_x_bbe(&p[i + 1], &p[i]);
28190
28191 - memset(r, 0, sizeof(r));
28192 + memset(r, 0, sizeof(*r));
28193 for (i = 0;;) {
28194 u8 ch = ((u8 *)b)[i];
28195
28196 diff --git a/crypto/serpent.c b/crypto/serpent.c
28197 index b651a55..023297d 100644
28198 --- a/crypto/serpent.c
28199 +++ b/crypto/serpent.c
28200 @@ -21,6 +21,7 @@
28201 #include <asm/byteorder.h>
28202 #include <linux/crypto.h>
28203 #include <linux/types.h>
28204 +#include <linux/sched.h>
28205
28206 /* Key is padded to the maximum of 256 bits before round key generation.
28207 * Any key length <= 256 bits (32 bytes) is allowed by the algorithm.
28208 @@ -224,6 +225,8 @@ static int serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
28209 u32 r0,r1,r2,r3,r4;
28210 int i;
28211
28212 + pax_track_stack();
28213 +
28214 /* Copy key, add padding */
28215
28216 for (i = 0; i < keylen; ++i)
28217 diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
28218 index 0d2cdb8..d8de48d 100644
28219 --- a/drivers/acpi/acpi_pad.c
28220 +++ b/drivers/acpi/acpi_pad.c
28221 @@ -30,7 +30,7 @@
28222 #include <acpi/acpi_bus.h>
28223 #include <acpi/acpi_drivers.h>
28224
28225 -#define ACPI_PROCESSOR_AGGREGATOR_CLASS "processor_aggregator"
28226 +#define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad"
28227 #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
28228 #define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
28229 static DEFINE_MUTEX(isolated_cpus_lock);
28230 diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
28231 index 3f4602b..2e41d36 100644
28232 --- a/drivers/acpi/battery.c
28233 +++ b/drivers/acpi/battery.c
28234 @@ -763,7 +763,7 @@ DECLARE_FILE_FUNCTIONS(alarm);
28235 }
28236
28237 static struct battery_file {
28238 - struct file_operations ops;
28239 + const struct file_operations ops;
28240 mode_t mode;
28241 const char *name;
28242 } acpi_battery_file[] = {
28243 diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
28244 index 7338b6a..82f0257 100644
28245 --- a/drivers/acpi/dock.c
28246 +++ b/drivers/acpi/dock.c
28247 @@ -77,7 +77,7 @@ struct dock_dependent_device {
28248 struct list_head list;
28249 struct list_head hotplug_list;
28250 acpi_handle handle;
28251 - struct acpi_dock_ops *ops;
28252 + const struct acpi_dock_ops *ops;
28253 void *context;
28254 };
28255
28256 @@ -605,7 +605,7 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifier);
28257 * the dock driver after _DCK is executed.
28258 */
28259 int
28260 -register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops,
28261 +register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops,
28262 void *context)
28263 {
28264 struct dock_dependent_device *dd;
28265 diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
28266 index 7c1c59e..2993595 100644
28267 --- a/drivers/acpi/osl.c
28268 +++ b/drivers/acpi/osl.c
28269 @@ -523,6 +523,8 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
28270 void __iomem *virt_addr;
28271
28272 virt_addr = ioremap(phys_addr, width);
28273 + if (!virt_addr)
28274 + return AE_NO_MEMORY;
28275 if (!value)
28276 value = &dummy;
28277
28278 @@ -551,6 +553,8 @@ acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
28279 void __iomem *virt_addr;
28280
28281 virt_addr = ioremap(phys_addr, width);
28282 + if (!virt_addr)
28283 + return AE_NO_MEMORY;
28284
28285 switch (width) {
28286 case 8:
28287 diff --git a/drivers/acpi/power_meter.c b/drivers/acpi/power_meter.c
28288 index c216062..eec10d2 100644
28289 --- a/drivers/acpi/power_meter.c
28290 +++ b/drivers/acpi/power_meter.c
28291 @@ -315,8 +315,6 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
28292 return res;
28293
28294 temp /= 1000;
28295 - if (temp < 0)
28296 - return -EINVAL;
28297
28298 mutex_lock(&resource->lock);
28299 resource->trip[attr->index - 7] = temp;
28300 diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
28301 index d0d25e2..961643d 100644
28302 --- a/drivers/acpi/proc.c
28303 +++ b/drivers/acpi/proc.c
28304 @@ -391,20 +391,15 @@ acpi_system_write_wakeup_device(struct file *file,
28305 size_t count, loff_t * ppos)
28306 {
28307 struct list_head *node, *next;
28308 - char strbuf[5];
28309 - char str[5] = "";
28310 - unsigned int len = count;
28311 + char strbuf[5] = {0};
28312 struct acpi_device *found_dev = NULL;
28313
28314 - if (len > 4)
28315 - len = 4;
28316 - if (len < 0)
28317 - return -EFAULT;
28318 + if (count > 4)
28319 + count = 4;
28320
28321 - if (copy_from_user(strbuf, buffer, len))
28322 + if (copy_from_user(strbuf, buffer, count))
28323 return -EFAULT;
28324 - strbuf[len] = '\0';
28325 - sscanf(strbuf, "%s", str);
28326 + strbuf[count] = '\0';
28327
28328 mutex_lock(&acpi_device_lock);
28329 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
28330 @@ -413,7 +408,7 @@ acpi_system_write_wakeup_device(struct file *file,
28331 if (!dev->wakeup.flags.valid)
28332 continue;
28333
28334 - if (!strncmp(dev->pnp.bus_id, str, 4)) {
28335 + if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
28336 dev->wakeup.state.enabled =
28337 dev->wakeup.state.enabled ? 0 : 1;
28338 found_dev = dev;
28339 diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
28340 index 7102474..de8ad22 100644
28341 --- a/drivers/acpi/processor_core.c
28342 +++ b/drivers/acpi/processor_core.c
28343 @@ -790,7 +790,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
28344 return 0;
28345 }
28346
28347 - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
28348 + BUG_ON(pr->id >= nr_cpu_ids);
28349
28350 /*
28351 * Buggy BIOS check
28352 diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
28353 index d933980..5761f13 100644
28354 --- a/drivers/acpi/sbshc.c
28355 +++ b/drivers/acpi/sbshc.c
28356 @@ -17,7 +17,7 @@
28357
28358 #define PREFIX "ACPI: "
28359
28360 -#define ACPI_SMB_HC_CLASS "smbus_host_controller"
28361 +#define ACPI_SMB_HC_CLASS "smbus_host_ctl"
28362 #define ACPI_SMB_HC_DEVICE_NAME "ACPI SMBus HC"
28363
28364 struct acpi_smb_hc {
28365 diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
28366 index 0458094..6978e7b 100644
28367 --- a/drivers/acpi/sleep.c
28368 +++ b/drivers/acpi/sleep.c
28369 @@ -283,7 +283,7 @@ static int acpi_suspend_state_valid(suspend_state_t pm_state)
28370 }
28371 }
28372
28373 -static struct platform_suspend_ops acpi_suspend_ops = {
28374 +static const struct platform_suspend_ops acpi_suspend_ops = {
28375 .valid = acpi_suspend_state_valid,
28376 .begin = acpi_suspend_begin,
28377 .prepare_late = acpi_pm_prepare,
28378 @@ -311,7 +311,7 @@ static int acpi_suspend_begin_old(suspend_state_t pm_state)
28379 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
28380 * been requested.
28381 */
28382 -static struct platform_suspend_ops acpi_suspend_ops_old = {
28383 +static const struct platform_suspend_ops acpi_suspend_ops_old = {
28384 .valid = acpi_suspend_state_valid,
28385 .begin = acpi_suspend_begin_old,
28386 .prepare_late = acpi_pm_disable_gpes,
28387 @@ -460,7 +460,7 @@ static void acpi_pm_enable_gpes(void)
28388 acpi_enable_all_runtime_gpes();
28389 }
28390
28391 -static struct platform_hibernation_ops acpi_hibernation_ops = {
28392 +static const struct platform_hibernation_ops acpi_hibernation_ops = {
28393 .begin = acpi_hibernation_begin,
28394 .end = acpi_pm_end,
28395 .pre_snapshot = acpi_hibernation_pre_snapshot,
28396 @@ -513,7 +513,7 @@ static int acpi_hibernation_pre_snapshot_old(void)
28397 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
28398 * been requested.
28399 */
28400 -static struct platform_hibernation_ops acpi_hibernation_ops_old = {
28401 +static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
28402 .begin = acpi_hibernation_begin_old,
28403 .end = acpi_pm_end,
28404 .pre_snapshot = acpi_hibernation_pre_snapshot_old,
28405 diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
28406 index 05dff63..b662ab7 100644
28407 --- a/drivers/acpi/video.c
28408 +++ b/drivers/acpi/video.c
28409 @@ -359,7 +359,7 @@ static int acpi_video_set_brightness(struct backlight_device *bd)
28410 vd->brightness->levels[request_level]);
28411 }
28412
28413 -static struct backlight_ops acpi_backlight_ops = {
28414 +static const struct backlight_ops acpi_backlight_ops = {
28415 .get_brightness = acpi_video_get_brightness,
28416 .update_status = acpi_video_set_brightness,
28417 };
28418 diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
28419 index 6787aab..23ffb0e 100644
28420 --- a/drivers/ata/ahci.c
28421 +++ b/drivers/ata/ahci.c
28422 @@ -387,7 +387,7 @@ static struct scsi_host_template ahci_sht = {
28423 .sdev_attrs = ahci_sdev_attrs,
28424 };
28425
28426 -static struct ata_port_operations ahci_ops = {
28427 +static const struct ata_port_operations ahci_ops = {
28428 .inherits = &sata_pmp_port_ops,
28429
28430 .qc_defer = sata_pmp_qc_defer_cmd_switch,
28431 @@ -424,17 +424,17 @@ static struct ata_port_operations ahci_ops = {
28432 .port_stop = ahci_port_stop,
28433 };
28434
28435 -static struct ata_port_operations ahci_vt8251_ops = {
28436 +static const struct ata_port_operations ahci_vt8251_ops = {
28437 .inherits = &ahci_ops,
28438 .hardreset = ahci_vt8251_hardreset,
28439 };
28440
28441 -static struct ata_port_operations ahci_p5wdh_ops = {
28442 +static const struct ata_port_operations ahci_p5wdh_ops = {
28443 .inherits = &ahci_ops,
28444 .hardreset = ahci_p5wdh_hardreset,
28445 };
28446
28447 -static struct ata_port_operations ahci_sb600_ops = {
28448 +static const struct ata_port_operations ahci_sb600_ops = {
28449 .inherits = &ahci_ops,
28450 .softreset = ahci_sb600_softreset,
28451 .pmp_softreset = ahci_sb600_softreset,
28452 diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
28453 index 99e7196..4968c77 100644
28454 --- a/drivers/ata/ata_generic.c
28455 +++ b/drivers/ata/ata_generic.c
28456 @@ -104,7 +104,7 @@ static struct scsi_host_template generic_sht = {
28457 ATA_BMDMA_SHT(DRV_NAME),
28458 };
28459
28460 -static struct ata_port_operations generic_port_ops = {
28461 +static const struct ata_port_operations generic_port_ops = {
28462 .inherits = &ata_bmdma_port_ops,
28463 .cable_detect = ata_cable_unknown,
28464 .set_mode = generic_set_mode,
28465 diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
28466 index c33591d..000c121 100644
28467 --- a/drivers/ata/ata_piix.c
28468 +++ b/drivers/ata/ata_piix.c
28469 @@ -318,7 +318,7 @@ static struct scsi_host_template piix_sht = {
28470 ATA_BMDMA_SHT(DRV_NAME),
28471 };
28472
28473 -static struct ata_port_operations piix_pata_ops = {
28474 +static const struct ata_port_operations piix_pata_ops = {
28475 .inherits = &ata_bmdma32_port_ops,
28476 .cable_detect = ata_cable_40wire,
28477 .set_piomode = piix_set_piomode,
28478 @@ -326,22 +326,22 @@ static struct ata_port_operations piix_pata_ops = {
28479 .prereset = piix_pata_prereset,
28480 };
28481
28482 -static struct ata_port_operations piix_vmw_ops = {
28483 +static const struct ata_port_operations piix_vmw_ops = {
28484 .inherits = &piix_pata_ops,
28485 .bmdma_status = piix_vmw_bmdma_status,
28486 };
28487
28488 -static struct ata_port_operations ich_pata_ops = {
28489 +static const struct ata_port_operations ich_pata_ops = {
28490 .inherits = &piix_pata_ops,
28491 .cable_detect = ich_pata_cable_detect,
28492 .set_dmamode = ich_set_dmamode,
28493 };
28494
28495 -static struct ata_port_operations piix_sata_ops = {
28496 +static const struct ata_port_operations piix_sata_ops = {
28497 .inherits = &ata_bmdma_port_ops,
28498 };
28499
28500 -static struct ata_port_operations piix_sidpr_sata_ops = {
28501 +static const struct ata_port_operations piix_sidpr_sata_ops = {
28502 .inherits = &piix_sata_ops,
28503 .hardreset = sata_std_hardreset,
28504 .scr_read = piix_sidpr_scr_read,
28505 diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
28506 index b0882cd..c295d65 100644
28507 --- a/drivers/ata/libata-acpi.c
28508 +++ b/drivers/ata/libata-acpi.c
28509 @@ -223,12 +223,12 @@ static void ata_acpi_dev_uevent(acpi_handle handle, u32 event, void *data)
28510 ata_acpi_uevent(dev->link->ap, dev, event);
28511 }
28512
28513 -static struct acpi_dock_ops ata_acpi_dev_dock_ops = {
28514 +static const struct acpi_dock_ops ata_acpi_dev_dock_ops = {
28515 .handler = ata_acpi_dev_notify_dock,
28516 .uevent = ata_acpi_dev_uevent,
28517 };
28518
28519 -static struct acpi_dock_ops ata_acpi_ap_dock_ops = {
28520 +static const struct acpi_dock_ops ata_acpi_ap_dock_ops = {
28521 .handler = ata_acpi_ap_notify_dock,
28522 .uevent = ata_acpi_ap_uevent,
28523 };
28524 diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
28525 index d4f7f99..94f603e 100644
28526 --- a/drivers/ata/libata-core.c
28527 +++ b/drivers/ata/libata-core.c
28528 @@ -4954,7 +4954,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
28529 struct ata_port *ap;
28530 unsigned int tag;
28531
28532 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
28533 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
28534 ap = qc->ap;
28535
28536 qc->flags = 0;
28537 @@ -4970,7 +4970,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
28538 struct ata_port *ap;
28539 struct ata_link *link;
28540
28541 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
28542 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
28543 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
28544 ap = qc->ap;
28545 link = qc->dev->link;
28546 @@ -5987,7 +5987,7 @@ static void ata_host_stop(struct device *gendev, void *res)
28547 * LOCKING:
28548 * None.
28549 */
28550 -static void ata_finalize_port_ops(struct ata_port_operations *ops)
28551 +static void ata_finalize_port_ops(const struct ata_port_operations *ops)
28552 {
28553 static DEFINE_SPINLOCK(lock);
28554 const struct ata_port_operations *cur;
28555 @@ -5999,6 +5999,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
28556 return;
28557
28558 spin_lock(&lock);
28559 + pax_open_kernel();
28560
28561 for (cur = ops->inherits; cur; cur = cur->inherits) {
28562 void **inherit = (void **)cur;
28563 @@ -6012,8 +6013,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
28564 if (IS_ERR(*pp))
28565 *pp = NULL;
28566
28567 - ops->inherits = NULL;
28568 + *(struct ata_port_operations **)&ops->inherits = NULL;
28569
28570 + pax_close_kernel();
28571 spin_unlock(&lock);
28572 }
28573
28574 @@ -6110,7 +6112,7 @@ int ata_host_start(struct ata_host *host)
28575 */
28576 /* KILLME - the only user left is ipr */
28577 void ata_host_init(struct ata_host *host, struct device *dev,
28578 - unsigned long flags, struct ata_port_operations *ops)
28579 + unsigned long flags, const struct ata_port_operations *ops)
28580 {
28581 spin_lock_init(&host->lock);
28582 host->dev = dev;
28583 @@ -6773,7 +6775,7 @@ static void ata_dummy_error_handler(struct ata_port *ap)
28584 /* truly dummy */
28585 }
28586
28587 -struct ata_port_operations ata_dummy_port_ops = {
28588 +const struct ata_port_operations ata_dummy_port_ops = {
28589 .qc_prep = ata_noop_qc_prep,
28590 .qc_issue = ata_dummy_qc_issue,
28591 .error_handler = ata_dummy_error_handler,
28592 diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
28593 index e5bdb9b..45a8e72 100644
28594 --- a/drivers/ata/libata-eh.c
28595 +++ b/drivers/ata/libata-eh.c
28596 @@ -2423,6 +2423,8 @@ void ata_eh_report(struct ata_port *ap)
28597 {
28598 struct ata_link *link;
28599
28600 + pax_track_stack();
28601 +
28602 ata_for_each_link(link, ap, HOST_FIRST)
28603 ata_eh_link_report(link);
28604 }
28605 @@ -3594,7 +3596,7 @@ void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
28606 */
28607 void ata_std_error_handler(struct ata_port *ap)
28608 {
28609 - struct ata_port_operations *ops = ap->ops;
28610 + const struct ata_port_operations *ops = ap->ops;
28611 ata_reset_fn_t hardreset = ops->hardreset;
28612
28613 /* ignore built-in hardreset if SCR access is not available */
28614 diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
28615 index 51f0ffb..19ce3e3 100644
28616 --- a/drivers/ata/libata-pmp.c
28617 +++ b/drivers/ata/libata-pmp.c
28618 @@ -841,7 +841,7 @@ static int sata_pmp_handle_link_fail(struct ata_link *link, int *link_tries)
28619 */
28620 static int sata_pmp_eh_recover(struct ata_port *ap)
28621 {
28622 - struct ata_port_operations *ops = ap->ops;
28623 + const struct ata_port_operations *ops = ap->ops;
28624 int pmp_tries, link_tries[SATA_PMP_MAX_PORTS];
28625 struct ata_link *pmp_link = &ap->link;
28626 struct ata_device *pmp_dev = pmp_link->device;
28627 diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c
28628 index d8f35fe..288180a 100644
28629 --- a/drivers/ata/pata_acpi.c
28630 +++ b/drivers/ata/pata_acpi.c
28631 @@ -215,7 +215,7 @@ static struct scsi_host_template pacpi_sht = {
28632 ATA_BMDMA_SHT(DRV_NAME),
28633 };
28634
28635 -static struct ata_port_operations pacpi_ops = {
28636 +static const struct ata_port_operations pacpi_ops = {
28637 .inherits = &ata_bmdma_port_ops,
28638 .qc_issue = pacpi_qc_issue,
28639 .cable_detect = pacpi_cable_detect,
28640 diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
28641 index 9434114..1f2f364 100644
28642 --- a/drivers/ata/pata_ali.c
28643 +++ b/drivers/ata/pata_ali.c
28644 @@ -365,7 +365,7 @@ static struct scsi_host_template ali_sht = {
28645 * Port operations for PIO only ALi
28646 */
28647
28648 -static struct ata_port_operations ali_early_port_ops = {
28649 +static const struct ata_port_operations ali_early_port_ops = {
28650 .inherits = &ata_sff_port_ops,
28651 .cable_detect = ata_cable_40wire,
28652 .set_piomode = ali_set_piomode,
28653 @@ -382,7 +382,7 @@ static const struct ata_port_operations ali_dma_base_ops = {
28654 * Port operations for DMA capable ALi without cable
28655 * detect
28656 */
28657 -static struct ata_port_operations ali_20_port_ops = {
28658 +static const struct ata_port_operations ali_20_port_ops = {
28659 .inherits = &ali_dma_base_ops,
28660 .cable_detect = ata_cable_40wire,
28661 .mode_filter = ali_20_filter,
28662 @@ -393,7 +393,7 @@ static struct ata_port_operations ali_20_port_ops = {
28663 /*
28664 * Port operations for DMA capable ALi with cable detect
28665 */
28666 -static struct ata_port_operations ali_c2_port_ops = {
28667 +static const struct ata_port_operations ali_c2_port_ops = {
28668 .inherits = &ali_dma_base_ops,
28669 .check_atapi_dma = ali_check_atapi_dma,
28670 .cable_detect = ali_c2_cable_detect,
28671 @@ -404,7 +404,7 @@ static struct ata_port_operations ali_c2_port_ops = {
28672 /*
28673 * Port operations for DMA capable ALi with cable detect
28674 */
28675 -static struct ata_port_operations ali_c4_port_ops = {
28676 +static const struct ata_port_operations ali_c4_port_ops = {
28677 .inherits = &ali_dma_base_ops,
28678 .check_atapi_dma = ali_check_atapi_dma,
28679 .cable_detect = ali_c2_cable_detect,
28680 @@ -414,7 +414,7 @@ static struct ata_port_operations ali_c4_port_ops = {
28681 /*
28682 * Port operations for DMA capable ALi with cable detect and LBA48
28683 */
28684 -static struct ata_port_operations ali_c5_port_ops = {
28685 +static const struct ata_port_operations ali_c5_port_ops = {
28686 .inherits = &ali_dma_base_ops,
28687 .check_atapi_dma = ali_check_atapi_dma,
28688 .dev_config = ali_warn_atapi_dma,
28689 diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
28690 index 567f3f7..c8ee0da 100644
28691 --- a/drivers/ata/pata_amd.c
28692 +++ b/drivers/ata/pata_amd.c
28693 @@ -397,28 +397,28 @@ static const struct ata_port_operations amd_base_port_ops = {
28694 .prereset = amd_pre_reset,
28695 };
28696
28697 -static struct ata_port_operations amd33_port_ops = {
28698 +static const struct ata_port_operations amd33_port_ops = {
28699 .inherits = &amd_base_port_ops,
28700 .cable_detect = ata_cable_40wire,
28701 .set_piomode = amd33_set_piomode,
28702 .set_dmamode = amd33_set_dmamode,
28703 };
28704
28705 -static struct ata_port_operations amd66_port_ops = {
28706 +static const struct ata_port_operations amd66_port_ops = {
28707 .inherits = &amd_base_port_ops,
28708 .cable_detect = ata_cable_unknown,
28709 .set_piomode = amd66_set_piomode,
28710 .set_dmamode = amd66_set_dmamode,
28711 };
28712
28713 -static struct ata_port_operations amd100_port_ops = {
28714 +static const struct ata_port_operations amd100_port_ops = {
28715 .inherits = &amd_base_port_ops,
28716 .cable_detect = ata_cable_unknown,
28717 .set_piomode = amd100_set_piomode,
28718 .set_dmamode = amd100_set_dmamode,
28719 };
28720
28721 -static struct ata_port_operations amd133_port_ops = {
28722 +static const struct ata_port_operations amd133_port_ops = {
28723 .inherits = &amd_base_port_ops,
28724 .cable_detect = amd_cable_detect,
28725 .set_piomode = amd133_set_piomode,
28726 @@ -433,13 +433,13 @@ static const struct ata_port_operations nv_base_port_ops = {
28727 .host_stop = nv_host_stop,
28728 };
28729
28730 -static struct ata_port_operations nv100_port_ops = {
28731 +static const struct ata_port_operations nv100_port_ops = {
28732 .inherits = &nv_base_port_ops,
28733 .set_piomode = nv100_set_piomode,
28734 .set_dmamode = nv100_set_dmamode,
28735 };
28736
28737 -static struct ata_port_operations nv133_port_ops = {
28738 +static const struct ata_port_operations nv133_port_ops = {
28739 .inherits = &nv_base_port_ops,
28740 .set_piomode = nv133_set_piomode,
28741 .set_dmamode = nv133_set_dmamode,
28742 diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c
28743 index d332cfd..4b7eaae 100644
28744 --- a/drivers/ata/pata_artop.c
28745 +++ b/drivers/ata/pata_artop.c
28746 @@ -311,7 +311,7 @@ static struct scsi_host_template artop_sht = {
28747 ATA_BMDMA_SHT(DRV_NAME),
28748 };
28749
28750 -static struct ata_port_operations artop6210_ops = {
28751 +static const struct ata_port_operations artop6210_ops = {
28752 .inherits = &ata_bmdma_port_ops,
28753 .cable_detect = ata_cable_40wire,
28754 .set_piomode = artop6210_set_piomode,
28755 @@ -320,7 +320,7 @@ static struct ata_port_operations artop6210_ops = {
28756 .qc_defer = artop6210_qc_defer,
28757 };
28758
28759 -static struct ata_port_operations artop6260_ops = {
28760 +static const struct ata_port_operations artop6260_ops = {
28761 .inherits = &ata_bmdma_port_ops,
28762 .cable_detect = artop6260_cable_detect,
28763 .set_piomode = artop6260_set_piomode,
28764 diff --git a/drivers/ata/pata_at32.c b/drivers/ata/pata_at32.c
28765 index 5c129f9..7bb7ccb 100644
28766 --- a/drivers/ata/pata_at32.c
28767 +++ b/drivers/ata/pata_at32.c
28768 @@ -172,7 +172,7 @@ static struct scsi_host_template at32_sht = {
28769 ATA_PIO_SHT(DRV_NAME),
28770 };
28771
28772 -static struct ata_port_operations at32_port_ops = {
28773 +static const struct ata_port_operations at32_port_ops = {
28774 .inherits = &ata_sff_port_ops,
28775 .cable_detect = ata_cable_40wire,
28776 .set_piomode = pata_at32_set_piomode,
28777 diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c
28778 index 41c94b1..829006d 100644
28779 --- a/drivers/ata/pata_at91.c
28780 +++ b/drivers/ata/pata_at91.c
28781 @@ -195,7 +195,7 @@ static struct scsi_host_template pata_at91_sht = {
28782 ATA_PIO_SHT(DRV_NAME),
28783 };
28784
28785 -static struct ata_port_operations pata_at91_port_ops = {
28786 +static const struct ata_port_operations pata_at91_port_ops = {
28787 .inherits = &ata_sff_port_ops,
28788
28789 .sff_data_xfer = pata_at91_data_xfer_noirq,
28790 diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
28791 index ae4454d..d391eb4 100644
28792 --- a/drivers/ata/pata_atiixp.c
28793 +++ b/drivers/ata/pata_atiixp.c
28794 @@ -205,7 +205,7 @@ static struct scsi_host_template atiixp_sht = {
28795 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
28796 };
28797
28798 -static struct ata_port_operations atiixp_port_ops = {
28799 +static const struct ata_port_operations atiixp_port_ops = {
28800 .inherits = &ata_bmdma_port_ops,
28801
28802 .qc_prep = ata_sff_dumb_qc_prep,
28803 diff --git a/drivers/ata/pata_atp867x.c b/drivers/ata/pata_atp867x.c
28804 index 6fe7ded..2a425dc 100644
28805 --- a/drivers/ata/pata_atp867x.c
28806 +++ b/drivers/ata/pata_atp867x.c
28807 @@ -274,7 +274,7 @@ static struct scsi_host_template atp867x_sht = {
28808 ATA_BMDMA_SHT(DRV_NAME),
28809 };
28810
28811 -static struct ata_port_operations atp867x_ops = {
28812 +static const struct ata_port_operations atp867x_ops = {
28813 .inherits = &ata_bmdma_port_ops,
28814 .cable_detect = atp867x_cable_detect,
28815 .set_piomode = atp867x_set_piomode,
28816 diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
28817 index c4b47a3..b27a367 100644
28818 --- a/drivers/ata/pata_bf54x.c
28819 +++ b/drivers/ata/pata_bf54x.c
28820 @@ -1464,7 +1464,7 @@ static struct scsi_host_template bfin_sht = {
28821 .dma_boundary = ATA_DMA_BOUNDARY,
28822 };
28823
28824 -static struct ata_port_operations bfin_pata_ops = {
28825 +static const struct ata_port_operations bfin_pata_ops = {
28826 .inherits = &ata_sff_port_ops,
28827
28828 .set_piomode = bfin_set_piomode,
28829 diff --git a/drivers/ata/pata_cmd640.c b/drivers/ata/pata_cmd640.c
28830 index 5acf9fa..84248be 100644
28831 --- a/drivers/ata/pata_cmd640.c
28832 +++ b/drivers/ata/pata_cmd640.c
28833 @@ -168,7 +168,7 @@ static struct scsi_host_template cmd640_sht = {
28834 ATA_BMDMA_SHT(DRV_NAME),
28835 };
28836
28837 -static struct ata_port_operations cmd640_port_ops = {
28838 +static const struct ata_port_operations cmd640_port_ops = {
28839 .inherits = &ata_bmdma_port_ops,
28840 /* In theory xfer_noirq is not needed once we kill the prefetcher */
28841 .sff_data_xfer = ata_sff_data_xfer_noirq,
28842 diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
28843 index ccd2694..c869c3d 100644
28844 --- a/drivers/ata/pata_cmd64x.c
28845 +++ b/drivers/ata/pata_cmd64x.c
28846 @@ -271,18 +271,18 @@ static const struct ata_port_operations cmd64x_base_ops = {
28847 .set_dmamode = cmd64x_set_dmamode,
28848 };
28849
28850 -static struct ata_port_operations cmd64x_port_ops = {
28851 +static const struct ata_port_operations cmd64x_port_ops = {
28852 .inherits = &cmd64x_base_ops,
28853 .cable_detect = ata_cable_40wire,
28854 };
28855
28856 -static struct ata_port_operations cmd646r1_port_ops = {
28857 +static const struct ata_port_operations cmd646r1_port_ops = {
28858 .inherits = &cmd64x_base_ops,
28859 .bmdma_stop = cmd646r1_bmdma_stop,
28860 .cable_detect = ata_cable_40wire,
28861 };
28862
28863 -static struct ata_port_operations cmd648_port_ops = {
28864 +static const struct ata_port_operations cmd648_port_ops = {
28865 .inherits = &cmd64x_base_ops,
28866 .bmdma_stop = cmd648_bmdma_stop,
28867 .cable_detect = cmd648_cable_detect,
28868 diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c
28869 index 0df83cf..d7595b0 100644
28870 --- a/drivers/ata/pata_cs5520.c
28871 +++ b/drivers/ata/pata_cs5520.c
28872 @@ -144,7 +144,7 @@ static struct scsi_host_template cs5520_sht = {
28873 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
28874 };
28875
28876 -static struct ata_port_operations cs5520_port_ops = {
28877 +static const struct ata_port_operations cs5520_port_ops = {
28878 .inherits = &ata_bmdma_port_ops,
28879 .qc_prep = ata_sff_dumb_qc_prep,
28880 .cable_detect = ata_cable_40wire,
28881 diff --git a/drivers/ata/pata_cs5530.c b/drivers/ata/pata_cs5530.c
28882 index c974b05..6d26b11 100644
28883 --- a/drivers/ata/pata_cs5530.c
28884 +++ b/drivers/ata/pata_cs5530.c
28885 @@ -164,7 +164,7 @@ static struct scsi_host_template cs5530_sht = {
28886 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
28887 };
28888
28889 -static struct ata_port_operations cs5530_port_ops = {
28890 +static const struct ata_port_operations cs5530_port_ops = {
28891 .inherits = &ata_bmdma_port_ops,
28892
28893 .qc_prep = ata_sff_dumb_qc_prep,
28894 diff --git a/drivers/ata/pata_cs5535.c b/drivers/ata/pata_cs5535.c
28895 index 403f561..aacd26b 100644
28896 --- a/drivers/ata/pata_cs5535.c
28897 +++ b/drivers/ata/pata_cs5535.c
28898 @@ -160,7 +160,7 @@ static struct scsi_host_template cs5535_sht = {
28899 ATA_BMDMA_SHT(DRV_NAME),
28900 };
28901
28902 -static struct ata_port_operations cs5535_port_ops = {
28903 +static const struct ata_port_operations cs5535_port_ops = {
28904 .inherits = &ata_bmdma_port_ops,
28905 .cable_detect = cs5535_cable_detect,
28906 .set_piomode = cs5535_set_piomode,
28907 diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c
28908 index 6da4cb4..de24a25 100644
28909 --- a/drivers/ata/pata_cs5536.c
28910 +++ b/drivers/ata/pata_cs5536.c
28911 @@ -223,7 +223,7 @@ static struct scsi_host_template cs5536_sht = {
28912 ATA_BMDMA_SHT(DRV_NAME),
28913 };
28914
28915 -static struct ata_port_operations cs5536_port_ops = {
28916 +static const struct ata_port_operations cs5536_port_ops = {
28917 .inherits = &ata_bmdma_port_ops,
28918 .cable_detect = cs5536_cable_detect,
28919 .set_piomode = cs5536_set_piomode,
28920 diff --git a/drivers/ata/pata_cypress.c b/drivers/ata/pata_cypress.c
28921 index 8fb040b..b16a9c9 100644
28922 --- a/drivers/ata/pata_cypress.c
28923 +++ b/drivers/ata/pata_cypress.c
28924 @@ -113,7 +113,7 @@ static struct scsi_host_template cy82c693_sht = {
28925 ATA_BMDMA_SHT(DRV_NAME),
28926 };
28927
28928 -static struct ata_port_operations cy82c693_port_ops = {
28929 +static const struct ata_port_operations cy82c693_port_ops = {
28930 .inherits = &ata_bmdma_port_ops,
28931 .cable_detect = ata_cable_40wire,
28932 .set_piomode = cy82c693_set_piomode,
28933 diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c
28934 index 2a6412f..555ee11 100644
28935 --- a/drivers/ata/pata_efar.c
28936 +++ b/drivers/ata/pata_efar.c
28937 @@ -222,7 +222,7 @@ static struct scsi_host_template efar_sht = {
28938 ATA_BMDMA_SHT(DRV_NAME),
28939 };
28940
28941 -static struct ata_port_operations efar_ops = {
28942 +static const struct ata_port_operations efar_ops = {
28943 .inherits = &ata_bmdma_port_ops,
28944 .cable_detect = efar_cable_detect,
28945 .set_piomode = efar_set_piomode,
28946 diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
28947 index b9d8836..0b92030 100644
28948 --- a/drivers/ata/pata_hpt366.c
28949 +++ b/drivers/ata/pata_hpt366.c
28950 @@ -282,7 +282,7 @@ static struct scsi_host_template hpt36x_sht = {
28951 * Configuration for HPT366/68
28952 */
28953
28954 -static struct ata_port_operations hpt366_port_ops = {
28955 +static const struct ata_port_operations hpt366_port_ops = {
28956 .inherits = &ata_bmdma_port_ops,
28957 .cable_detect = hpt36x_cable_detect,
28958 .mode_filter = hpt366_filter,
28959 diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
28960 index 5af7f19..00c4980 100644
28961 --- a/drivers/ata/pata_hpt37x.c
28962 +++ b/drivers/ata/pata_hpt37x.c
28963 @@ -576,7 +576,7 @@ static struct scsi_host_template hpt37x_sht = {
28964 * Configuration for HPT370
28965 */
28966
28967 -static struct ata_port_operations hpt370_port_ops = {
28968 +static const struct ata_port_operations hpt370_port_ops = {
28969 .inherits = &ata_bmdma_port_ops,
28970
28971 .bmdma_stop = hpt370_bmdma_stop,
28972 @@ -591,7 +591,7 @@ static struct ata_port_operations hpt370_port_ops = {
28973 * Configuration for HPT370A. Close to 370 but less filters
28974 */
28975
28976 -static struct ata_port_operations hpt370a_port_ops = {
28977 +static const struct ata_port_operations hpt370a_port_ops = {
28978 .inherits = &hpt370_port_ops,
28979 .mode_filter = hpt370a_filter,
28980 };
28981 @@ -601,7 +601,7 @@ static struct ata_port_operations hpt370a_port_ops = {
28982 * and DMA mode setting functionality.
28983 */
28984
28985 -static struct ata_port_operations hpt372_port_ops = {
28986 +static const struct ata_port_operations hpt372_port_ops = {
28987 .inherits = &ata_bmdma_port_ops,
28988
28989 .bmdma_stop = hpt37x_bmdma_stop,
28990 @@ -616,7 +616,7 @@ static struct ata_port_operations hpt372_port_ops = {
28991 * but we have a different cable detection procedure for function 1.
28992 */
28993
28994 -static struct ata_port_operations hpt374_fn1_port_ops = {
28995 +static const struct ata_port_operations hpt374_fn1_port_ops = {
28996 .inherits = &hpt372_port_ops,
28997 .prereset = hpt374_fn1_pre_reset,
28998 };
28999 diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
29000 index 100f227..2e39382 100644
29001 --- a/drivers/ata/pata_hpt3x2n.c
29002 +++ b/drivers/ata/pata_hpt3x2n.c
29003 @@ -337,7 +337,7 @@ static struct scsi_host_template hpt3x2n_sht = {
29004 * Configuration for HPT3x2n.
29005 */
29006
29007 -static struct ata_port_operations hpt3x2n_port_ops = {
29008 +static const struct ata_port_operations hpt3x2n_port_ops = {
29009 .inherits = &ata_bmdma_port_ops,
29010
29011 .bmdma_stop = hpt3x2n_bmdma_stop,
29012 diff --git a/drivers/ata/pata_hpt3x3.c b/drivers/ata/pata_hpt3x3.c
29013 index 7e31025..6fca8f4 100644
29014 --- a/drivers/ata/pata_hpt3x3.c
29015 +++ b/drivers/ata/pata_hpt3x3.c
29016 @@ -141,7 +141,7 @@ static struct scsi_host_template hpt3x3_sht = {
29017 ATA_BMDMA_SHT(DRV_NAME),
29018 };
29019
29020 -static struct ata_port_operations hpt3x3_port_ops = {
29021 +static const struct ata_port_operations hpt3x3_port_ops = {
29022 .inherits = &ata_bmdma_port_ops,
29023 .cable_detect = ata_cable_40wire,
29024 .set_piomode = hpt3x3_set_piomode,
29025 diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c
29026 index b663b7f..9a26c2a 100644
29027 --- a/drivers/ata/pata_icside.c
29028 +++ b/drivers/ata/pata_icside.c
29029 @@ -319,7 +319,7 @@ static void pata_icside_postreset(struct ata_link *link, unsigned int *classes)
29030 }
29031 }
29032
29033 -static struct ata_port_operations pata_icside_port_ops = {
29034 +static const struct ata_port_operations pata_icside_port_ops = {
29035 .inherits = &ata_sff_port_ops,
29036 /* no need to build any PRD tables for DMA */
29037 .qc_prep = ata_noop_qc_prep,
29038 diff --git a/drivers/ata/pata_isapnp.c b/drivers/ata/pata_isapnp.c
29039 index 4bceb88..457dfb6 100644
29040 --- a/drivers/ata/pata_isapnp.c
29041 +++ b/drivers/ata/pata_isapnp.c
29042 @@ -23,12 +23,12 @@ static struct scsi_host_template isapnp_sht = {
29043 ATA_PIO_SHT(DRV_NAME),
29044 };
29045
29046 -static struct ata_port_operations isapnp_port_ops = {
29047 +static const struct ata_port_operations isapnp_port_ops = {
29048 .inherits = &ata_sff_port_ops,
29049 .cable_detect = ata_cable_40wire,
29050 };
29051
29052 -static struct ata_port_operations isapnp_noalt_port_ops = {
29053 +static const struct ata_port_operations isapnp_noalt_port_ops = {
29054 .inherits = &ata_sff_port_ops,
29055 .cable_detect = ata_cable_40wire,
29056 /* No altstatus so we don't want to use the lost interrupt poll */
29057 diff --git a/drivers/ata/pata_it8213.c b/drivers/ata/pata_it8213.c
29058 index f156da8..24976e2 100644
29059 --- a/drivers/ata/pata_it8213.c
29060 +++ b/drivers/ata/pata_it8213.c
29061 @@ -234,7 +234,7 @@ static struct scsi_host_template it8213_sht = {
29062 };
29063
29064
29065 -static struct ata_port_operations it8213_ops = {
29066 +static const struct ata_port_operations it8213_ops = {
29067 .inherits = &ata_bmdma_port_ops,
29068 .cable_detect = it8213_cable_detect,
29069 .set_piomode = it8213_set_piomode,
29070 diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
29071 index 188bc2f..ca9e785 100644
29072 --- a/drivers/ata/pata_it821x.c
29073 +++ b/drivers/ata/pata_it821x.c
29074 @@ -800,7 +800,7 @@ static struct scsi_host_template it821x_sht = {
29075 ATA_BMDMA_SHT(DRV_NAME),
29076 };
29077
29078 -static struct ata_port_operations it821x_smart_port_ops = {
29079 +static const struct ata_port_operations it821x_smart_port_ops = {
29080 .inherits = &ata_bmdma_port_ops,
29081
29082 .check_atapi_dma= it821x_check_atapi_dma,
29083 @@ -814,7 +814,7 @@ static struct ata_port_operations it821x_smart_port_ops = {
29084 .port_start = it821x_port_start,
29085 };
29086
29087 -static struct ata_port_operations it821x_passthru_port_ops = {
29088 +static const struct ata_port_operations it821x_passthru_port_ops = {
29089 .inherits = &ata_bmdma_port_ops,
29090
29091 .check_atapi_dma= it821x_check_atapi_dma,
29092 @@ -830,7 +830,7 @@ static struct ata_port_operations it821x_passthru_port_ops = {
29093 .port_start = it821x_port_start,
29094 };
29095
29096 -static struct ata_port_operations it821x_rdc_port_ops = {
29097 +static const struct ata_port_operations it821x_rdc_port_ops = {
29098 .inherits = &ata_bmdma_port_ops,
29099
29100 .check_atapi_dma= it821x_check_atapi_dma,
29101 diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
29102 index ba54b08..4b952b7 100644
29103 --- a/drivers/ata/pata_ixp4xx_cf.c
29104 +++ b/drivers/ata/pata_ixp4xx_cf.c
29105 @@ -89,7 +89,7 @@ static struct scsi_host_template ixp4xx_sht = {
29106 ATA_PIO_SHT(DRV_NAME),
29107 };
29108
29109 -static struct ata_port_operations ixp4xx_port_ops = {
29110 +static const struct ata_port_operations ixp4xx_port_ops = {
29111 .inherits = &ata_sff_port_ops,
29112 .sff_data_xfer = ixp4xx_mmio_data_xfer,
29113 .cable_detect = ata_cable_40wire,
29114 diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c
29115 index 3a1474a..434b0ff 100644
29116 --- a/drivers/ata/pata_jmicron.c
29117 +++ b/drivers/ata/pata_jmicron.c
29118 @@ -111,7 +111,7 @@ static struct scsi_host_template jmicron_sht = {
29119 ATA_BMDMA_SHT(DRV_NAME),
29120 };
29121
29122 -static struct ata_port_operations jmicron_ops = {
29123 +static const struct ata_port_operations jmicron_ops = {
29124 .inherits = &ata_bmdma_port_ops,
29125 .prereset = jmicron_pre_reset,
29126 };
29127 diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
29128 index 6932e56..220e71d 100644
29129 --- a/drivers/ata/pata_legacy.c
29130 +++ b/drivers/ata/pata_legacy.c
29131 @@ -106,7 +106,7 @@ struct legacy_probe {
29132
29133 struct legacy_controller {
29134 const char *name;
29135 - struct ata_port_operations *ops;
29136 + const struct ata_port_operations *ops;
29137 unsigned int pio_mask;
29138 unsigned int flags;
29139 unsigned int pflags;
29140 @@ -223,12 +223,12 @@ static const struct ata_port_operations legacy_base_port_ops = {
29141 * pio_mask as well.
29142 */
29143
29144 -static struct ata_port_operations simple_port_ops = {
29145 +static const struct ata_port_operations simple_port_ops = {
29146 .inherits = &legacy_base_port_ops,
29147 .sff_data_xfer = ata_sff_data_xfer_noirq,
29148 };
29149
29150 -static struct ata_port_operations legacy_port_ops = {
29151 +static const struct ata_port_operations legacy_port_ops = {
29152 .inherits = &legacy_base_port_ops,
29153 .sff_data_xfer = ata_sff_data_xfer_noirq,
29154 .set_mode = legacy_set_mode,
29155 @@ -324,7 +324,7 @@ static unsigned int pdc_data_xfer_vlb(struct ata_device *dev,
29156 return buflen;
29157 }
29158
29159 -static struct ata_port_operations pdc20230_port_ops = {
29160 +static const struct ata_port_operations pdc20230_port_ops = {
29161 .inherits = &legacy_base_port_ops,
29162 .set_piomode = pdc20230_set_piomode,
29163 .sff_data_xfer = pdc_data_xfer_vlb,
29164 @@ -357,7 +357,7 @@ static void ht6560a_set_piomode(struct ata_port *ap, struct ata_device *adev)
29165 ioread8(ap->ioaddr.status_addr);
29166 }
29167
29168 -static struct ata_port_operations ht6560a_port_ops = {
29169 +static const struct ata_port_operations ht6560a_port_ops = {
29170 .inherits = &legacy_base_port_ops,
29171 .set_piomode = ht6560a_set_piomode,
29172 };
29173 @@ -400,7 +400,7 @@ static void ht6560b_set_piomode(struct ata_port *ap, struct ata_device *adev)
29174 ioread8(ap->ioaddr.status_addr);
29175 }
29176
29177 -static struct ata_port_operations ht6560b_port_ops = {
29178 +static const struct ata_port_operations ht6560b_port_ops = {
29179 .inherits = &legacy_base_port_ops,
29180 .set_piomode = ht6560b_set_piomode,
29181 };
29182 @@ -499,7 +499,7 @@ static void opti82c611a_set_piomode(struct ata_port *ap,
29183 }
29184
29185
29186 -static struct ata_port_operations opti82c611a_port_ops = {
29187 +static const struct ata_port_operations opti82c611a_port_ops = {
29188 .inherits = &legacy_base_port_ops,
29189 .set_piomode = opti82c611a_set_piomode,
29190 };
29191 @@ -609,7 +609,7 @@ static unsigned int opti82c46x_qc_issue(struct ata_queued_cmd *qc)
29192 return ata_sff_qc_issue(qc);
29193 }
29194
29195 -static struct ata_port_operations opti82c46x_port_ops = {
29196 +static const struct ata_port_operations opti82c46x_port_ops = {
29197 .inherits = &legacy_base_port_ops,
29198 .set_piomode = opti82c46x_set_piomode,
29199 .qc_issue = opti82c46x_qc_issue,
29200 @@ -771,20 +771,20 @@ static int qdi_port(struct platform_device *dev,
29201 return 0;
29202 }
29203
29204 -static struct ata_port_operations qdi6500_port_ops = {
29205 +static const struct ata_port_operations qdi6500_port_ops = {
29206 .inherits = &legacy_base_port_ops,
29207 .set_piomode = qdi6500_set_piomode,
29208 .qc_issue = qdi_qc_issue,
29209 .sff_data_xfer = vlb32_data_xfer,
29210 };
29211
29212 -static struct ata_port_operations qdi6580_port_ops = {
29213 +static const struct ata_port_operations qdi6580_port_ops = {
29214 .inherits = &legacy_base_port_ops,
29215 .set_piomode = qdi6580_set_piomode,
29216 .sff_data_xfer = vlb32_data_xfer,
29217 };
29218
29219 -static struct ata_port_operations qdi6580dp_port_ops = {
29220 +static const struct ata_port_operations qdi6580dp_port_ops = {
29221 .inherits = &legacy_base_port_ops,
29222 .set_piomode = qdi6580dp_set_piomode,
29223 .sff_data_xfer = vlb32_data_xfer,
29224 @@ -855,7 +855,7 @@ static int winbond_port(struct platform_device *dev,
29225 return 0;
29226 }
29227
29228 -static struct ata_port_operations winbond_port_ops = {
29229 +static const struct ata_port_operations winbond_port_ops = {
29230 .inherits = &legacy_base_port_ops,
29231 .set_piomode = winbond_set_piomode,
29232 .sff_data_xfer = vlb32_data_xfer,
29233 @@ -978,7 +978,7 @@ static __init int legacy_init_one(struct legacy_probe *probe)
29234 int pio_modes = controller->pio_mask;
29235 unsigned long io = probe->port;
29236 u32 mask = (1 << probe->slot);
29237 - struct ata_port_operations *ops = controller->ops;
29238 + const struct ata_port_operations *ops = controller->ops;
29239 struct legacy_data *ld = &legacy_data[probe->slot];
29240 struct ata_host *host = NULL;
29241 struct ata_port *ap;
29242 diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
29243 index 2096fb7..4d090fc 100644
29244 --- a/drivers/ata/pata_marvell.c
29245 +++ b/drivers/ata/pata_marvell.c
29246 @@ -100,7 +100,7 @@ static struct scsi_host_template marvell_sht = {
29247 ATA_BMDMA_SHT(DRV_NAME),
29248 };
29249
29250 -static struct ata_port_operations marvell_ops = {
29251 +static const struct ata_port_operations marvell_ops = {
29252 .inherits = &ata_bmdma_port_ops,
29253 .cable_detect = marvell_cable_detect,
29254 .prereset = marvell_pre_reset,
29255 diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
29256 index 99d41be..7d56aa8 100644
29257 --- a/drivers/ata/pata_mpc52xx.c
29258 +++ b/drivers/ata/pata_mpc52xx.c
29259 @@ -609,7 +609,7 @@ static struct scsi_host_template mpc52xx_ata_sht = {
29260 ATA_PIO_SHT(DRV_NAME),
29261 };
29262
29263 -static struct ata_port_operations mpc52xx_ata_port_ops = {
29264 +static const struct ata_port_operations mpc52xx_ata_port_ops = {
29265 .inherits = &ata_bmdma_port_ops,
29266 .sff_dev_select = mpc52xx_ata_dev_select,
29267 .set_piomode = mpc52xx_ata_set_piomode,
29268 diff --git a/drivers/ata/pata_mpiix.c b/drivers/ata/pata_mpiix.c
29269 index b21f002..0a27e7f 100644
29270 --- a/drivers/ata/pata_mpiix.c
29271 +++ b/drivers/ata/pata_mpiix.c
29272 @@ -140,7 +140,7 @@ static struct scsi_host_template mpiix_sht = {
29273 ATA_PIO_SHT(DRV_NAME),
29274 };
29275
29276 -static struct ata_port_operations mpiix_port_ops = {
29277 +static const struct ata_port_operations mpiix_port_ops = {
29278 .inherits = &ata_sff_port_ops,
29279 .qc_issue = mpiix_qc_issue,
29280 .cable_detect = ata_cable_40wire,
29281 diff --git a/drivers/ata/pata_netcell.c b/drivers/ata/pata_netcell.c
29282 index f0d52f7..89c3be3 100644
29283 --- a/drivers/ata/pata_netcell.c
29284 +++ b/drivers/ata/pata_netcell.c
29285 @@ -34,7 +34,7 @@ static struct scsi_host_template netcell_sht = {
29286 ATA_BMDMA_SHT(DRV_NAME),
29287 };
29288
29289 -static struct ata_port_operations netcell_ops = {
29290 +static const struct ata_port_operations netcell_ops = {
29291 .inherits = &ata_bmdma_port_ops,
29292 .cable_detect = ata_cable_80wire,
29293 .read_id = netcell_read_id,
29294 diff --git a/drivers/ata/pata_ninja32.c b/drivers/ata/pata_ninja32.c
29295 index dd53a66..a3f4317 100644
29296 --- a/drivers/ata/pata_ninja32.c
29297 +++ b/drivers/ata/pata_ninja32.c
29298 @@ -81,7 +81,7 @@ static struct scsi_host_template ninja32_sht = {
29299 ATA_BMDMA_SHT(DRV_NAME),
29300 };
29301
29302 -static struct ata_port_operations ninja32_port_ops = {
29303 +static const struct ata_port_operations ninja32_port_ops = {
29304 .inherits = &ata_bmdma_port_ops,
29305 .sff_dev_select = ninja32_dev_select,
29306 .cable_detect = ata_cable_40wire,
29307 diff --git a/drivers/ata/pata_ns87410.c b/drivers/ata/pata_ns87410.c
29308 index ca53fac..9aa93ef 100644
29309 --- a/drivers/ata/pata_ns87410.c
29310 +++ b/drivers/ata/pata_ns87410.c
29311 @@ -132,7 +132,7 @@ static struct scsi_host_template ns87410_sht = {
29312 ATA_PIO_SHT(DRV_NAME),
29313 };
29314
29315 -static struct ata_port_operations ns87410_port_ops = {
29316 +static const struct ata_port_operations ns87410_port_ops = {
29317 .inherits = &ata_sff_port_ops,
29318 .qc_issue = ns87410_qc_issue,
29319 .cable_detect = ata_cable_40wire,
29320 diff --git a/drivers/ata/pata_ns87415.c b/drivers/ata/pata_ns87415.c
29321 index 773b159..55f454e 100644
29322 --- a/drivers/ata/pata_ns87415.c
29323 +++ b/drivers/ata/pata_ns87415.c
29324 @@ -299,7 +299,7 @@ static u8 ns87560_bmdma_status(struct ata_port *ap)
29325 }
29326 #endif /* 87560 SuperIO Support */
29327
29328 -static struct ata_port_operations ns87415_pata_ops = {
29329 +static const struct ata_port_operations ns87415_pata_ops = {
29330 .inherits = &ata_bmdma_port_ops,
29331
29332 .check_atapi_dma = ns87415_check_atapi_dma,
29333 @@ -313,7 +313,7 @@ static struct ata_port_operations ns87415_pata_ops = {
29334 };
29335
29336 #if defined(CONFIG_SUPERIO)
29337 -static struct ata_port_operations ns87560_pata_ops = {
29338 +static const struct ata_port_operations ns87560_pata_ops = {
29339 .inherits = &ns87415_pata_ops,
29340 .sff_tf_read = ns87560_tf_read,
29341 .sff_check_status = ns87560_check_status,
29342 diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
29343 index d6f6956..639295b 100644
29344 --- a/drivers/ata/pata_octeon_cf.c
29345 +++ b/drivers/ata/pata_octeon_cf.c
29346 @@ -801,6 +801,7 @@ static unsigned int octeon_cf_qc_issue(struct ata_queued_cmd *qc)
29347 return 0;
29348 }
29349
29350 +/* cannot be const */
29351 static struct ata_port_operations octeon_cf_ops = {
29352 .inherits = &ata_sff_port_ops,
29353 .check_atapi_dma = octeon_cf_check_atapi_dma,
29354 diff --git a/drivers/ata/pata_oldpiix.c b/drivers/ata/pata_oldpiix.c
29355 index 84ac503..adee1cd 100644
29356 --- a/drivers/ata/pata_oldpiix.c
29357 +++ b/drivers/ata/pata_oldpiix.c
29358 @@ -208,7 +208,7 @@ static struct scsi_host_template oldpiix_sht = {
29359 ATA_BMDMA_SHT(DRV_NAME),
29360 };
29361
29362 -static struct ata_port_operations oldpiix_pata_ops = {
29363 +static const struct ata_port_operations oldpiix_pata_ops = {
29364 .inherits = &ata_bmdma_port_ops,
29365 .qc_issue = oldpiix_qc_issue,
29366 .cable_detect = ata_cable_40wire,
29367 diff --git a/drivers/ata/pata_opti.c b/drivers/ata/pata_opti.c
29368 index 99eddda..3a4c0aa 100644
29369 --- a/drivers/ata/pata_opti.c
29370 +++ b/drivers/ata/pata_opti.c
29371 @@ -152,7 +152,7 @@ static struct scsi_host_template opti_sht = {
29372 ATA_PIO_SHT(DRV_NAME),
29373 };
29374
29375 -static struct ata_port_operations opti_port_ops = {
29376 +static const struct ata_port_operations opti_port_ops = {
29377 .inherits = &ata_sff_port_ops,
29378 .cable_detect = ata_cable_40wire,
29379 .set_piomode = opti_set_piomode,
29380 diff --git a/drivers/ata/pata_optidma.c b/drivers/ata/pata_optidma.c
29381 index 86885a4..8e9968d 100644
29382 --- a/drivers/ata/pata_optidma.c
29383 +++ b/drivers/ata/pata_optidma.c
29384 @@ -337,7 +337,7 @@ static struct scsi_host_template optidma_sht = {
29385 ATA_BMDMA_SHT(DRV_NAME),
29386 };
29387
29388 -static struct ata_port_operations optidma_port_ops = {
29389 +static const struct ata_port_operations optidma_port_ops = {
29390 .inherits = &ata_bmdma_port_ops,
29391 .cable_detect = ata_cable_40wire,
29392 .set_piomode = optidma_set_pio_mode,
29393 @@ -346,7 +346,7 @@ static struct ata_port_operations optidma_port_ops = {
29394 .prereset = optidma_pre_reset,
29395 };
29396
29397 -static struct ata_port_operations optiplus_port_ops = {
29398 +static const struct ata_port_operations optiplus_port_ops = {
29399 .inherits = &optidma_port_ops,
29400 .set_piomode = optiplus_set_pio_mode,
29401 .set_dmamode = optiplus_set_dma_mode,
29402 diff --git a/drivers/ata/pata_palmld.c b/drivers/ata/pata_palmld.c
29403 index 11fb4cc..1a14022 100644
29404 --- a/drivers/ata/pata_palmld.c
29405 +++ b/drivers/ata/pata_palmld.c
29406 @@ -37,7 +37,7 @@ static struct scsi_host_template palmld_sht = {
29407 ATA_PIO_SHT(DRV_NAME),
29408 };
29409
29410 -static struct ata_port_operations palmld_port_ops = {
29411 +static const struct ata_port_operations palmld_port_ops = {
29412 .inherits = &ata_sff_port_ops,
29413 .sff_data_xfer = ata_sff_data_xfer_noirq,
29414 .cable_detect = ata_cable_40wire,
29415 diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
29416 index dc99e26..7f4b1e4 100644
29417 --- a/drivers/ata/pata_pcmcia.c
29418 +++ b/drivers/ata/pata_pcmcia.c
29419 @@ -162,14 +162,14 @@ static struct scsi_host_template pcmcia_sht = {
29420 ATA_PIO_SHT(DRV_NAME),
29421 };
29422
29423 -static struct ata_port_operations pcmcia_port_ops = {
29424 +static const struct ata_port_operations pcmcia_port_ops = {
29425 .inherits = &ata_sff_port_ops,
29426 .sff_data_xfer = ata_sff_data_xfer_noirq,
29427 .cable_detect = ata_cable_40wire,
29428 .set_mode = pcmcia_set_mode,
29429 };
29430
29431 -static struct ata_port_operations pcmcia_8bit_port_ops = {
29432 +static const struct ata_port_operations pcmcia_8bit_port_ops = {
29433 .inherits = &ata_sff_port_ops,
29434 .sff_data_xfer = ata_data_xfer_8bit,
29435 .cable_detect = ata_cable_40wire,
29436 @@ -256,7 +256,7 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
29437 unsigned long io_base, ctl_base;
29438 void __iomem *io_addr, *ctl_addr;
29439 int n_ports = 1;
29440 - struct ata_port_operations *ops = &pcmcia_port_ops;
29441 + const struct ata_port_operations *ops = &pcmcia_port_ops;
29442
29443 info = kzalloc(sizeof(*info), GFP_KERNEL);
29444 if (info == NULL)
29445 diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
29446 index ca5cad0..3a1f125 100644
29447 --- a/drivers/ata/pata_pdc2027x.c
29448 +++ b/drivers/ata/pata_pdc2027x.c
29449 @@ -132,14 +132,14 @@ static struct scsi_host_template pdc2027x_sht = {
29450 ATA_BMDMA_SHT(DRV_NAME),
29451 };
29452
29453 -static struct ata_port_operations pdc2027x_pata100_ops = {
29454 +static const struct ata_port_operations pdc2027x_pata100_ops = {
29455 .inherits = &ata_bmdma_port_ops,
29456 .check_atapi_dma = pdc2027x_check_atapi_dma,
29457 .cable_detect = pdc2027x_cable_detect,
29458 .prereset = pdc2027x_prereset,
29459 };
29460
29461 -static struct ata_port_operations pdc2027x_pata133_ops = {
29462 +static const struct ata_port_operations pdc2027x_pata133_ops = {
29463 .inherits = &pdc2027x_pata100_ops,
29464 .mode_filter = pdc2027x_mode_filter,
29465 .set_piomode = pdc2027x_set_piomode,
29466 diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c
29467 index 2911120..4bf62aa 100644
29468 --- a/drivers/ata/pata_pdc202xx_old.c
29469 +++ b/drivers/ata/pata_pdc202xx_old.c
29470 @@ -274,7 +274,7 @@ static struct scsi_host_template pdc202xx_sht = {
29471 ATA_BMDMA_SHT(DRV_NAME),
29472 };
29473
29474 -static struct ata_port_operations pdc2024x_port_ops = {
29475 +static const struct ata_port_operations pdc2024x_port_ops = {
29476 .inherits = &ata_bmdma_port_ops,
29477
29478 .cable_detect = ata_cable_40wire,
29479 @@ -284,7 +284,7 @@ static struct ata_port_operations pdc2024x_port_ops = {
29480 .sff_exec_command = pdc202xx_exec_command,
29481 };
29482
29483 -static struct ata_port_operations pdc2026x_port_ops = {
29484 +static const struct ata_port_operations pdc2026x_port_ops = {
29485 .inherits = &pdc2024x_port_ops,
29486
29487 .check_atapi_dma = pdc2026x_check_atapi_dma,
29488 diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
29489 index 3f6ebc6..a18c358 100644
29490 --- a/drivers/ata/pata_platform.c
29491 +++ b/drivers/ata/pata_platform.c
29492 @@ -48,7 +48,7 @@ static struct scsi_host_template pata_platform_sht = {
29493 ATA_PIO_SHT(DRV_NAME),
29494 };
29495
29496 -static struct ata_port_operations pata_platform_port_ops = {
29497 +static const struct ata_port_operations pata_platform_port_ops = {
29498 .inherits = &ata_sff_port_ops,
29499 .sff_data_xfer = ata_sff_data_xfer_noirq,
29500 .cable_detect = ata_cable_unknown,
29501 diff --git a/drivers/ata/pata_qdi.c b/drivers/ata/pata_qdi.c
29502 index 45879dc..165a9f9 100644
29503 --- a/drivers/ata/pata_qdi.c
29504 +++ b/drivers/ata/pata_qdi.c
29505 @@ -157,7 +157,7 @@ static struct scsi_host_template qdi_sht = {
29506 ATA_PIO_SHT(DRV_NAME),
29507 };
29508
29509 -static struct ata_port_operations qdi6500_port_ops = {
29510 +static const struct ata_port_operations qdi6500_port_ops = {
29511 .inherits = &ata_sff_port_ops,
29512 .qc_issue = qdi_qc_issue,
29513 .sff_data_xfer = qdi_data_xfer,
29514 @@ -165,7 +165,7 @@ static struct ata_port_operations qdi6500_port_ops = {
29515 .set_piomode = qdi6500_set_piomode,
29516 };
29517
29518 -static struct ata_port_operations qdi6580_port_ops = {
29519 +static const struct ata_port_operations qdi6580_port_ops = {
29520 .inherits = &qdi6500_port_ops,
29521 .set_piomode = qdi6580_set_piomode,
29522 };
29523 diff --git a/drivers/ata/pata_radisys.c b/drivers/ata/pata_radisys.c
29524 index 4401b33..716c5cc 100644
29525 --- a/drivers/ata/pata_radisys.c
29526 +++ b/drivers/ata/pata_radisys.c
29527 @@ -187,7 +187,7 @@ static struct scsi_host_template radisys_sht = {
29528 ATA_BMDMA_SHT(DRV_NAME),
29529 };
29530
29531 -static struct ata_port_operations radisys_pata_ops = {
29532 +static const struct ata_port_operations radisys_pata_ops = {
29533 .inherits = &ata_bmdma_port_ops,
29534 .qc_issue = radisys_qc_issue,
29535 .cable_detect = ata_cable_unknown,
29536 diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
29537 index 45f1e10..fab6bca 100644
29538 --- a/drivers/ata/pata_rb532_cf.c
29539 +++ b/drivers/ata/pata_rb532_cf.c
29540 @@ -68,7 +68,7 @@ static irqreturn_t rb532_pata_irq_handler(int irq, void *dev_instance)
29541 return IRQ_HANDLED;
29542 }
29543
29544 -static struct ata_port_operations rb532_pata_port_ops = {
29545 +static const struct ata_port_operations rb532_pata_port_ops = {
29546 .inherits = &ata_sff_port_ops,
29547 .sff_data_xfer = ata_sff_data_xfer32,
29548 };
29549 diff --git a/drivers/ata/pata_rdc.c b/drivers/ata/pata_rdc.c
29550 index c843a1e..b5853c3 100644
29551 --- a/drivers/ata/pata_rdc.c
29552 +++ b/drivers/ata/pata_rdc.c
29553 @@ -272,7 +272,7 @@ static void rdc_set_dmamode(struct ata_port *ap, struct ata_device *adev)
29554 pci_write_config_byte(dev, 0x48, udma_enable);
29555 }
29556
29557 -static struct ata_port_operations rdc_pata_ops = {
29558 +static const struct ata_port_operations rdc_pata_ops = {
29559 .inherits = &ata_bmdma32_port_ops,
29560 .cable_detect = rdc_pata_cable_detect,
29561 .set_piomode = rdc_set_piomode,
29562 diff --git a/drivers/ata/pata_rz1000.c b/drivers/ata/pata_rz1000.c
29563 index a5e4dfe..080c8c9 100644
29564 --- a/drivers/ata/pata_rz1000.c
29565 +++ b/drivers/ata/pata_rz1000.c
29566 @@ -54,7 +54,7 @@ static struct scsi_host_template rz1000_sht = {
29567 ATA_PIO_SHT(DRV_NAME),
29568 };
29569
29570 -static struct ata_port_operations rz1000_port_ops = {
29571 +static const struct ata_port_operations rz1000_port_ops = {
29572 .inherits = &ata_sff_port_ops,
29573 .cable_detect = ata_cable_40wire,
29574 .set_mode = rz1000_set_mode,
29575 diff --git a/drivers/ata/pata_sc1200.c b/drivers/ata/pata_sc1200.c
29576 index 3bbed83..e309daf 100644
29577 --- a/drivers/ata/pata_sc1200.c
29578 +++ b/drivers/ata/pata_sc1200.c
29579 @@ -207,7 +207,7 @@ static struct scsi_host_template sc1200_sht = {
29580 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
29581 };
29582
29583 -static struct ata_port_operations sc1200_port_ops = {
29584 +static const struct ata_port_operations sc1200_port_ops = {
29585 .inherits = &ata_bmdma_port_ops,
29586 .qc_prep = ata_sff_dumb_qc_prep,
29587 .qc_issue = sc1200_qc_issue,
29588 diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
29589 index 4257d6b..4c1d9d5 100644
29590 --- a/drivers/ata/pata_scc.c
29591 +++ b/drivers/ata/pata_scc.c
29592 @@ -965,7 +965,7 @@ static struct scsi_host_template scc_sht = {
29593 ATA_BMDMA_SHT(DRV_NAME),
29594 };
29595
29596 -static struct ata_port_operations scc_pata_ops = {
29597 +static const struct ata_port_operations scc_pata_ops = {
29598 .inherits = &ata_bmdma_port_ops,
29599
29600 .set_piomode = scc_set_piomode,
29601 diff --git a/drivers/ata/pata_sch.c b/drivers/ata/pata_sch.c
29602 index 99cceb4..e2e0a87 100644
29603 --- a/drivers/ata/pata_sch.c
29604 +++ b/drivers/ata/pata_sch.c
29605 @@ -75,7 +75,7 @@ static struct scsi_host_template sch_sht = {
29606 ATA_BMDMA_SHT(DRV_NAME),
29607 };
29608
29609 -static struct ata_port_operations sch_pata_ops = {
29610 +static const struct ata_port_operations sch_pata_ops = {
29611 .inherits = &ata_bmdma_port_ops,
29612 .cable_detect = ata_cable_unknown,
29613 .set_piomode = sch_set_piomode,
29614 diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
29615 index beaed12..39969f1 100644
29616 --- a/drivers/ata/pata_serverworks.c
29617 +++ b/drivers/ata/pata_serverworks.c
29618 @@ -299,7 +299,7 @@ static struct scsi_host_template serverworks_sht = {
29619 ATA_BMDMA_SHT(DRV_NAME),
29620 };
29621
29622 -static struct ata_port_operations serverworks_osb4_port_ops = {
29623 +static const struct ata_port_operations serverworks_osb4_port_ops = {
29624 .inherits = &ata_bmdma_port_ops,
29625 .cable_detect = serverworks_cable_detect,
29626 .mode_filter = serverworks_osb4_filter,
29627 @@ -307,7 +307,7 @@ static struct ata_port_operations serverworks_osb4_port_ops = {
29628 .set_dmamode = serverworks_set_dmamode,
29629 };
29630
29631 -static struct ata_port_operations serverworks_csb_port_ops = {
29632 +static const struct ata_port_operations serverworks_csb_port_ops = {
29633 .inherits = &serverworks_osb4_port_ops,
29634 .mode_filter = serverworks_csb_filter,
29635 };
29636 diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c
29637 index a2ace48..0463b44 100644
29638 --- a/drivers/ata/pata_sil680.c
29639 +++ b/drivers/ata/pata_sil680.c
29640 @@ -194,7 +194,7 @@ static struct scsi_host_template sil680_sht = {
29641 ATA_BMDMA_SHT(DRV_NAME),
29642 };
29643
29644 -static struct ata_port_operations sil680_port_ops = {
29645 +static const struct ata_port_operations sil680_port_ops = {
29646 .inherits = &ata_bmdma32_port_ops,
29647 .cable_detect = sil680_cable_detect,
29648 .set_piomode = sil680_set_piomode,
29649 diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
29650 index 488e77b..b3724d5 100644
29651 --- a/drivers/ata/pata_sis.c
29652 +++ b/drivers/ata/pata_sis.c
29653 @@ -503,47 +503,47 @@ static struct scsi_host_template sis_sht = {
29654 ATA_BMDMA_SHT(DRV_NAME),
29655 };
29656
29657 -static struct ata_port_operations sis_133_for_sata_ops = {
29658 +static const struct ata_port_operations sis_133_for_sata_ops = {
29659 .inherits = &ata_bmdma_port_ops,
29660 .set_piomode = sis_133_set_piomode,
29661 .set_dmamode = sis_133_set_dmamode,
29662 .cable_detect = sis_133_cable_detect,
29663 };
29664
29665 -static struct ata_port_operations sis_base_ops = {
29666 +static const struct ata_port_operations sis_base_ops = {
29667 .inherits = &ata_bmdma_port_ops,
29668 .prereset = sis_pre_reset,
29669 };
29670
29671 -static struct ata_port_operations sis_133_ops = {
29672 +static const struct ata_port_operations sis_133_ops = {
29673 .inherits = &sis_base_ops,
29674 .set_piomode = sis_133_set_piomode,
29675 .set_dmamode = sis_133_set_dmamode,
29676 .cable_detect = sis_133_cable_detect,
29677 };
29678
29679 -static struct ata_port_operations sis_133_early_ops = {
29680 +static const struct ata_port_operations sis_133_early_ops = {
29681 .inherits = &sis_base_ops,
29682 .set_piomode = sis_100_set_piomode,
29683 .set_dmamode = sis_133_early_set_dmamode,
29684 .cable_detect = sis_66_cable_detect,
29685 };
29686
29687 -static struct ata_port_operations sis_100_ops = {
29688 +static const struct ata_port_operations sis_100_ops = {
29689 .inherits = &sis_base_ops,
29690 .set_piomode = sis_100_set_piomode,
29691 .set_dmamode = sis_100_set_dmamode,
29692 .cable_detect = sis_66_cable_detect,
29693 };
29694
29695 -static struct ata_port_operations sis_66_ops = {
29696 +static const struct ata_port_operations sis_66_ops = {
29697 .inherits = &sis_base_ops,
29698 .set_piomode = sis_old_set_piomode,
29699 .set_dmamode = sis_66_set_dmamode,
29700 .cable_detect = sis_66_cable_detect,
29701 };
29702
29703 -static struct ata_port_operations sis_old_ops = {
29704 +static const struct ata_port_operations sis_old_ops = {
29705 .inherits = &sis_base_ops,
29706 .set_piomode = sis_old_set_piomode,
29707 .set_dmamode = sis_old_set_dmamode,
29708 diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c
29709 index 29f733c..43e9ca0 100644
29710 --- a/drivers/ata/pata_sl82c105.c
29711 +++ b/drivers/ata/pata_sl82c105.c
29712 @@ -231,7 +231,7 @@ static struct scsi_host_template sl82c105_sht = {
29713 ATA_BMDMA_SHT(DRV_NAME),
29714 };
29715
29716 -static struct ata_port_operations sl82c105_port_ops = {
29717 +static const struct ata_port_operations sl82c105_port_ops = {
29718 .inherits = &ata_bmdma_port_ops,
29719 .qc_defer = sl82c105_qc_defer,
29720 .bmdma_start = sl82c105_bmdma_start,
29721 diff --git a/drivers/ata/pata_triflex.c b/drivers/ata/pata_triflex.c
29722 index f1f13ff..df39e99 100644
29723 --- a/drivers/ata/pata_triflex.c
29724 +++ b/drivers/ata/pata_triflex.c
29725 @@ -178,7 +178,7 @@ static struct scsi_host_template triflex_sht = {
29726 ATA_BMDMA_SHT(DRV_NAME),
29727 };
29728
29729 -static struct ata_port_operations triflex_port_ops = {
29730 +static const struct ata_port_operations triflex_port_ops = {
29731 .inherits = &ata_bmdma_port_ops,
29732 .bmdma_start = triflex_bmdma_start,
29733 .bmdma_stop = triflex_bmdma_stop,
29734 diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
29735 index 1d73b8d..98a4b29 100644
29736 --- a/drivers/ata/pata_via.c
29737 +++ b/drivers/ata/pata_via.c
29738 @@ -419,7 +419,7 @@ static struct scsi_host_template via_sht = {
29739 ATA_BMDMA_SHT(DRV_NAME),
29740 };
29741
29742 -static struct ata_port_operations via_port_ops = {
29743 +static const struct ata_port_operations via_port_ops = {
29744 .inherits = &ata_bmdma_port_ops,
29745 .cable_detect = via_cable_detect,
29746 .set_piomode = via_set_piomode,
29747 @@ -429,7 +429,7 @@ static struct ata_port_operations via_port_ops = {
29748 .port_start = via_port_start,
29749 };
29750
29751 -static struct ata_port_operations via_port_ops_noirq = {
29752 +static const struct ata_port_operations via_port_ops_noirq = {
29753 .inherits = &via_port_ops,
29754 .sff_data_xfer = ata_sff_data_xfer_noirq,
29755 };
29756 diff --git a/drivers/ata/pata_winbond.c b/drivers/ata/pata_winbond.c
29757 index 6d8619b..ad511c4 100644
29758 --- a/drivers/ata/pata_winbond.c
29759 +++ b/drivers/ata/pata_winbond.c
29760 @@ -125,7 +125,7 @@ static struct scsi_host_template winbond_sht = {
29761 ATA_PIO_SHT(DRV_NAME),
29762 };
29763
29764 -static struct ata_port_operations winbond_port_ops = {
29765 +static const struct ata_port_operations winbond_port_ops = {
29766 .inherits = &ata_sff_port_ops,
29767 .sff_data_xfer = winbond_data_xfer,
29768 .cable_detect = ata_cable_40wire,
29769 diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
29770 index 6c65b07..f996ec7 100644
29771 --- a/drivers/ata/pdc_adma.c
29772 +++ b/drivers/ata/pdc_adma.c
29773 @@ -145,7 +145,7 @@ static struct scsi_host_template adma_ata_sht = {
29774 .dma_boundary = ADMA_DMA_BOUNDARY,
29775 };
29776
29777 -static struct ata_port_operations adma_ata_ops = {
29778 +static const struct ata_port_operations adma_ata_ops = {
29779 .inherits = &ata_sff_port_ops,
29780
29781 .lost_interrupt = ATA_OP_NULL,
29782 diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
29783 index 172b57e..c49bc1e 100644
29784 --- a/drivers/ata/sata_fsl.c
29785 +++ b/drivers/ata/sata_fsl.c
29786 @@ -1258,7 +1258,7 @@ static struct scsi_host_template sata_fsl_sht = {
29787 .dma_boundary = ATA_DMA_BOUNDARY,
29788 };
29789
29790 -static struct ata_port_operations sata_fsl_ops = {
29791 +static const struct ata_port_operations sata_fsl_ops = {
29792 .inherits = &sata_pmp_port_ops,
29793
29794 .qc_defer = ata_std_qc_defer,
29795 diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
29796 index 4406902..60603ef 100644
29797 --- a/drivers/ata/sata_inic162x.c
29798 +++ b/drivers/ata/sata_inic162x.c
29799 @@ -721,7 +721,7 @@ static int inic_port_start(struct ata_port *ap)
29800 return 0;
29801 }
29802
29803 -static struct ata_port_operations inic_port_ops = {
29804 +static const struct ata_port_operations inic_port_ops = {
29805 .inherits = &sata_port_ops,
29806
29807 .check_atapi_dma = inic_check_atapi_dma,
29808 diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
29809 index cf41126..8107be6 100644
29810 --- a/drivers/ata/sata_mv.c
29811 +++ b/drivers/ata/sata_mv.c
29812 @@ -656,7 +656,7 @@ static struct scsi_host_template mv6_sht = {
29813 .dma_boundary = MV_DMA_BOUNDARY,
29814 };
29815
29816 -static struct ata_port_operations mv5_ops = {
29817 +static const struct ata_port_operations mv5_ops = {
29818 .inherits = &ata_sff_port_ops,
29819
29820 .lost_interrupt = ATA_OP_NULL,
29821 @@ -678,7 +678,7 @@ static struct ata_port_operations mv5_ops = {
29822 .port_stop = mv_port_stop,
29823 };
29824
29825 -static struct ata_port_operations mv6_ops = {
29826 +static const struct ata_port_operations mv6_ops = {
29827 .inherits = &mv5_ops,
29828 .dev_config = mv6_dev_config,
29829 .scr_read = mv_scr_read,
29830 @@ -698,7 +698,7 @@ static struct ata_port_operations mv6_ops = {
29831 .bmdma_status = mv_bmdma_status,
29832 };
29833
29834 -static struct ata_port_operations mv_iie_ops = {
29835 +static const struct ata_port_operations mv_iie_ops = {
29836 .inherits = &mv6_ops,
29837 .dev_config = ATA_OP_NULL,
29838 .qc_prep = mv_qc_prep_iie,
29839 diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
29840 index ae2297c..d5c9c33 100644
29841 --- a/drivers/ata/sata_nv.c
29842 +++ b/drivers/ata/sata_nv.c
29843 @@ -464,7 +464,7 @@ static struct scsi_host_template nv_swncq_sht = {
29844 * cases. Define nv_hardreset() which only kicks in for post-boot
29845 * probing and use it for all variants.
29846 */
29847 -static struct ata_port_operations nv_generic_ops = {
29848 +static const struct ata_port_operations nv_generic_ops = {
29849 .inherits = &ata_bmdma_port_ops,
29850 .lost_interrupt = ATA_OP_NULL,
29851 .scr_read = nv_scr_read,
29852 @@ -472,20 +472,20 @@ static struct ata_port_operations nv_generic_ops = {
29853 .hardreset = nv_hardreset,
29854 };
29855
29856 -static struct ata_port_operations nv_nf2_ops = {
29857 +static const struct ata_port_operations nv_nf2_ops = {
29858 .inherits = &nv_generic_ops,
29859 .freeze = nv_nf2_freeze,
29860 .thaw = nv_nf2_thaw,
29861 };
29862
29863 -static struct ata_port_operations nv_ck804_ops = {
29864 +static const struct ata_port_operations nv_ck804_ops = {
29865 .inherits = &nv_generic_ops,
29866 .freeze = nv_ck804_freeze,
29867 .thaw = nv_ck804_thaw,
29868 .host_stop = nv_ck804_host_stop,
29869 };
29870
29871 -static struct ata_port_operations nv_adma_ops = {
29872 +static const struct ata_port_operations nv_adma_ops = {
29873 .inherits = &nv_ck804_ops,
29874
29875 .check_atapi_dma = nv_adma_check_atapi_dma,
29876 @@ -509,7 +509,7 @@ static struct ata_port_operations nv_adma_ops = {
29877 .host_stop = nv_adma_host_stop,
29878 };
29879
29880 -static struct ata_port_operations nv_swncq_ops = {
29881 +static const struct ata_port_operations nv_swncq_ops = {
29882 .inherits = &nv_generic_ops,
29883
29884 .qc_defer = ata_std_qc_defer,
29885 diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
29886 index 07d8d00..6cc70bb 100644
29887 --- a/drivers/ata/sata_promise.c
29888 +++ b/drivers/ata/sata_promise.c
29889 @@ -195,7 +195,7 @@ static const struct ata_port_operations pdc_common_ops = {
29890 .error_handler = pdc_error_handler,
29891 };
29892
29893 -static struct ata_port_operations pdc_sata_ops = {
29894 +static const struct ata_port_operations pdc_sata_ops = {
29895 .inherits = &pdc_common_ops,
29896 .cable_detect = pdc_sata_cable_detect,
29897 .freeze = pdc_sata_freeze,
29898 @@ -208,14 +208,14 @@ static struct ata_port_operations pdc_sata_ops = {
29899
29900 /* First-generation chips need a more restrictive ->check_atapi_dma op,
29901 and ->freeze/thaw that ignore the hotplug controls. */
29902 -static struct ata_port_operations pdc_old_sata_ops = {
29903 +static const struct ata_port_operations pdc_old_sata_ops = {
29904 .inherits = &pdc_sata_ops,
29905 .freeze = pdc_freeze,
29906 .thaw = pdc_thaw,
29907 .check_atapi_dma = pdc_old_sata_check_atapi_dma,
29908 };
29909
29910 -static struct ata_port_operations pdc_pata_ops = {
29911 +static const struct ata_port_operations pdc_pata_ops = {
29912 .inherits = &pdc_common_ops,
29913 .cable_detect = pdc_pata_cable_detect,
29914 .freeze = pdc_freeze,
29915 diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
29916 index 326c0cf..36ecebe 100644
29917 --- a/drivers/ata/sata_qstor.c
29918 +++ b/drivers/ata/sata_qstor.c
29919 @@ -132,7 +132,7 @@ static struct scsi_host_template qs_ata_sht = {
29920 .dma_boundary = QS_DMA_BOUNDARY,
29921 };
29922
29923 -static struct ata_port_operations qs_ata_ops = {
29924 +static const struct ata_port_operations qs_ata_ops = {
29925 .inherits = &ata_sff_port_ops,
29926
29927 .check_atapi_dma = qs_check_atapi_dma,
29928 diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
29929 index 3cb69d5..0871d3c 100644
29930 --- a/drivers/ata/sata_sil.c
29931 +++ b/drivers/ata/sata_sil.c
29932 @@ -182,7 +182,7 @@ static struct scsi_host_template sil_sht = {
29933 .sg_tablesize = ATA_MAX_PRD
29934 };
29935
29936 -static struct ata_port_operations sil_ops = {
29937 +static const struct ata_port_operations sil_ops = {
29938 .inherits = &ata_bmdma32_port_ops,
29939 .dev_config = sil_dev_config,
29940 .set_mode = sil_set_mode,
29941 diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
29942 index e6946fc..eddb794 100644
29943 --- a/drivers/ata/sata_sil24.c
29944 +++ b/drivers/ata/sata_sil24.c
29945 @@ -388,7 +388,7 @@ static struct scsi_host_template sil24_sht = {
29946 .dma_boundary = ATA_DMA_BOUNDARY,
29947 };
29948
29949 -static struct ata_port_operations sil24_ops = {
29950 +static const struct ata_port_operations sil24_ops = {
29951 .inherits = &sata_pmp_port_ops,
29952
29953 .qc_defer = sil24_qc_defer,
29954 diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
29955 index f8a91bf..9cb06b6 100644
29956 --- a/drivers/ata/sata_sis.c
29957 +++ b/drivers/ata/sata_sis.c
29958 @@ -89,7 +89,7 @@ static struct scsi_host_template sis_sht = {
29959 ATA_BMDMA_SHT(DRV_NAME),
29960 };
29961
29962 -static struct ata_port_operations sis_ops = {
29963 +static const struct ata_port_operations sis_ops = {
29964 .inherits = &ata_bmdma_port_ops,
29965 .scr_read = sis_scr_read,
29966 .scr_write = sis_scr_write,
29967 diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
29968 index 7257f2d..d04c6f5 100644
29969 --- a/drivers/ata/sata_svw.c
29970 +++ b/drivers/ata/sata_svw.c
29971 @@ -344,7 +344,7 @@ static struct scsi_host_template k2_sata_sht = {
29972 };
29973
29974
29975 -static struct ata_port_operations k2_sata_ops = {
29976 +static const struct ata_port_operations k2_sata_ops = {
29977 .inherits = &ata_bmdma_port_ops,
29978 .sff_tf_load = k2_sata_tf_load,
29979 .sff_tf_read = k2_sata_tf_read,
29980 diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
29981 index bbcf970..cd0df0d 100644
29982 --- a/drivers/ata/sata_sx4.c
29983 +++ b/drivers/ata/sata_sx4.c
29984 @@ -248,7 +248,7 @@ static struct scsi_host_template pdc_sata_sht = {
29985 };
29986
29987 /* TODO: inherit from base port_ops after converting to new EH */
29988 -static struct ata_port_operations pdc_20621_ops = {
29989 +static const struct ata_port_operations pdc_20621_ops = {
29990 .inherits = &ata_sff_port_ops,
29991
29992 .check_atapi_dma = pdc_check_atapi_dma,
29993 diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c
29994 index e5bff47..089d859 100644
29995 --- a/drivers/ata/sata_uli.c
29996 +++ b/drivers/ata/sata_uli.c
29997 @@ -79,7 +79,7 @@ static struct scsi_host_template uli_sht = {
29998 ATA_BMDMA_SHT(DRV_NAME),
29999 };
30000
30001 -static struct ata_port_operations uli_ops = {
30002 +static const struct ata_port_operations uli_ops = {
30003 .inherits = &ata_bmdma_port_ops,
30004 .scr_read = uli_scr_read,
30005 .scr_write = uli_scr_write,
30006 diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
30007 index f5dcca7..77b94eb 100644
30008 --- a/drivers/ata/sata_via.c
30009 +++ b/drivers/ata/sata_via.c
30010 @@ -115,32 +115,32 @@ static struct scsi_host_template svia_sht = {
30011 ATA_BMDMA_SHT(DRV_NAME),
30012 };
30013
30014 -static struct ata_port_operations svia_base_ops = {
30015 +static const struct ata_port_operations svia_base_ops = {
30016 .inherits = &ata_bmdma_port_ops,
30017 .sff_tf_load = svia_tf_load,
30018 };
30019
30020 -static struct ata_port_operations vt6420_sata_ops = {
30021 +static const struct ata_port_operations vt6420_sata_ops = {
30022 .inherits = &svia_base_ops,
30023 .freeze = svia_noop_freeze,
30024 .prereset = vt6420_prereset,
30025 .bmdma_start = vt6420_bmdma_start,
30026 };
30027
30028 -static struct ata_port_operations vt6421_pata_ops = {
30029 +static const struct ata_port_operations vt6421_pata_ops = {
30030 .inherits = &svia_base_ops,
30031 .cable_detect = vt6421_pata_cable_detect,
30032 .set_piomode = vt6421_set_pio_mode,
30033 .set_dmamode = vt6421_set_dma_mode,
30034 };
30035
30036 -static struct ata_port_operations vt6421_sata_ops = {
30037 +static const struct ata_port_operations vt6421_sata_ops = {
30038 .inherits = &svia_base_ops,
30039 .scr_read = svia_scr_read,
30040 .scr_write = svia_scr_write,
30041 };
30042
30043 -static struct ata_port_operations vt8251_ops = {
30044 +static const struct ata_port_operations vt8251_ops = {
30045 .inherits = &svia_base_ops,
30046 .hardreset = sata_std_hardreset,
30047 .scr_read = vt8251_scr_read,
30048 diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
30049 index 8b2a278..51e65d3 100644
30050 --- a/drivers/ata/sata_vsc.c
30051 +++ b/drivers/ata/sata_vsc.c
30052 @@ -306,7 +306,7 @@ static struct scsi_host_template vsc_sata_sht = {
30053 };
30054
30055
30056 -static struct ata_port_operations vsc_sata_ops = {
30057 +static const struct ata_port_operations vsc_sata_ops = {
30058 .inherits = &ata_bmdma_port_ops,
30059 /* The IRQ handling is not quite standard SFF behaviour so we
30060 cannot use the default lost interrupt handler */
30061 diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
30062 index 5effec6..7e4019a 100644
30063 --- a/drivers/atm/adummy.c
30064 +++ b/drivers/atm/adummy.c
30065 @@ -77,7 +77,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
30066 vcc->pop(vcc, skb);
30067 else
30068 dev_kfree_skb_any(skb);
30069 - atomic_inc(&vcc->stats->tx);
30070 + atomic_inc_unchecked(&vcc->stats->tx);
30071
30072 return 0;
30073 }
30074 diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
30075 index 66e1813..26a27c6 100644
30076 --- a/drivers/atm/ambassador.c
30077 +++ b/drivers/atm/ambassador.c
30078 @@ -453,7 +453,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
30079 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
30080
30081 // VC layer stats
30082 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
30083 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
30084
30085 // free the descriptor
30086 kfree (tx_descr);
30087 @@ -494,7 +494,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
30088 dump_skb ("<<<", vc, skb);
30089
30090 // VC layer stats
30091 - atomic_inc(&atm_vcc->stats->rx);
30092 + atomic_inc_unchecked(&atm_vcc->stats->rx);
30093 __net_timestamp(skb);
30094 // end of our responsability
30095 atm_vcc->push (atm_vcc, skb);
30096 @@ -509,7 +509,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
30097 } else {
30098 PRINTK (KERN_INFO, "dropped over-size frame");
30099 // should we count this?
30100 - atomic_inc(&atm_vcc->stats->rx_drop);
30101 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
30102 }
30103
30104 } else {
30105 @@ -1341,7 +1341,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
30106 }
30107
30108 if (check_area (skb->data, skb->len)) {
30109 - atomic_inc(&atm_vcc->stats->tx_err);
30110 + atomic_inc_unchecked(&atm_vcc->stats->tx_err);
30111 return -ENOMEM; // ?
30112 }
30113
30114 diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
30115 index 02ad83d..6daffeb 100644
30116 --- a/drivers/atm/atmtcp.c
30117 +++ b/drivers/atm/atmtcp.c
30118 @@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
30119 if (vcc->pop) vcc->pop(vcc,skb);
30120 else dev_kfree_skb(skb);
30121 if (dev_data) return 0;
30122 - atomic_inc(&vcc->stats->tx_err);
30123 + atomic_inc_unchecked(&vcc->stats->tx_err);
30124 return -ENOLINK;
30125 }
30126 size = skb->len+sizeof(struct atmtcp_hdr);
30127 @@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
30128 if (!new_skb) {
30129 if (vcc->pop) vcc->pop(vcc,skb);
30130 else dev_kfree_skb(skb);
30131 - atomic_inc(&vcc->stats->tx_err);
30132 + atomic_inc_unchecked(&vcc->stats->tx_err);
30133 return -ENOBUFS;
30134 }
30135 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
30136 @@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
30137 if (vcc->pop) vcc->pop(vcc,skb);
30138 else dev_kfree_skb(skb);
30139 out_vcc->push(out_vcc,new_skb);
30140 - atomic_inc(&vcc->stats->tx);
30141 - atomic_inc(&out_vcc->stats->rx);
30142 + atomic_inc_unchecked(&vcc->stats->tx);
30143 + atomic_inc_unchecked(&out_vcc->stats->rx);
30144 return 0;
30145 }
30146
30147 @@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
30148 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
30149 read_unlock(&vcc_sklist_lock);
30150 if (!out_vcc) {
30151 - atomic_inc(&vcc->stats->tx_err);
30152 + atomic_inc_unchecked(&vcc->stats->tx_err);
30153 goto done;
30154 }
30155 skb_pull(skb,sizeof(struct atmtcp_hdr));
30156 @@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
30157 __net_timestamp(new_skb);
30158 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
30159 out_vcc->push(out_vcc,new_skb);
30160 - atomic_inc(&vcc->stats->tx);
30161 - atomic_inc(&out_vcc->stats->rx);
30162 + atomic_inc_unchecked(&vcc->stats->tx);
30163 + atomic_inc_unchecked(&out_vcc->stats->rx);
30164 done:
30165 if (vcc->pop) vcc->pop(vcc,skb);
30166 else dev_kfree_skb(skb);
30167 diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
30168 index 0c30261..3da356e 100644
30169 --- a/drivers/atm/eni.c
30170 +++ b/drivers/atm/eni.c
30171 @@ -525,7 +525,7 @@ static int rx_aal0(struct atm_vcc *vcc)
30172 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
30173 vcc->dev->number);
30174 length = 0;
30175 - atomic_inc(&vcc->stats->rx_err);
30176 + atomic_inc_unchecked(&vcc->stats->rx_err);
30177 }
30178 else {
30179 length = ATM_CELL_SIZE-1; /* no HEC */
30180 @@ -580,7 +580,7 @@ static int rx_aal5(struct atm_vcc *vcc)
30181 size);
30182 }
30183 eff = length = 0;
30184 - atomic_inc(&vcc->stats->rx_err);
30185 + atomic_inc_unchecked(&vcc->stats->rx_err);
30186 }
30187 else {
30188 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
30189 @@ -597,7 +597,7 @@ static int rx_aal5(struct atm_vcc *vcc)
30190 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
30191 vcc->dev->number,vcc->vci,length,size << 2,descr);
30192 length = eff = 0;
30193 - atomic_inc(&vcc->stats->rx_err);
30194 + atomic_inc_unchecked(&vcc->stats->rx_err);
30195 }
30196 }
30197 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
30198 @@ -770,7 +770,7 @@ rx_dequeued++;
30199 vcc->push(vcc,skb);
30200 pushed++;
30201 }
30202 - atomic_inc(&vcc->stats->rx);
30203 + atomic_inc_unchecked(&vcc->stats->rx);
30204 }
30205 wake_up(&eni_dev->rx_wait);
30206 }
30207 @@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
30208 PCI_DMA_TODEVICE);
30209 if (vcc->pop) vcc->pop(vcc,skb);
30210 else dev_kfree_skb_irq(skb);
30211 - atomic_inc(&vcc->stats->tx);
30212 + atomic_inc_unchecked(&vcc->stats->tx);
30213 wake_up(&eni_dev->tx_wait);
30214 dma_complete++;
30215 }
30216 @@ -1570,7 +1570,7 @@ tx_complete++;
30217 /*--------------------------------- entries ---------------------------------*/
30218
30219
30220 -static const char *media_name[] __devinitdata = {
30221 +static const char *media_name[] __devinitconst = {
30222 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
30223 "UTP", "05?", "06?", "07?", /* 4- 7 */
30224 "TAXI","09?", "10?", "11?", /* 8-11 */
30225 diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
30226 index cd5049a..a51209f 100644
30227 --- a/drivers/atm/firestream.c
30228 +++ b/drivers/atm/firestream.c
30229 @@ -748,7 +748,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
30230 }
30231 }
30232
30233 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
30234 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
30235
30236 fs_dprintk (FS_DEBUG_TXMEM, "i");
30237 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
30238 @@ -815,7 +815,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
30239 #endif
30240 skb_put (skb, qe->p1 & 0xffff);
30241 ATM_SKB(skb)->vcc = atm_vcc;
30242 - atomic_inc(&atm_vcc->stats->rx);
30243 + atomic_inc_unchecked(&atm_vcc->stats->rx);
30244 __net_timestamp(skb);
30245 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
30246 atm_vcc->push (atm_vcc, skb);
30247 @@ -836,12 +836,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
30248 kfree (pe);
30249 }
30250 if (atm_vcc)
30251 - atomic_inc(&atm_vcc->stats->rx_drop);
30252 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
30253 break;
30254 case 0x1f: /* Reassembly abort: no buffers. */
30255 /* Silently increment error counter. */
30256 if (atm_vcc)
30257 - atomic_inc(&atm_vcc->stats->rx_drop);
30258 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
30259 break;
30260 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
30261 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
30262 diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
30263 index f766cc4..a34002e 100644
30264 --- a/drivers/atm/fore200e.c
30265 +++ b/drivers/atm/fore200e.c
30266 @@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
30267 #endif
30268 /* check error condition */
30269 if (*entry->status & STATUS_ERROR)
30270 - atomic_inc(&vcc->stats->tx_err);
30271 + atomic_inc_unchecked(&vcc->stats->tx_err);
30272 else
30273 - atomic_inc(&vcc->stats->tx);
30274 + atomic_inc_unchecked(&vcc->stats->tx);
30275 }
30276 }
30277
30278 @@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
30279 if (skb == NULL) {
30280 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
30281
30282 - atomic_inc(&vcc->stats->rx_drop);
30283 + atomic_inc_unchecked(&vcc->stats->rx_drop);
30284 return -ENOMEM;
30285 }
30286
30287 @@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
30288
30289 dev_kfree_skb_any(skb);
30290
30291 - atomic_inc(&vcc->stats->rx_drop);
30292 + atomic_inc_unchecked(&vcc->stats->rx_drop);
30293 return -ENOMEM;
30294 }
30295
30296 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
30297
30298 vcc->push(vcc, skb);
30299 - atomic_inc(&vcc->stats->rx);
30300 + atomic_inc_unchecked(&vcc->stats->rx);
30301
30302 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
30303
30304 @@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
30305 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
30306 fore200e->atm_dev->number,
30307 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
30308 - atomic_inc(&vcc->stats->rx_err);
30309 + atomic_inc_unchecked(&vcc->stats->rx_err);
30310 }
30311 }
30312
30313 @@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
30314 goto retry_here;
30315 }
30316
30317 - atomic_inc(&vcc->stats->tx_err);
30318 + atomic_inc_unchecked(&vcc->stats->tx_err);
30319
30320 fore200e->tx_sat++;
30321 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
30322 diff --git a/drivers/atm/he.c b/drivers/atm/he.c
30323 index 7066703..2b130de 100644
30324 --- a/drivers/atm/he.c
30325 +++ b/drivers/atm/he.c
30326 @@ -1769,7 +1769,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
30327
30328 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
30329 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
30330 - atomic_inc(&vcc->stats->rx_drop);
30331 + atomic_inc_unchecked(&vcc->stats->rx_drop);
30332 goto return_host_buffers;
30333 }
30334
30335 @@ -1802,7 +1802,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
30336 RBRQ_LEN_ERR(he_dev->rbrq_head)
30337 ? "LEN_ERR" : "",
30338 vcc->vpi, vcc->vci);
30339 - atomic_inc(&vcc->stats->rx_err);
30340 + atomic_inc_unchecked(&vcc->stats->rx_err);
30341 goto return_host_buffers;
30342 }
30343
30344 @@ -1861,7 +1861,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
30345 vcc->push(vcc, skb);
30346 spin_lock(&he_dev->global_lock);
30347
30348 - atomic_inc(&vcc->stats->rx);
30349 + atomic_inc_unchecked(&vcc->stats->rx);
30350
30351 return_host_buffers:
30352 ++pdus_assembled;
30353 @@ -2206,7 +2206,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
30354 tpd->vcc->pop(tpd->vcc, tpd->skb);
30355 else
30356 dev_kfree_skb_any(tpd->skb);
30357 - atomic_inc(&tpd->vcc->stats->tx_err);
30358 + atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
30359 }
30360 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
30361 return;
30362 @@ -2618,7 +2618,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
30363 vcc->pop(vcc, skb);
30364 else
30365 dev_kfree_skb_any(skb);
30366 - atomic_inc(&vcc->stats->tx_err);
30367 + atomic_inc_unchecked(&vcc->stats->tx_err);
30368 return -EINVAL;
30369 }
30370
30371 @@ -2629,7 +2629,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
30372 vcc->pop(vcc, skb);
30373 else
30374 dev_kfree_skb_any(skb);
30375 - atomic_inc(&vcc->stats->tx_err);
30376 + atomic_inc_unchecked(&vcc->stats->tx_err);
30377 return -EINVAL;
30378 }
30379 #endif
30380 @@ -2641,7 +2641,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
30381 vcc->pop(vcc, skb);
30382 else
30383 dev_kfree_skb_any(skb);
30384 - atomic_inc(&vcc->stats->tx_err);
30385 + atomic_inc_unchecked(&vcc->stats->tx_err);
30386 spin_unlock_irqrestore(&he_dev->global_lock, flags);
30387 return -ENOMEM;
30388 }
30389 @@ -2683,7 +2683,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
30390 vcc->pop(vcc, skb);
30391 else
30392 dev_kfree_skb_any(skb);
30393 - atomic_inc(&vcc->stats->tx_err);
30394 + atomic_inc_unchecked(&vcc->stats->tx_err);
30395 spin_unlock_irqrestore(&he_dev->global_lock, flags);
30396 return -ENOMEM;
30397 }
30398 @@ -2714,7 +2714,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
30399 __enqueue_tpd(he_dev, tpd, cid);
30400 spin_unlock_irqrestore(&he_dev->global_lock, flags);
30401
30402 - atomic_inc(&vcc->stats->tx);
30403 + atomic_inc_unchecked(&vcc->stats->tx);
30404
30405 return 0;
30406 }
30407 diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
30408 index 4e49021..01b1512 100644
30409 --- a/drivers/atm/horizon.c
30410 +++ b/drivers/atm/horizon.c
30411 @@ -1033,7 +1033,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
30412 {
30413 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
30414 // VC layer stats
30415 - atomic_inc(&vcc->stats->rx);
30416 + atomic_inc_unchecked(&vcc->stats->rx);
30417 __net_timestamp(skb);
30418 // end of our responsability
30419 vcc->push (vcc, skb);
30420 @@ -1185,7 +1185,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
30421 dev->tx_iovec = NULL;
30422
30423 // VC layer stats
30424 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
30425 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
30426
30427 // free the skb
30428 hrz_kfree_skb (skb);
30429 diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
30430 index e33ae00..9deb4ab 100644
30431 --- a/drivers/atm/idt77252.c
30432 +++ b/drivers/atm/idt77252.c
30433 @@ -810,7 +810,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
30434 else
30435 dev_kfree_skb(skb);
30436
30437 - atomic_inc(&vcc->stats->tx);
30438 + atomic_inc_unchecked(&vcc->stats->tx);
30439 }
30440
30441 atomic_dec(&scq->used);
30442 @@ -1073,13 +1073,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
30443 if ((sb = dev_alloc_skb(64)) == NULL) {
30444 printk("%s: Can't allocate buffers for aal0.\n",
30445 card->name);
30446 - atomic_add(i, &vcc->stats->rx_drop);
30447 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
30448 break;
30449 }
30450 if (!atm_charge(vcc, sb->truesize)) {
30451 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
30452 card->name);
30453 - atomic_add(i - 1, &vcc->stats->rx_drop);
30454 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
30455 dev_kfree_skb(sb);
30456 break;
30457 }
30458 @@ -1096,7 +1096,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
30459 ATM_SKB(sb)->vcc = vcc;
30460 __net_timestamp(sb);
30461 vcc->push(vcc, sb);
30462 - atomic_inc(&vcc->stats->rx);
30463 + atomic_inc_unchecked(&vcc->stats->rx);
30464
30465 cell += ATM_CELL_PAYLOAD;
30466 }
30467 @@ -1133,13 +1133,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
30468 "(CDC: %08x)\n",
30469 card->name, len, rpp->len, readl(SAR_REG_CDC));
30470 recycle_rx_pool_skb(card, rpp);
30471 - atomic_inc(&vcc->stats->rx_err);
30472 + atomic_inc_unchecked(&vcc->stats->rx_err);
30473 return;
30474 }
30475 if (stat & SAR_RSQE_CRC) {
30476 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
30477 recycle_rx_pool_skb(card, rpp);
30478 - atomic_inc(&vcc->stats->rx_err);
30479 + atomic_inc_unchecked(&vcc->stats->rx_err);
30480 return;
30481 }
30482 if (skb_queue_len(&rpp->queue) > 1) {
30483 @@ -1150,7 +1150,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
30484 RXPRINTK("%s: Can't alloc RX skb.\n",
30485 card->name);
30486 recycle_rx_pool_skb(card, rpp);
30487 - atomic_inc(&vcc->stats->rx_err);
30488 + atomic_inc_unchecked(&vcc->stats->rx_err);
30489 return;
30490 }
30491 if (!atm_charge(vcc, skb->truesize)) {
30492 @@ -1169,7 +1169,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
30493 __net_timestamp(skb);
30494
30495 vcc->push(vcc, skb);
30496 - atomic_inc(&vcc->stats->rx);
30497 + atomic_inc_unchecked(&vcc->stats->rx);
30498
30499 return;
30500 }
30501 @@ -1191,7 +1191,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
30502 __net_timestamp(skb);
30503
30504 vcc->push(vcc, skb);
30505 - atomic_inc(&vcc->stats->rx);
30506 + atomic_inc_unchecked(&vcc->stats->rx);
30507
30508 if (skb->truesize > SAR_FB_SIZE_3)
30509 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
30510 @@ -1303,14 +1303,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
30511 if (vcc->qos.aal != ATM_AAL0) {
30512 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
30513 card->name, vpi, vci);
30514 - atomic_inc(&vcc->stats->rx_drop);
30515 + atomic_inc_unchecked(&vcc->stats->rx_drop);
30516 goto drop;
30517 }
30518
30519 if ((sb = dev_alloc_skb(64)) == NULL) {
30520 printk("%s: Can't allocate buffers for AAL0.\n",
30521 card->name);
30522 - atomic_inc(&vcc->stats->rx_err);
30523 + atomic_inc_unchecked(&vcc->stats->rx_err);
30524 goto drop;
30525 }
30526
30527 @@ -1329,7 +1329,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
30528 ATM_SKB(sb)->vcc = vcc;
30529 __net_timestamp(sb);
30530 vcc->push(vcc, sb);
30531 - atomic_inc(&vcc->stats->rx);
30532 + atomic_inc_unchecked(&vcc->stats->rx);
30533
30534 drop:
30535 skb_pull(queue, 64);
30536 @@ -1954,13 +1954,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
30537
30538 if (vc == NULL) {
30539 printk("%s: NULL connection in send().\n", card->name);
30540 - atomic_inc(&vcc->stats->tx_err);
30541 + atomic_inc_unchecked(&vcc->stats->tx_err);
30542 dev_kfree_skb(skb);
30543 return -EINVAL;
30544 }
30545 if (!test_bit(VCF_TX, &vc->flags)) {
30546 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
30547 - atomic_inc(&vcc->stats->tx_err);
30548 + atomic_inc_unchecked(&vcc->stats->tx_err);
30549 dev_kfree_skb(skb);
30550 return -EINVAL;
30551 }
30552 @@ -1972,14 +1972,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
30553 break;
30554 default:
30555 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
30556 - atomic_inc(&vcc->stats->tx_err);
30557 + atomic_inc_unchecked(&vcc->stats->tx_err);
30558 dev_kfree_skb(skb);
30559 return -EINVAL;
30560 }
30561
30562 if (skb_shinfo(skb)->nr_frags != 0) {
30563 printk("%s: No scatter-gather yet.\n", card->name);
30564 - atomic_inc(&vcc->stats->tx_err);
30565 + atomic_inc_unchecked(&vcc->stats->tx_err);
30566 dev_kfree_skb(skb);
30567 return -EINVAL;
30568 }
30569 @@ -1987,7 +1987,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
30570
30571 err = queue_skb(card, vc, skb, oam);
30572 if (err) {
30573 - atomic_inc(&vcc->stats->tx_err);
30574 + atomic_inc_unchecked(&vcc->stats->tx_err);
30575 dev_kfree_skb(skb);
30576 return err;
30577 }
30578 @@ -2010,7 +2010,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
30579 skb = dev_alloc_skb(64);
30580 if (!skb) {
30581 printk("%s: Out of memory in send_oam().\n", card->name);
30582 - atomic_inc(&vcc->stats->tx_err);
30583 + atomic_inc_unchecked(&vcc->stats->tx_err);
30584 return -ENOMEM;
30585 }
30586 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
30587 diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
30588 index b2c1b37..faa672b 100644
30589 --- a/drivers/atm/iphase.c
30590 +++ b/drivers/atm/iphase.c
30591 @@ -1123,7 +1123,7 @@ static int rx_pkt(struct atm_dev *dev)
30592 status = (u_short) (buf_desc_ptr->desc_mode);
30593 if (status & (RX_CER | RX_PTE | RX_OFL))
30594 {
30595 - atomic_inc(&vcc->stats->rx_err);
30596 + atomic_inc_unchecked(&vcc->stats->rx_err);
30597 IF_ERR(printk("IA: bad packet, dropping it");)
30598 if (status & RX_CER) {
30599 IF_ERR(printk(" cause: packet CRC error\n");)
30600 @@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
30601 len = dma_addr - buf_addr;
30602 if (len > iadev->rx_buf_sz) {
30603 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
30604 - atomic_inc(&vcc->stats->rx_err);
30605 + atomic_inc_unchecked(&vcc->stats->rx_err);
30606 goto out_free_desc;
30607 }
30608
30609 @@ -1296,7 +1296,7 @@ static void rx_dle_intr(struct atm_dev *dev)
30610 ia_vcc = INPH_IA_VCC(vcc);
30611 if (ia_vcc == NULL)
30612 {
30613 - atomic_inc(&vcc->stats->rx_err);
30614 + atomic_inc_unchecked(&vcc->stats->rx_err);
30615 dev_kfree_skb_any(skb);
30616 atm_return(vcc, atm_guess_pdu2truesize(len));
30617 goto INCR_DLE;
30618 @@ -1308,7 +1308,7 @@ static void rx_dle_intr(struct atm_dev *dev)
30619 if ((length > iadev->rx_buf_sz) || (length >
30620 (skb->len - sizeof(struct cpcs_trailer))))
30621 {
30622 - atomic_inc(&vcc->stats->rx_err);
30623 + atomic_inc_unchecked(&vcc->stats->rx_err);
30624 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
30625 length, skb->len);)
30626 dev_kfree_skb_any(skb);
30627 @@ -1324,7 +1324,7 @@ static void rx_dle_intr(struct atm_dev *dev)
30628
30629 IF_RX(printk("rx_dle_intr: skb push");)
30630 vcc->push(vcc,skb);
30631 - atomic_inc(&vcc->stats->rx);
30632 + atomic_inc_unchecked(&vcc->stats->rx);
30633 iadev->rx_pkt_cnt++;
30634 }
30635 INCR_DLE:
30636 @@ -2806,15 +2806,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
30637 {
30638 struct k_sonet_stats *stats;
30639 stats = &PRIV(_ia_dev[board])->sonet_stats;
30640 - printk("section_bip: %d\n", atomic_read(&stats->section_bip));
30641 - printk("line_bip : %d\n", atomic_read(&stats->line_bip));
30642 - printk("path_bip : %d\n", atomic_read(&stats->path_bip));
30643 - printk("line_febe : %d\n", atomic_read(&stats->line_febe));
30644 - printk("path_febe : %d\n", atomic_read(&stats->path_febe));
30645 - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
30646 - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
30647 - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
30648 - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
30649 + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
30650 + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
30651 + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
30652 + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
30653 + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
30654 + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
30655 + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
30656 + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
30657 + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
30658 }
30659 ia_cmds.status = 0;
30660 break;
30661 @@ -2919,7 +2919,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
30662 if ((desc == 0) || (desc > iadev->num_tx_desc))
30663 {
30664 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
30665 - atomic_inc(&vcc->stats->tx);
30666 + atomic_inc_unchecked(&vcc->stats->tx);
30667 if (vcc->pop)
30668 vcc->pop(vcc, skb);
30669 else
30670 @@ -3024,14 +3024,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
30671 ATM_DESC(skb) = vcc->vci;
30672 skb_queue_tail(&iadev->tx_dma_q, skb);
30673
30674 - atomic_inc(&vcc->stats->tx);
30675 + atomic_inc_unchecked(&vcc->stats->tx);
30676 iadev->tx_pkt_cnt++;
30677 /* Increment transaction counter */
30678 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
30679
30680 #if 0
30681 /* add flow control logic */
30682 - if (atomic_read(&vcc->stats->tx) % 20 == 0) {
30683 + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
30684 if (iavcc->vc_desc_cnt > 10) {
30685 vcc->tx_quota = vcc->tx_quota * 3 / 4;
30686 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
30687 diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
30688 index cf97c34..8d30655 100644
30689 --- a/drivers/atm/lanai.c
30690 +++ b/drivers/atm/lanai.c
30691 @@ -1305,7 +1305,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
30692 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
30693 lanai_endtx(lanai, lvcc);
30694 lanai_free_skb(lvcc->tx.atmvcc, skb);
30695 - atomic_inc(&lvcc->tx.atmvcc->stats->tx);
30696 + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
30697 }
30698
30699 /* Try to fill the buffer - don't call unless there is backlog */
30700 @@ -1428,7 +1428,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
30701 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
30702 __net_timestamp(skb);
30703 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
30704 - atomic_inc(&lvcc->rx.atmvcc->stats->rx);
30705 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
30706 out:
30707 lvcc->rx.buf.ptr = end;
30708 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
30709 @@ -1670,7 +1670,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
30710 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
30711 "vcc %d\n", lanai->number, (unsigned int) s, vci);
30712 lanai->stats.service_rxnotaal5++;
30713 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
30714 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
30715 return 0;
30716 }
30717 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
30718 @@ -1682,7 +1682,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
30719 int bytes;
30720 read_unlock(&vcc_sklist_lock);
30721 DPRINTK("got trashed rx pdu on vci %d\n", vci);
30722 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
30723 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
30724 lvcc->stats.x.aal5.service_trash++;
30725 bytes = (SERVICE_GET_END(s) * 16) -
30726 (((unsigned long) lvcc->rx.buf.ptr) -
30727 @@ -1694,7 +1694,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
30728 }
30729 if (s & SERVICE_STREAM) {
30730 read_unlock(&vcc_sklist_lock);
30731 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
30732 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
30733 lvcc->stats.x.aal5.service_stream++;
30734 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
30735 "PDU on VCI %d!\n", lanai->number, vci);
30736 @@ -1702,7 +1702,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
30737 return 0;
30738 }
30739 DPRINTK("got rx crc error on vci %d\n", vci);
30740 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
30741 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
30742 lvcc->stats.x.aal5.service_rxcrc++;
30743 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
30744 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
30745 diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
30746 index 3da804b..d3b0eed 100644
30747 --- a/drivers/atm/nicstar.c
30748 +++ b/drivers/atm/nicstar.c
30749 @@ -1723,7 +1723,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
30750 if ((vc = (vc_map *) vcc->dev_data) == NULL)
30751 {
30752 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", card->index);
30753 - atomic_inc(&vcc->stats->tx_err);
30754 + atomic_inc_unchecked(&vcc->stats->tx_err);
30755 dev_kfree_skb_any(skb);
30756 return -EINVAL;
30757 }
30758 @@ -1731,7 +1731,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
30759 if (!vc->tx)
30760 {
30761 printk("nicstar%d: Trying to transmit on a non-tx VC.\n", card->index);
30762 - atomic_inc(&vcc->stats->tx_err);
30763 + atomic_inc_unchecked(&vcc->stats->tx_err);
30764 dev_kfree_skb_any(skb);
30765 return -EINVAL;
30766 }
30767 @@ -1739,7 +1739,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
30768 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0)
30769 {
30770 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", card->index);
30771 - atomic_inc(&vcc->stats->tx_err);
30772 + atomic_inc_unchecked(&vcc->stats->tx_err);
30773 dev_kfree_skb_any(skb);
30774 return -EINVAL;
30775 }
30776 @@ -1747,7 +1747,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
30777 if (skb_shinfo(skb)->nr_frags != 0)
30778 {
30779 printk("nicstar%d: No scatter-gather yet.\n", card->index);
30780 - atomic_inc(&vcc->stats->tx_err);
30781 + atomic_inc_unchecked(&vcc->stats->tx_err);
30782 dev_kfree_skb_any(skb);
30783 return -EINVAL;
30784 }
30785 @@ -1792,11 +1792,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
30786
30787 if (push_scqe(card, vc, scq, &scqe, skb) != 0)
30788 {
30789 - atomic_inc(&vcc->stats->tx_err);
30790 + atomic_inc_unchecked(&vcc->stats->tx_err);
30791 dev_kfree_skb_any(skb);
30792 return -EIO;
30793 }
30794 - atomic_inc(&vcc->stats->tx);
30795 + atomic_inc_unchecked(&vcc->stats->tx);
30796
30797 return 0;
30798 }
30799 @@ -2111,14 +2111,14 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30800 {
30801 printk("nicstar%d: Can't allocate buffers for aal0.\n",
30802 card->index);
30803 - atomic_add(i,&vcc->stats->rx_drop);
30804 + atomic_add_unchecked(i,&vcc->stats->rx_drop);
30805 break;
30806 }
30807 if (!atm_charge(vcc, sb->truesize))
30808 {
30809 RXPRINTK("nicstar%d: atm_charge() dropped aal0 packets.\n",
30810 card->index);
30811 - atomic_add(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
30812 + atomic_add_unchecked(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
30813 dev_kfree_skb_any(sb);
30814 break;
30815 }
30816 @@ -2133,7 +2133,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30817 ATM_SKB(sb)->vcc = vcc;
30818 __net_timestamp(sb);
30819 vcc->push(vcc, sb);
30820 - atomic_inc(&vcc->stats->rx);
30821 + atomic_inc_unchecked(&vcc->stats->rx);
30822 cell += ATM_CELL_PAYLOAD;
30823 }
30824
30825 @@ -2152,7 +2152,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30826 if (iovb == NULL)
30827 {
30828 printk("nicstar%d: Out of iovec buffers.\n", card->index);
30829 - atomic_inc(&vcc->stats->rx_drop);
30830 + atomic_inc_unchecked(&vcc->stats->rx_drop);
30831 recycle_rx_buf(card, skb);
30832 return;
30833 }
30834 @@ -2182,7 +2182,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30835 else if (NS_SKB(iovb)->iovcnt >= NS_MAX_IOVECS)
30836 {
30837 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
30838 - atomic_inc(&vcc->stats->rx_err);
30839 + atomic_inc_unchecked(&vcc->stats->rx_err);
30840 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS);
30841 NS_SKB(iovb)->iovcnt = 0;
30842 iovb->len = 0;
30843 @@ -2202,7 +2202,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30844 printk("nicstar%d: Expected a small buffer, and this is not one.\n",
30845 card->index);
30846 which_list(card, skb);
30847 - atomic_inc(&vcc->stats->rx_err);
30848 + atomic_inc_unchecked(&vcc->stats->rx_err);
30849 recycle_rx_buf(card, skb);
30850 vc->rx_iov = NULL;
30851 recycle_iov_buf(card, iovb);
30852 @@ -2216,7 +2216,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30853 printk("nicstar%d: Expected a large buffer, and this is not one.\n",
30854 card->index);
30855 which_list(card, skb);
30856 - atomic_inc(&vcc->stats->rx_err);
30857 + atomic_inc_unchecked(&vcc->stats->rx_err);
30858 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
30859 NS_SKB(iovb)->iovcnt);
30860 vc->rx_iov = NULL;
30861 @@ -2240,7 +2240,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30862 printk(" - PDU size mismatch.\n");
30863 else
30864 printk(".\n");
30865 - atomic_inc(&vcc->stats->rx_err);
30866 + atomic_inc_unchecked(&vcc->stats->rx_err);
30867 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
30868 NS_SKB(iovb)->iovcnt);
30869 vc->rx_iov = NULL;
30870 @@ -2256,7 +2256,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30871 if (!atm_charge(vcc, skb->truesize))
30872 {
30873 push_rxbufs(card, skb);
30874 - atomic_inc(&vcc->stats->rx_drop);
30875 + atomic_inc_unchecked(&vcc->stats->rx_drop);
30876 }
30877 else
30878 {
30879 @@ -2268,7 +2268,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30880 ATM_SKB(skb)->vcc = vcc;
30881 __net_timestamp(skb);
30882 vcc->push(vcc, skb);
30883 - atomic_inc(&vcc->stats->rx);
30884 + atomic_inc_unchecked(&vcc->stats->rx);
30885 }
30886 }
30887 else if (NS_SKB(iovb)->iovcnt == 2) /* One small plus one large buffer */
30888 @@ -2283,7 +2283,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30889 if (!atm_charge(vcc, sb->truesize))
30890 {
30891 push_rxbufs(card, sb);
30892 - atomic_inc(&vcc->stats->rx_drop);
30893 + atomic_inc_unchecked(&vcc->stats->rx_drop);
30894 }
30895 else
30896 {
30897 @@ -2295,7 +2295,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30898 ATM_SKB(sb)->vcc = vcc;
30899 __net_timestamp(sb);
30900 vcc->push(vcc, sb);
30901 - atomic_inc(&vcc->stats->rx);
30902 + atomic_inc_unchecked(&vcc->stats->rx);
30903 }
30904
30905 push_rxbufs(card, skb);
30906 @@ -2306,7 +2306,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30907 if (!atm_charge(vcc, skb->truesize))
30908 {
30909 push_rxbufs(card, skb);
30910 - atomic_inc(&vcc->stats->rx_drop);
30911 + atomic_inc_unchecked(&vcc->stats->rx_drop);
30912 }
30913 else
30914 {
30915 @@ -2320,7 +2320,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30916 ATM_SKB(skb)->vcc = vcc;
30917 __net_timestamp(skb);
30918 vcc->push(vcc, skb);
30919 - atomic_inc(&vcc->stats->rx);
30920 + atomic_inc_unchecked(&vcc->stats->rx);
30921 }
30922
30923 push_rxbufs(card, sb);
30924 @@ -2342,7 +2342,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30925 if (hb == NULL)
30926 {
30927 printk("nicstar%d: Out of huge buffers.\n", card->index);
30928 - atomic_inc(&vcc->stats->rx_drop);
30929 + atomic_inc_unchecked(&vcc->stats->rx_drop);
30930 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
30931 NS_SKB(iovb)->iovcnt);
30932 vc->rx_iov = NULL;
30933 @@ -2393,7 +2393,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30934 }
30935 else
30936 dev_kfree_skb_any(hb);
30937 - atomic_inc(&vcc->stats->rx_drop);
30938 + atomic_inc_unchecked(&vcc->stats->rx_drop);
30939 }
30940 else
30941 {
30942 @@ -2427,7 +2427,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30943 #endif /* NS_USE_DESTRUCTORS */
30944 __net_timestamp(hb);
30945 vcc->push(vcc, hb);
30946 - atomic_inc(&vcc->stats->rx);
30947 + atomic_inc_unchecked(&vcc->stats->rx);
30948 }
30949 }
30950
30951 diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
30952 index 84c93ff..e6ed269 100644
30953 --- a/drivers/atm/solos-pci.c
30954 +++ b/drivers/atm/solos-pci.c
30955 @@ -708,7 +708,7 @@ void solos_bh(unsigned long card_arg)
30956 }
30957 atm_charge(vcc, skb->truesize);
30958 vcc->push(vcc, skb);
30959 - atomic_inc(&vcc->stats->rx);
30960 + atomic_inc_unchecked(&vcc->stats->rx);
30961 break;
30962
30963 case PKT_STATUS:
30964 @@ -914,6 +914,8 @@ static int print_buffer(struct sk_buff *buf)
30965 char msg[500];
30966 char item[10];
30967
30968 + pax_track_stack();
30969 +
30970 len = buf->len;
30971 for (i = 0; i < len; i++){
30972 if(i % 8 == 0)
30973 @@ -1023,7 +1025,7 @@ static uint32_t fpga_tx(struct solos_card *card)
30974 vcc = SKB_CB(oldskb)->vcc;
30975
30976 if (vcc) {
30977 - atomic_inc(&vcc->stats->tx);
30978 + atomic_inc_unchecked(&vcc->stats->tx);
30979 solos_pop(vcc, oldskb);
30980 } else
30981 dev_kfree_skb_irq(oldskb);
30982 diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
30983 index 6dd3f59..ee377f3 100644
30984 --- a/drivers/atm/suni.c
30985 +++ b/drivers/atm/suni.c
30986 @@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
30987
30988
30989 #define ADD_LIMITED(s,v) \
30990 - atomic_add((v),&stats->s); \
30991 - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
30992 + atomic_add_unchecked((v),&stats->s); \
30993 + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
30994
30995
30996 static void suni_hz(unsigned long from_timer)
30997 diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
30998 index fc8cb07..4a80e53 100644
30999 --- a/drivers/atm/uPD98402.c
31000 +++ b/drivers/atm/uPD98402.c
31001 @@ -41,7 +41,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
31002 struct sonet_stats tmp;
31003 int error = 0;
31004
31005 - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
31006 + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
31007 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
31008 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
31009 if (zero && !error) {
31010 @@ -160,9 +160,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
31011
31012
31013 #define ADD_LIMITED(s,v) \
31014 - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
31015 - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
31016 - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
31017 + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
31018 + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
31019 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
31020
31021
31022 static void stat_event(struct atm_dev *dev)
31023 @@ -193,7 +193,7 @@ static void uPD98402_int(struct atm_dev *dev)
31024 if (reason & uPD98402_INT_PFM) stat_event(dev);
31025 if (reason & uPD98402_INT_PCO) {
31026 (void) GET(PCOCR); /* clear interrupt cause */
31027 - atomic_add(GET(HECCT),
31028 + atomic_add_unchecked(GET(HECCT),
31029 &PRIV(dev)->sonet_stats.uncorr_hcs);
31030 }
31031 if ((reason & uPD98402_INT_RFO) &&
31032 @@ -221,9 +221,9 @@ static int uPD98402_start(struct atm_dev *dev)
31033 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
31034 uPD98402_INT_LOS),PIMR); /* enable them */
31035 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
31036 - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
31037 - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
31038 - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
31039 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
31040 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
31041 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
31042 return 0;
31043 }
31044
31045 diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
31046 index 2e9635b..32927b4 100644
31047 --- a/drivers/atm/zatm.c
31048 +++ b/drivers/atm/zatm.c
31049 @@ -458,7 +458,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
31050 }
31051 if (!size) {
31052 dev_kfree_skb_irq(skb);
31053 - if (vcc) atomic_inc(&vcc->stats->rx_err);
31054 + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
31055 continue;
31056 }
31057 if (!atm_charge(vcc,skb->truesize)) {
31058 @@ -468,7 +468,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
31059 skb->len = size;
31060 ATM_SKB(skb)->vcc = vcc;
31061 vcc->push(vcc,skb);
31062 - atomic_inc(&vcc->stats->rx);
31063 + atomic_inc_unchecked(&vcc->stats->rx);
31064 }
31065 zout(pos & 0xffff,MTA(mbx));
31066 #if 0 /* probably a stupid idea */
31067 @@ -732,7 +732,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
31068 skb_queue_head(&zatm_vcc->backlog,skb);
31069 break;
31070 }
31071 - atomic_inc(&vcc->stats->tx);
31072 + atomic_inc_unchecked(&vcc->stats->tx);
31073 wake_up(&zatm_vcc->tx_wait);
31074 }
31075
31076 diff --git a/drivers/base/bus.c b/drivers/base/bus.c
31077 index 63c143e..fece183 100644
31078 --- a/drivers/base/bus.c
31079 +++ b/drivers/base/bus.c
31080 @@ -70,7 +70,7 @@ static ssize_t drv_attr_store(struct kobject *kobj, struct attribute *attr,
31081 return ret;
31082 }
31083
31084 -static struct sysfs_ops driver_sysfs_ops = {
31085 +static const struct sysfs_ops driver_sysfs_ops = {
31086 .show = drv_attr_show,
31087 .store = drv_attr_store,
31088 };
31089 @@ -115,7 +115,7 @@ static ssize_t bus_attr_store(struct kobject *kobj, struct attribute *attr,
31090 return ret;
31091 }
31092
31093 -static struct sysfs_ops bus_sysfs_ops = {
31094 +static const struct sysfs_ops bus_sysfs_ops = {
31095 .show = bus_attr_show,
31096 .store = bus_attr_store,
31097 };
31098 @@ -154,7 +154,7 @@ static int bus_uevent_filter(struct kset *kset, struct kobject *kobj)
31099 return 0;
31100 }
31101
31102 -static struct kset_uevent_ops bus_uevent_ops = {
31103 +static const struct kset_uevent_ops bus_uevent_ops = {
31104 .filter = bus_uevent_filter,
31105 };
31106
31107 diff --git a/drivers/base/class.c b/drivers/base/class.c
31108 index 6e2c3b0..cb61871 100644
31109 --- a/drivers/base/class.c
31110 +++ b/drivers/base/class.c
31111 @@ -63,7 +63,7 @@ static void class_release(struct kobject *kobj)
31112 kfree(cp);
31113 }
31114
31115 -static struct sysfs_ops class_sysfs_ops = {
31116 +static const struct sysfs_ops class_sysfs_ops = {
31117 .show = class_attr_show,
31118 .store = class_attr_store,
31119 };
31120 diff --git a/drivers/base/core.c b/drivers/base/core.c
31121 index f33d768..a9358d0 100644
31122 --- a/drivers/base/core.c
31123 +++ b/drivers/base/core.c
31124 @@ -100,7 +100,7 @@ static ssize_t dev_attr_store(struct kobject *kobj, struct attribute *attr,
31125 return ret;
31126 }
31127
31128 -static struct sysfs_ops dev_sysfs_ops = {
31129 +static const struct sysfs_ops dev_sysfs_ops = {
31130 .show = dev_attr_show,
31131 .store = dev_attr_store,
31132 };
31133 @@ -252,7 +252,7 @@ static int dev_uevent(struct kset *kset, struct kobject *kobj,
31134 return retval;
31135 }
31136
31137 -static struct kset_uevent_ops device_uevent_ops = {
31138 +static const struct kset_uevent_ops device_uevent_ops = {
31139 .filter = dev_uevent_filter,
31140 .name = dev_uevent_name,
31141 .uevent = dev_uevent,
31142 diff --git a/drivers/base/memory.c b/drivers/base/memory.c
31143 index 989429c..2272b00 100644
31144 --- a/drivers/base/memory.c
31145 +++ b/drivers/base/memory.c
31146 @@ -44,7 +44,7 @@ static int memory_uevent(struct kset *kset, struct kobject *obj, struct kobj_uev
31147 return retval;
31148 }
31149
31150 -static struct kset_uevent_ops memory_uevent_ops = {
31151 +static const struct kset_uevent_ops memory_uevent_ops = {
31152 .name = memory_uevent_name,
31153 .uevent = memory_uevent,
31154 };
31155 diff --git a/drivers/base/sys.c b/drivers/base/sys.c
31156 index 3f202f7..61c4a6f 100644
31157 --- a/drivers/base/sys.c
31158 +++ b/drivers/base/sys.c
31159 @@ -54,7 +54,7 @@ sysdev_store(struct kobject *kobj, struct attribute *attr,
31160 return -EIO;
31161 }
31162
31163 -static struct sysfs_ops sysfs_ops = {
31164 +static const struct sysfs_ops sysfs_ops = {
31165 .show = sysdev_show,
31166 .store = sysdev_store,
31167 };
31168 @@ -104,7 +104,7 @@ static ssize_t sysdev_class_store(struct kobject *kobj, struct attribute *attr,
31169 return -EIO;
31170 }
31171
31172 -static struct sysfs_ops sysfs_class_ops = {
31173 +static const struct sysfs_ops sysfs_class_ops = {
31174 .show = sysdev_class_show,
31175 .store = sysdev_class_store,
31176 };
31177 diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
31178 index eb4fa19..1954777 100644
31179 --- a/drivers/block/DAC960.c
31180 +++ b/drivers/block/DAC960.c
31181 @@ -1973,6 +1973,8 @@ static bool DAC960_V1_ReadDeviceConfiguration(DAC960_Controller_T
31182 unsigned long flags;
31183 int Channel, TargetID;
31184
31185 + pax_track_stack();
31186 +
31187 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
31188 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
31189 sizeof(DAC960_SCSI_Inquiry_T) +
31190 diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
31191 index 68b90d9..7e2e3f3 100644
31192 --- a/drivers/block/cciss.c
31193 +++ b/drivers/block/cciss.c
31194 @@ -1011,6 +1011,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
31195 int err;
31196 u32 cp;
31197
31198 + memset(&arg64, 0, sizeof(arg64));
31199 +
31200 err = 0;
31201 err |=
31202 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
31203 @@ -2852,7 +2854,7 @@ static unsigned long pollcomplete(int ctlr)
31204 /* Wait (up to 20 seconds) for a command to complete */
31205
31206 for (i = 20 * HZ; i > 0; i--) {
31207 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
31208 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
31209 if (done == FIFO_EMPTY)
31210 schedule_timeout_uninterruptible(1);
31211 else
31212 @@ -2876,7 +2878,7 @@ static int sendcmd_core(ctlr_info_t *h, CommandList_struct *c)
31213 resend_cmd1:
31214
31215 /* Disable interrupt on the board. */
31216 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
31217 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
31218
31219 /* Make sure there is room in the command FIFO */
31220 /* Actually it should be completely empty at this time */
31221 @@ -2884,13 +2886,13 @@ resend_cmd1:
31222 /* tape side of the driver. */
31223 for (i = 200000; i > 0; i--) {
31224 /* if fifo isn't full go */
31225 - if (!(h->access.fifo_full(h)))
31226 + if (!(h->access->fifo_full(h)))
31227 break;
31228 udelay(10);
31229 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
31230 " waiting!\n", h->ctlr);
31231 }
31232 - h->access.submit_command(h, c); /* Send the cmd */
31233 + h->access->submit_command(h, c); /* Send the cmd */
31234 do {
31235 complete = pollcomplete(h->ctlr);
31236
31237 @@ -3023,7 +3025,7 @@ static void start_io(ctlr_info_t *h)
31238 while (!hlist_empty(&h->reqQ)) {
31239 c = hlist_entry(h->reqQ.first, CommandList_struct, list);
31240 /* can't do anything if fifo is full */
31241 - if ((h->access.fifo_full(h))) {
31242 + if ((h->access->fifo_full(h))) {
31243 printk(KERN_WARNING "cciss: fifo full\n");
31244 break;
31245 }
31246 @@ -3033,7 +3035,7 @@ static void start_io(ctlr_info_t *h)
31247 h->Qdepth--;
31248
31249 /* Tell the controller execute command */
31250 - h->access.submit_command(h, c);
31251 + h->access->submit_command(h, c);
31252
31253 /* Put job onto the completed Q */
31254 addQ(&h->cmpQ, c);
31255 @@ -3393,17 +3395,17 @@ startio:
31256
31257 static inline unsigned long get_next_completion(ctlr_info_t *h)
31258 {
31259 - return h->access.command_completed(h);
31260 + return h->access->command_completed(h);
31261 }
31262
31263 static inline int interrupt_pending(ctlr_info_t *h)
31264 {
31265 - return h->access.intr_pending(h);
31266 + return h->access->intr_pending(h);
31267 }
31268
31269 static inline long interrupt_not_for_us(ctlr_info_t *h)
31270 {
31271 - return (((h->access.intr_pending(h) == 0) ||
31272 + return (((h->access->intr_pending(h) == 0) ||
31273 (h->interrupts_enabled == 0)));
31274 }
31275
31276 @@ -3892,7 +3894,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
31277 */
31278 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
31279 c->product_name = products[prod_index].product_name;
31280 - c->access = *(products[prod_index].access);
31281 + c->access = products[prod_index].access;
31282 c->nr_cmds = c->max_commands - 4;
31283 if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
31284 (readb(&c->cfgtable->Signature[1]) != 'I') ||
31285 @@ -4291,7 +4293,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
31286 }
31287
31288 /* make sure the board interrupts are off */
31289 - hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
31290 + hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_OFF);
31291 if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
31292 IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) {
31293 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
31294 @@ -4341,7 +4343,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
31295 cciss_scsi_setup(i);
31296
31297 /* Turn the interrupts on so we can service requests */
31298 - hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
31299 + hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_ON);
31300
31301 /* Get the firmware version */
31302 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
31303 diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
31304 index 04d6bf8..36e712d 100644
31305 --- a/drivers/block/cciss.h
31306 +++ b/drivers/block/cciss.h
31307 @@ -90,7 +90,7 @@ struct ctlr_info
31308 // information about each logical volume
31309 drive_info_struct *drv[CISS_MAX_LUN];
31310
31311 - struct access_method access;
31312 + struct access_method *access;
31313
31314 /* queue and queue Info */
31315 struct hlist_head reqQ;
31316 diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
31317 index 6422651..bb1bdef 100644
31318 --- a/drivers/block/cpqarray.c
31319 +++ b/drivers/block/cpqarray.c
31320 @@ -402,7 +402,7 @@ static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
31321 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
31322 goto Enomem4;
31323 }
31324 - hba[i]->access.set_intr_mask(hba[i], 0);
31325 + hba[i]->access->set_intr_mask(hba[i], 0);
31326 if (request_irq(hba[i]->intr, do_ida_intr,
31327 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
31328 {
31329 @@ -460,7 +460,7 @@ static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
31330 add_timer(&hba[i]->timer);
31331
31332 /* Enable IRQ now that spinlock and rate limit timer are set up */
31333 - hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
31334 + hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
31335
31336 for(j=0; j<NWD; j++) {
31337 struct gendisk *disk = ida_gendisk[i][j];
31338 @@ -695,7 +695,7 @@ DBGINFO(
31339 for(i=0; i<NR_PRODUCTS; i++) {
31340 if (board_id == products[i].board_id) {
31341 c->product_name = products[i].product_name;
31342 - c->access = *(products[i].access);
31343 + c->access = products[i].access;
31344 break;
31345 }
31346 }
31347 @@ -793,7 +793,7 @@ static int __init cpqarray_eisa_detect(void)
31348 hba[ctlr]->intr = intr;
31349 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
31350 hba[ctlr]->product_name = products[j].product_name;
31351 - hba[ctlr]->access = *(products[j].access);
31352 + hba[ctlr]->access = products[j].access;
31353 hba[ctlr]->ctlr = ctlr;
31354 hba[ctlr]->board_id = board_id;
31355 hba[ctlr]->pci_dev = NULL; /* not PCI */
31356 @@ -896,6 +896,8 @@ static void do_ida_request(struct request_queue *q)
31357 struct scatterlist tmp_sg[SG_MAX];
31358 int i, dir, seg;
31359
31360 + pax_track_stack();
31361 +
31362 if (blk_queue_plugged(q))
31363 goto startio;
31364
31365 @@ -968,7 +970,7 @@ static void start_io(ctlr_info_t *h)
31366
31367 while((c = h->reqQ) != NULL) {
31368 /* Can't do anything if we're busy */
31369 - if (h->access.fifo_full(h) == 0)
31370 + if (h->access->fifo_full(h) == 0)
31371 return;
31372
31373 /* Get the first entry from the request Q */
31374 @@ -976,7 +978,7 @@ static void start_io(ctlr_info_t *h)
31375 h->Qdepth--;
31376
31377 /* Tell the controller to do our bidding */
31378 - h->access.submit_command(h, c);
31379 + h->access->submit_command(h, c);
31380
31381 /* Get onto the completion Q */
31382 addQ(&h->cmpQ, c);
31383 @@ -1038,7 +1040,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
31384 unsigned long flags;
31385 __u32 a,a1;
31386
31387 - istat = h->access.intr_pending(h);
31388 + istat = h->access->intr_pending(h);
31389 /* Is this interrupt for us? */
31390 if (istat == 0)
31391 return IRQ_NONE;
31392 @@ -1049,7 +1051,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
31393 */
31394 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
31395 if (istat & FIFO_NOT_EMPTY) {
31396 - while((a = h->access.command_completed(h))) {
31397 + while((a = h->access->command_completed(h))) {
31398 a1 = a; a &= ~3;
31399 if ((c = h->cmpQ) == NULL)
31400 {
31401 @@ -1434,11 +1436,11 @@ static int sendcmd(
31402 /*
31403 * Disable interrupt
31404 */
31405 - info_p->access.set_intr_mask(info_p, 0);
31406 + info_p->access->set_intr_mask(info_p, 0);
31407 /* Make sure there is room in the command FIFO */
31408 /* Actually it should be completely empty at this time. */
31409 for (i = 200000; i > 0; i--) {
31410 - temp = info_p->access.fifo_full(info_p);
31411 + temp = info_p->access->fifo_full(info_p);
31412 if (temp != 0) {
31413 break;
31414 }
31415 @@ -1451,7 +1453,7 @@ DBG(
31416 /*
31417 * Send the cmd
31418 */
31419 - info_p->access.submit_command(info_p, c);
31420 + info_p->access->submit_command(info_p, c);
31421 complete = pollcomplete(ctlr);
31422
31423 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
31424 @@ -1534,9 +1536,9 @@ static int revalidate_allvol(ctlr_info_t *host)
31425 * we check the new geometry. Then turn interrupts back on when
31426 * we're done.
31427 */
31428 - host->access.set_intr_mask(host, 0);
31429 + host->access->set_intr_mask(host, 0);
31430 getgeometry(ctlr);
31431 - host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
31432 + host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
31433
31434 for(i=0; i<NWD; i++) {
31435 struct gendisk *disk = ida_gendisk[ctlr][i];
31436 @@ -1576,7 +1578,7 @@ static int pollcomplete(int ctlr)
31437 /* Wait (up to 2 seconds) for a command to complete */
31438
31439 for (i = 200000; i > 0; i--) {
31440 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
31441 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
31442 if (done == 0) {
31443 udelay(10); /* a short fixed delay */
31444 } else
31445 diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
31446 index be73e9d..7fbf140 100644
31447 --- a/drivers/block/cpqarray.h
31448 +++ b/drivers/block/cpqarray.h
31449 @@ -99,7 +99,7 @@ struct ctlr_info {
31450 drv_info_t drv[NWD];
31451 struct proc_dir_entry *proc;
31452
31453 - struct access_method access;
31454 + struct access_method *access;
31455
31456 cmdlist_t *reqQ;
31457 cmdlist_t *cmpQ;
31458 diff --git a/drivers/block/loop.c b/drivers/block/loop.c
31459 index 8ec2d70..2804b30 100644
31460 --- a/drivers/block/loop.c
31461 +++ b/drivers/block/loop.c
31462 @@ -282,7 +282,7 @@ static int __do_lo_send_write(struct file *file,
31463 mm_segment_t old_fs = get_fs();
31464
31465 set_fs(get_ds());
31466 - bw = file->f_op->write(file, buf, len, &pos);
31467 + bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
31468 set_fs(old_fs);
31469 if (likely(bw == len))
31470 return 0;
31471 diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
31472 index 26ada47..083c480 100644
31473 --- a/drivers/block/nbd.c
31474 +++ b/drivers/block/nbd.c
31475 @@ -155,6 +155,8 @@ static int sock_xmit(struct nbd_device *lo, int send, void *buf, int size,
31476 struct kvec iov;
31477 sigset_t blocked, oldset;
31478
31479 + pax_track_stack();
31480 +
31481 if (unlikely(!sock)) {
31482 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
31483 lo->disk->disk_name, (send ? "send" : "recv"));
31484 @@ -569,6 +571,8 @@ static void do_nbd_request(struct request_queue *q)
31485 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
31486 unsigned int cmd, unsigned long arg)
31487 {
31488 + pax_track_stack();
31489 +
31490 switch (cmd) {
31491 case NBD_DISCONNECT: {
31492 struct request sreq;
31493 diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
31494 index a5d585d..d087be3 100644
31495 --- a/drivers/block/pktcdvd.c
31496 +++ b/drivers/block/pktcdvd.c
31497 @@ -284,7 +284,7 @@ static ssize_t kobj_pkt_store(struct kobject *kobj,
31498 return len;
31499 }
31500
31501 -static struct sysfs_ops kobj_pkt_ops = {
31502 +static const struct sysfs_ops kobj_pkt_ops = {
31503 .show = kobj_pkt_show,
31504 .store = kobj_pkt_store
31505 };
31506 diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
31507 index 6aad99e..89cd142 100644
31508 --- a/drivers/char/Kconfig
31509 +++ b/drivers/char/Kconfig
31510 @@ -90,7 +90,8 @@ config VT_HW_CONSOLE_BINDING
31511
31512 config DEVKMEM
31513 bool "/dev/kmem virtual device support"
31514 - default y
31515 + default n
31516 + depends on !GRKERNSEC_KMEM
31517 help
31518 Say Y here if you want to support the /dev/kmem device. The
31519 /dev/kmem device is rarely used, but can be used for certain
31520 @@ -1114,6 +1115,7 @@ config DEVPORT
31521 bool
31522 depends on !M68K
31523 depends on ISA || PCI
31524 + depends on !GRKERNSEC_KMEM
31525 default y
31526
31527 source "drivers/s390/char/Kconfig"
31528 diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
31529 index a96f319..a778a5b 100644
31530 --- a/drivers/char/agp/frontend.c
31531 +++ b/drivers/char/agp/frontend.c
31532 @@ -824,7 +824,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
31533 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
31534 return -EFAULT;
31535
31536 - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
31537 + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
31538 return -EFAULT;
31539
31540 client = agp_find_client_by_pid(reserve.pid);
31541 diff --git a/drivers/char/briq_panel.c b/drivers/char/briq_panel.c
31542 index d8cff90..9628e70 100644
31543 --- a/drivers/char/briq_panel.c
31544 +++ b/drivers/char/briq_panel.c
31545 @@ -10,6 +10,7 @@
31546 #include <linux/types.h>
31547 #include <linux/errno.h>
31548 #include <linux/tty.h>
31549 +#include <linux/mutex.h>
31550 #include <linux/timer.h>
31551 #include <linux/kernel.h>
31552 #include <linux/wait.h>
31553 @@ -36,6 +37,7 @@ static int vfd_is_open;
31554 static unsigned char vfd[40];
31555 static int vfd_cursor;
31556 static unsigned char ledpb, led;
31557 +static DEFINE_MUTEX(vfd_mutex);
31558
31559 static void update_vfd(void)
31560 {
31561 @@ -142,12 +144,15 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
31562 if (!vfd_is_open)
31563 return -EBUSY;
31564
31565 + mutex_lock(&vfd_mutex);
31566 for (;;) {
31567 char c;
31568 if (!indx)
31569 break;
31570 - if (get_user(c, buf))
31571 + if (get_user(c, buf)) {
31572 + mutex_unlock(&vfd_mutex);
31573 return -EFAULT;
31574 + }
31575 if (esc) {
31576 set_led(c);
31577 esc = 0;
31578 @@ -177,6 +182,7 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
31579 buf++;
31580 }
31581 update_vfd();
31582 + mutex_unlock(&vfd_mutex);
31583
31584 return len;
31585 }
31586 diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
31587 index 31e7c91..161afc0 100644
31588 --- a/drivers/char/genrtc.c
31589 +++ b/drivers/char/genrtc.c
31590 @@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct inode *inode, struct file *file,
31591 switch (cmd) {
31592
31593 case RTC_PLL_GET:
31594 + memset(&pll, 0, sizeof(pll));
31595 if (get_rtc_pll(&pll))
31596 return -EINVAL;
31597 else
31598 diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
31599 index 006466d..a2bb21c 100644
31600 --- a/drivers/char/hpet.c
31601 +++ b/drivers/char/hpet.c
31602 @@ -430,7 +430,7 @@ static int hpet_release(struct inode *inode, struct file *file)
31603 return 0;
31604 }
31605
31606 -static int hpet_ioctl_common(struct hpet_dev *, int, unsigned long, int);
31607 +static int hpet_ioctl_common(struct hpet_dev *, unsigned int, unsigned long, int);
31608
31609 static int
31610 hpet_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
31611 @@ -565,7 +565,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
31612 }
31613
31614 static int
31615 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
31616 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg, int kernel)
31617 {
31618 struct hpet_timer __iomem *timer;
31619 struct hpet __iomem *hpet;
31620 @@ -608,11 +608,11 @@ hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
31621 {
31622 struct hpet_info info;
31623
31624 + memset(&info, 0, sizeof(info));
31625 +
31626 if (devp->hd_ireqfreq)
31627 info.hi_ireqfreq =
31628 hpet_time_div(hpetp, devp->hd_ireqfreq);
31629 - else
31630 - info.hi_ireqfreq = 0;
31631 info.hi_flags =
31632 readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK;
31633 info.hi_hpet = hpetp->hp_which;
31634 diff --git a/drivers/char/hvc_beat.c b/drivers/char/hvc_beat.c
31635 index 0afc8b8..6913fc3 100644
31636 --- a/drivers/char/hvc_beat.c
31637 +++ b/drivers/char/hvc_beat.c
31638 @@ -84,7 +84,7 @@ static int hvc_beat_put_chars(uint32_t vtermno, const char *buf, int cnt)
31639 return cnt;
31640 }
31641
31642 -static struct hv_ops hvc_beat_get_put_ops = {
31643 +static const struct hv_ops hvc_beat_get_put_ops = {
31644 .get_chars = hvc_beat_get_chars,
31645 .put_chars = hvc_beat_put_chars,
31646 };
31647 diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
31648 index 98097f2..407dddc 100644
31649 --- a/drivers/char/hvc_console.c
31650 +++ b/drivers/char/hvc_console.c
31651 @@ -125,7 +125,7 @@ static struct hvc_struct *hvc_get_by_index(int index)
31652 * console interfaces but can still be used as a tty device. This has to be
31653 * static because kmalloc will not work during early console init.
31654 */
31655 -static struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
31656 +static const struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
31657 static uint32_t vtermnos[MAX_NR_HVC_CONSOLES] =
31658 {[0 ... MAX_NR_HVC_CONSOLES - 1] = -1};
31659
31660 @@ -249,7 +249,7 @@ static void destroy_hvc_struct(struct kref *kref)
31661 * vty adapters do NOT get an hvc_instantiate() callback since they
31662 * appear after early console init.
31663 */
31664 -int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops)
31665 +int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops)
31666 {
31667 struct hvc_struct *hp;
31668
31669 @@ -758,7 +758,7 @@ static const struct tty_operations hvc_ops = {
31670 };
31671
31672 struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int data,
31673 - struct hv_ops *ops, int outbuf_size)
31674 + const struct hv_ops *ops, int outbuf_size)
31675 {
31676 struct hvc_struct *hp;
31677 int i;
31678 diff --git a/drivers/char/hvc_console.h b/drivers/char/hvc_console.h
31679 index 10950ca..ed176c3 100644
31680 --- a/drivers/char/hvc_console.h
31681 +++ b/drivers/char/hvc_console.h
31682 @@ -55,7 +55,7 @@ struct hvc_struct {
31683 int outbuf_size;
31684 int n_outbuf;
31685 uint32_t vtermno;
31686 - struct hv_ops *ops;
31687 + const struct hv_ops *ops;
31688 int irq_requested;
31689 int data;
31690 struct winsize ws;
31691 @@ -76,11 +76,11 @@ struct hv_ops {
31692 };
31693
31694 /* Register a vterm and a slot index for use as a console (console_init) */
31695 -extern int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops);
31696 +extern int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops);
31697
31698 /* register a vterm for hvc tty operation (module_init or hotplug add) */
31699 extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int data,
31700 - struct hv_ops *ops, int outbuf_size);
31701 + const struct hv_ops *ops, int outbuf_size);
31702 /* remove a vterm from hvc tty operation (module_exit or hotplug remove) */
31703 extern int hvc_remove(struct hvc_struct *hp);
31704
31705 diff --git a/drivers/char/hvc_iseries.c b/drivers/char/hvc_iseries.c
31706 index 936d05b..fd02426 100644
31707 --- a/drivers/char/hvc_iseries.c
31708 +++ b/drivers/char/hvc_iseries.c
31709 @@ -197,7 +197,7 @@ done:
31710 return sent;
31711 }
31712
31713 -static struct hv_ops hvc_get_put_ops = {
31714 +static const struct hv_ops hvc_get_put_ops = {
31715 .get_chars = get_chars,
31716 .put_chars = put_chars,
31717 .notifier_add = notifier_add_irq,
31718 diff --git a/drivers/char/hvc_iucv.c b/drivers/char/hvc_iucv.c
31719 index b0e168f..69cda2a 100644
31720 --- a/drivers/char/hvc_iucv.c
31721 +++ b/drivers/char/hvc_iucv.c
31722 @@ -924,7 +924,7 @@ static int hvc_iucv_pm_restore_thaw(struct device *dev)
31723
31724
31725 /* HVC operations */
31726 -static struct hv_ops hvc_iucv_ops = {
31727 +static const struct hv_ops hvc_iucv_ops = {
31728 .get_chars = hvc_iucv_get_chars,
31729 .put_chars = hvc_iucv_put_chars,
31730 .notifier_add = hvc_iucv_notifier_add,
31731 diff --git a/drivers/char/hvc_rtas.c b/drivers/char/hvc_rtas.c
31732 index 88590d0..61c4a61 100644
31733 --- a/drivers/char/hvc_rtas.c
31734 +++ b/drivers/char/hvc_rtas.c
31735 @@ -71,7 +71,7 @@ static int hvc_rtas_read_console(uint32_t vtermno, char *buf, int count)
31736 return i;
31737 }
31738
31739 -static struct hv_ops hvc_rtas_get_put_ops = {
31740 +static const struct hv_ops hvc_rtas_get_put_ops = {
31741 .get_chars = hvc_rtas_read_console,
31742 .put_chars = hvc_rtas_write_console,
31743 };
31744 diff --git a/drivers/char/hvc_udbg.c b/drivers/char/hvc_udbg.c
31745 index bd63ba8..b0957e6 100644
31746 --- a/drivers/char/hvc_udbg.c
31747 +++ b/drivers/char/hvc_udbg.c
31748 @@ -58,7 +58,7 @@ static int hvc_udbg_get(uint32_t vtermno, char *buf, int count)
31749 return i;
31750 }
31751
31752 -static struct hv_ops hvc_udbg_ops = {
31753 +static const struct hv_ops hvc_udbg_ops = {
31754 .get_chars = hvc_udbg_get,
31755 .put_chars = hvc_udbg_put,
31756 };
31757 diff --git a/drivers/char/hvc_vio.c b/drivers/char/hvc_vio.c
31758 index 10be343..27370e9 100644
31759 --- a/drivers/char/hvc_vio.c
31760 +++ b/drivers/char/hvc_vio.c
31761 @@ -77,7 +77,7 @@ static int filtered_get_chars(uint32_t vtermno, char *buf, int count)
31762 return got;
31763 }
31764
31765 -static struct hv_ops hvc_get_put_ops = {
31766 +static const struct hv_ops hvc_get_put_ops = {
31767 .get_chars = filtered_get_chars,
31768 .put_chars = hvc_put_chars,
31769 .notifier_add = notifier_add_irq,
31770 diff --git a/drivers/char/hvc_xen.c b/drivers/char/hvc_xen.c
31771 index a6ee32b..94f8c26 100644
31772 --- a/drivers/char/hvc_xen.c
31773 +++ b/drivers/char/hvc_xen.c
31774 @@ -120,7 +120,7 @@ static int read_console(uint32_t vtermno, char *buf, int len)
31775 return recv;
31776 }
31777
31778 -static struct hv_ops hvc_ops = {
31779 +static const struct hv_ops hvc_ops = {
31780 .get_chars = read_console,
31781 .put_chars = write_console,
31782 .notifier_add = notifier_add_irq,
31783 diff --git a/drivers/char/hvcs.c b/drivers/char/hvcs.c
31784 index 266b858..f3ee0bb 100644
31785 --- a/drivers/char/hvcs.c
31786 +++ b/drivers/char/hvcs.c
31787 @@ -82,6 +82,7 @@
31788 #include <asm/hvcserver.h>
31789 #include <asm/uaccess.h>
31790 #include <asm/vio.h>
31791 +#include <asm/local.h>
31792
31793 /*
31794 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
31795 @@ -269,7 +270,7 @@ struct hvcs_struct {
31796 unsigned int index;
31797
31798 struct tty_struct *tty;
31799 - int open_count;
31800 + local_t open_count;
31801
31802 /*
31803 * Used to tell the driver kernel_thread what operations need to take
31804 @@ -419,7 +420,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
31805
31806 spin_lock_irqsave(&hvcsd->lock, flags);
31807
31808 - if (hvcsd->open_count > 0) {
31809 + if (local_read(&hvcsd->open_count) > 0) {
31810 spin_unlock_irqrestore(&hvcsd->lock, flags);
31811 printk(KERN_INFO "HVCS: vterm state unchanged. "
31812 "The hvcs device node is still in use.\n");
31813 @@ -1135,7 +1136,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
31814 if ((retval = hvcs_partner_connect(hvcsd)))
31815 goto error_release;
31816
31817 - hvcsd->open_count = 1;
31818 + local_set(&hvcsd->open_count, 1);
31819 hvcsd->tty = tty;
31820 tty->driver_data = hvcsd;
31821
31822 @@ -1169,7 +1170,7 @@ fast_open:
31823
31824 spin_lock_irqsave(&hvcsd->lock, flags);
31825 kref_get(&hvcsd->kref);
31826 - hvcsd->open_count++;
31827 + local_inc(&hvcsd->open_count);
31828 hvcsd->todo_mask |= HVCS_SCHED_READ;
31829 spin_unlock_irqrestore(&hvcsd->lock, flags);
31830
31831 @@ -1213,7 +1214,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
31832 hvcsd = tty->driver_data;
31833
31834 spin_lock_irqsave(&hvcsd->lock, flags);
31835 - if (--hvcsd->open_count == 0) {
31836 + if (local_dec_and_test(&hvcsd->open_count)) {
31837
31838 vio_disable_interrupts(hvcsd->vdev);
31839
31840 @@ -1239,10 +1240,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
31841 free_irq(irq, hvcsd);
31842 kref_put(&hvcsd->kref, destroy_hvcs_struct);
31843 return;
31844 - } else if (hvcsd->open_count < 0) {
31845 + } else if (local_read(&hvcsd->open_count) < 0) {
31846 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
31847 " is missmanaged.\n",
31848 - hvcsd->vdev->unit_address, hvcsd->open_count);
31849 + hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
31850 }
31851
31852 spin_unlock_irqrestore(&hvcsd->lock, flags);
31853 @@ -1258,7 +1259,7 @@ static void hvcs_hangup(struct tty_struct * tty)
31854
31855 spin_lock_irqsave(&hvcsd->lock, flags);
31856 /* Preserve this so that we know how many kref refs to put */
31857 - temp_open_count = hvcsd->open_count;
31858 + temp_open_count = local_read(&hvcsd->open_count);
31859
31860 /*
31861 * Don't kref put inside the spinlock because the destruction
31862 @@ -1273,7 +1274,7 @@ static void hvcs_hangup(struct tty_struct * tty)
31863 hvcsd->tty->driver_data = NULL;
31864 hvcsd->tty = NULL;
31865
31866 - hvcsd->open_count = 0;
31867 + local_set(&hvcsd->open_count, 0);
31868
31869 /* This will drop any buffered data on the floor which is OK in a hangup
31870 * scenario. */
31871 @@ -1344,7 +1345,7 @@ static int hvcs_write(struct tty_struct *tty,
31872 * the middle of a write operation? This is a crummy place to do this
31873 * but we want to keep it all in the spinlock.
31874 */
31875 - if (hvcsd->open_count <= 0) {
31876 + if (local_read(&hvcsd->open_count) <= 0) {
31877 spin_unlock_irqrestore(&hvcsd->lock, flags);
31878 return -ENODEV;
31879 }
31880 @@ -1418,7 +1419,7 @@ static int hvcs_write_room(struct tty_struct *tty)
31881 {
31882 struct hvcs_struct *hvcsd = tty->driver_data;
31883
31884 - if (!hvcsd || hvcsd->open_count <= 0)
31885 + if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
31886 return 0;
31887
31888 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
31889 diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
31890 index ec5e3f8..02455ba 100644
31891 --- a/drivers/char/ipmi/ipmi_msghandler.c
31892 +++ b/drivers/char/ipmi/ipmi_msghandler.c
31893 @@ -414,7 +414,7 @@ struct ipmi_smi {
31894 struct proc_dir_entry *proc_dir;
31895 char proc_dir_name[10];
31896
31897 - atomic_t stats[IPMI_NUM_STATS];
31898 + atomic_unchecked_t stats[IPMI_NUM_STATS];
31899
31900 /*
31901 * run_to_completion duplicate of smb_info, smi_info
31902 @@ -447,9 +447,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
31903
31904
31905 #define ipmi_inc_stat(intf, stat) \
31906 - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
31907 + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
31908 #define ipmi_get_stat(intf, stat) \
31909 - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
31910 + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
31911
31912 static int is_lan_addr(struct ipmi_addr *addr)
31913 {
31914 @@ -2808,7 +2808,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
31915 INIT_LIST_HEAD(&intf->cmd_rcvrs);
31916 init_waitqueue_head(&intf->waitq);
31917 for (i = 0; i < IPMI_NUM_STATS; i++)
31918 - atomic_set(&intf->stats[i], 0);
31919 + atomic_set_unchecked(&intf->stats[i], 0);
31920
31921 intf->proc_dir = NULL;
31922
31923 @@ -4160,6 +4160,8 @@ static void send_panic_events(char *str)
31924 struct ipmi_smi_msg smi_msg;
31925 struct ipmi_recv_msg recv_msg;
31926
31927 + pax_track_stack();
31928 +
31929 si = (struct ipmi_system_interface_addr *) &addr;
31930 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
31931 si->channel = IPMI_BMC_CHANNEL;
31932 diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
31933 index abae8c9..8021979 100644
31934 --- a/drivers/char/ipmi/ipmi_si_intf.c
31935 +++ b/drivers/char/ipmi/ipmi_si_intf.c
31936 @@ -277,7 +277,7 @@ struct smi_info {
31937 unsigned char slave_addr;
31938
31939 /* Counters and things for the proc filesystem. */
31940 - atomic_t stats[SI_NUM_STATS];
31941 + atomic_unchecked_t stats[SI_NUM_STATS];
31942
31943 struct task_struct *thread;
31944
31945 @@ -285,9 +285,9 @@ struct smi_info {
31946 };
31947
31948 #define smi_inc_stat(smi, stat) \
31949 - atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
31950 + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
31951 #define smi_get_stat(smi, stat) \
31952 - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
31953 + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
31954
31955 #define SI_MAX_PARMS 4
31956
31957 @@ -2931,7 +2931,7 @@ static int try_smi_init(struct smi_info *new_smi)
31958 atomic_set(&new_smi->req_events, 0);
31959 new_smi->run_to_completion = 0;
31960 for (i = 0; i < SI_NUM_STATS; i++)
31961 - atomic_set(&new_smi->stats[i], 0);
31962 + atomic_set_unchecked(&new_smi->stats[i], 0);
31963
31964 new_smi->interrupt_disabled = 0;
31965 atomic_set(&new_smi->stop_operation, 0);
31966 diff --git a/drivers/char/istallion.c b/drivers/char/istallion.c
31967 index 402838f..55e2200 100644
31968 --- a/drivers/char/istallion.c
31969 +++ b/drivers/char/istallion.c
31970 @@ -187,7 +187,6 @@ static struct ktermios stli_deftermios = {
31971 * re-used for each stats call.
31972 */
31973 static comstats_t stli_comstats;
31974 -static combrd_t stli_brdstats;
31975 static struct asystats stli_cdkstats;
31976
31977 /*****************************************************************************/
31978 @@ -4058,6 +4057,7 @@ static int stli_getbrdstats(combrd_t __user *bp)
31979 {
31980 struct stlibrd *brdp;
31981 unsigned int i;
31982 + combrd_t stli_brdstats;
31983
31984 if (copy_from_user(&stli_brdstats, bp, sizeof(combrd_t)))
31985 return -EFAULT;
31986 @@ -4269,6 +4269,8 @@ static int stli_getportstruct(struct stliport __user *arg)
31987 struct stliport stli_dummyport;
31988 struct stliport *portp;
31989
31990 + pax_track_stack();
31991 +
31992 if (copy_from_user(&stli_dummyport, arg, sizeof(struct stliport)))
31993 return -EFAULT;
31994 portp = stli_getport(stli_dummyport.brdnr, stli_dummyport.panelnr,
31995 @@ -4291,6 +4293,8 @@ static int stli_getbrdstruct(struct stlibrd __user *arg)
31996 struct stlibrd stli_dummybrd;
31997 struct stlibrd *brdp;
31998
31999 + pax_track_stack();
32000 +
32001 if (copy_from_user(&stli_dummybrd, arg, sizeof(struct stlibrd)))
32002 return -EFAULT;
32003 if (stli_dummybrd.brdnr >= STL_MAXBRDS)
32004 diff --git a/drivers/char/keyboard.c b/drivers/char/keyboard.c
32005 index 950837c..e55a288 100644
32006 --- a/drivers/char/keyboard.c
32007 +++ b/drivers/char/keyboard.c
32008 @@ -635,6 +635,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
32009 kbd->kbdmode == VC_MEDIUMRAW) &&
32010 value != KVAL(K_SAK))
32011 return; /* SAK is allowed even in raw mode */
32012 +
32013 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
32014 + {
32015 + void *func = fn_handler[value];
32016 + if (func == fn_show_state || func == fn_show_ptregs ||
32017 + func == fn_show_mem)
32018 + return;
32019 + }
32020 +#endif
32021 +
32022 fn_handler[value](vc);
32023 }
32024
32025 @@ -1386,7 +1396,7 @@ static const struct input_device_id kbd_ids[] = {
32026 .evbit = { BIT_MASK(EV_SND) },
32027 },
32028
32029 - { }, /* Terminating entry */
32030 + { 0 }, /* Terminating entry */
32031 };
32032
32033 MODULE_DEVICE_TABLE(input, kbd_ids);
32034 diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
32035 index 87c67b4..230527a 100644
32036 --- a/drivers/char/mbcs.c
32037 +++ b/drivers/char/mbcs.c
32038 @@ -799,7 +799,7 @@ static int mbcs_remove(struct cx_dev *dev)
32039 return 0;
32040 }
32041
32042 -static const struct cx_device_id __devinitdata mbcs_id_table[] = {
32043 +static const struct cx_device_id __devinitconst mbcs_id_table[] = {
32044 {
32045 .part_num = MBCS_PART_NUM,
32046 .mfg_num = MBCS_MFG_NUM,
32047 diff --git a/drivers/char/mem.c b/drivers/char/mem.c
32048 index 1270f64..8495f49 100644
32049 --- a/drivers/char/mem.c
32050 +++ b/drivers/char/mem.c
32051 @@ -18,6 +18,7 @@
32052 #include <linux/raw.h>
32053 #include <linux/tty.h>
32054 #include <linux/capability.h>
32055 +#include <linux/security.h>
32056 #include <linux/ptrace.h>
32057 #include <linux/device.h>
32058 #include <linux/highmem.h>
32059 @@ -35,6 +36,10 @@
32060 # include <linux/efi.h>
32061 #endif
32062
32063 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
32064 +extern struct file_operations grsec_fops;
32065 +#endif
32066 +
32067 static inline unsigned long size_inside_page(unsigned long start,
32068 unsigned long size)
32069 {
32070 @@ -102,9 +107,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
32071
32072 while (cursor < to) {
32073 if (!devmem_is_allowed(pfn)) {
32074 +#ifdef CONFIG_GRKERNSEC_KMEM
32075 + gr_handle_mem_readwrite(from, to);
32076 +#else
32077 printk(KERN_INFO
32078 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
32079 current->comm, from, to);
32080 +#endif
32081 return 0;
32082 }
32083 cursor += PAGE_SIZE;
32084 @@ -112,6 +121,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
32085 }
32086 return 1;
32087 }
32088 +#elif defined(CONFIG_GRKERNSEC_KMEM)
32089 +static inline int range_is_allowed(unsigned long pfn, unsigned long size)
32090 +{
32091 + return 0;
32092 +}
32093 #else
32094 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
32095 {
32096 @@ -155,6 +169,8 @@ static ssize_t read_mem(struct file * file, char __user * buf,
32097 #endif
32098
32099 while (count > 0) {
32100 + char *temp;
32101 +
32102 /*
32103 * Handle first page in case it's not aligned
32104 */
32105 @@ -177,11 +193,31 @@ static ssize_t read_mem(struct file * file, char __user * buf,
32106 if (!ptr)
32107 return -EFAULT;
32108
32109 - if (copy_to_user(buf, ptr, sz)) {
32110 +#ifdef CONFIG_PAX_USERCOPY
32111 + temp = kmalloc(sz, GFP_KERNEL);
32112 + if (!temp) {
32113 + unxlate_dev_mem_ptr(p, ptr);
32114 + return -ENOMEM;
32115 + }
32116 + memcpy(temp, ptr, sz);
32117 +#else
32118 + temp = ptr;
32119 +#endif
32120 +
32121 + if (copy_to_user(buf, temp, sz)) {
32122 +
32123 +#ifdef CONFIG_PAX_USERCOPY
32124 + kfree(temp);
32125 +#endif
32126 +
32127 unxlate_dev_mem_ptr(p, ptr);
32128 return -EFAULT;
32129 }
32130
32131 +#ifdef CONFIG_PAX_USERCOPY
32132 + kfree(temp);
32133 +#endif
32134 +
32135 unxlate_dev_mem_ptr(p, ptr);
32136
32137 buf += sz;
32138 @@ -419,9 +455,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
32139 size_t count, loff_t *ppos)
32140 {
32141 unsigned long p = *ppos;
32142 - ssize_t low_count, read, sz;
32143 + ssize_t low_count, read, sz, err = 0;
32144 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
32145 - int err = 0;
32146
32147 read = 0;
32148 if (p < (unsigned long) high_memory) {
32149 @@ -444,6 +479,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
32150 }
32151 #endif
32152 while (low_count > 0) {
32153 + char *temp;
32154 +
32155 sz = size_inside_page(p, low_count);
32156
32157 /*
32158 @@ -453,7 +490,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
32159 */
32160 kbuf = xlate_dev_kmem_ptr((char *)p);
32161
32162 - if (copy_to_user(buf, kbuf, sz))
32163 +#ifdef CONFIG_PAX_USERCOPY
32164 + temp = kmalloc(sz, GFP_KERNEL);
32165 + if (!temp)
32166 + return -ENOMEM;
32167 + memcpy(temp, kbuf, sz);
32168 +#else
32169 + temp = kbuf;
32170 +#endif
32171 +
32172 + err = copy_to_user(buf, temp, sz);
32173 +
32174 +#ifdef CONFIG_PAX_USERCOPY
32175 + kfree(temp);
32176 +#endif
32177 +
32178 + if (err)
32179 return -EFAULT;
32180 buf += sz;
32181 p += sz;
32182 @@ -889,6 +941,9 @@ static const struct memdev {
32183 #ifdef CONFIG_CRASH_DUMP
32184 [12] = { "oldmem", 0, &oldmem_fops, NULL },
32185 #endif
32186 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
32187 + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
32188 +#endif
32189 };
32190
32191 static int memory_open(struct inode *inode, struct file *filp)
32192 diff --git a/drivers/char/mmtimer.c b/drivers/char/mmtimer.c
32193 index 918711a..4ffaf5e 100644
32194 --- a/drivers/char/mmtimer.c
32195 +++ b/drivers/char/mmtimer.c
32196 @@ -756,7 +756,7 @@ static int sgi_timer_set(struct k_itimer *timr, int flags,
32197 return err;
32198 }
32199
32200 -static struct k_clock sgi_clock = {
32201 +static k_clock_no_const sgi_clock = {
32202 .res = 0,
32203 .clock_set = sgi_clock_set,
32204 .clock_get = sgi_clock_get,
32205 diff --git a/drivers/char/pcmcia/ipwireless/tty.c b/drivers/char/pcmcia/ipwireless/tty.c
32206 index 674b3ab..a8d1970 100644
32207 --- a/drivers/char/pcmcia/ipwireless/tty.c
32208 +++ b/drivers/char/pcmcia/ipwireless/tty.c
32209 @@ -29,6 +29,7 @@
32210 #include <linux/tty_driver.h>
32211 #include <linux/tty_flip.h>
32212 #include <linux/uaccess.h>
32213 +#include <asm/local.h>
32214
32215 #include "tty.h"
32216 #include "network.h"
32217 @@ -51,7 +52,7 @@ struct ipw_tty {
32218 int tty_type;
32219 struct ipw_network *network;
32220 struct tty_struct *linux_tty;
32221 - int open_count;
32222 + local_t open_count;
32223 unsigned int control_lines;
32224 struct mutex ipw_tty_mutex;
32225 int tx_bytes_queued;
32226 @@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
32227 mutex_unlock(&tty->ipw_tty_mutex);
32228 return -ENODEV;
32229 }
32230 - if (tty->open_count == 0)
32231 + if (local_read(&tty->open_count) == 0)
32232 tty->tx_bytes_queued = 0;
32233
32234 - tty->open_count++;
32235 + local_inc(&tty->open_count);
32236
32237 tty->linux_tty = linux_tty;
32238 linux_tty->driver_data = tty;
32239 @@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
32240
32241 static void do_ipw_close(struct ipw_tty *tty)
32242 {
32243 - tty->open_count--;
32244 -
32245 - if (tty->open_count == 0) {
32246 + if (local_dec_return(&tty->open_count) == 0) {
32247 struct tty_struct *linux_tty = tty->linux_tty;
32248
32249 if (linux_tty != NULL) {
32250 @@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
32251 return;
32252
32253 mutex_lock(&tty->ipw_tty_mutex);
32254 - if (tty->open_count == 0) {
32255 + if (local_read(&tty->open_count) == 0) {
32256 mutex_unlock(&tty->ipw_tty_mutex);
32257 return;
32258 }
32259 @@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
32260 return;
32261 }
32262
32263 - if (!tty->open_count) {
32264 + if (!local_read(&tty->open_count)) {
32265 mutex_unlock(&tty->ipw_tty_mutex);
32266 return;
32267 }
32268 @@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *linux_tty,
32269 return -ENODEV;
32270
32271 mutex_lock(&tty->ipw_tty_mutex);
32272 - if (!tty->open_count) {
32273 + if (!local_read(&tty->open_count)) {
32274 mutex_unlock(&tty->ipw_tty_mutex);
32275 return -EINVAL;
32276 }
32277 @@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
32278 if (!tty)
32279 return -ENODEV;
32280
32281 - if (!tty->open_count)
32282 + if (!local_read(&tty->open_count))
32283 return -EINVAL;
32284
32285 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
32286 @@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
32287 if (!tty)
32288 return 0;
32289
32290 - if (!tty->open_count)
32291 + if (!local_read(&tty->open_count))
32292 return 0;
32293
32294 return tty->tx_bytes_queued;
32295 @@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty, struct file *file)
32296 if (!tty)
32297 return -ENODEV;
32298
32299 - if (!tty->open_count)
32300 + if (!local_read(&tty->open_count))
32301 return -EINVAL;
32302
32303 return get_control_lines(tty);
32304 @@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tty, struct file *file,
32305 if (!tty)
32306 return -ENODEV;
32307
32308 - if (!tty->open_count)
32309 + if (!local_read(&tty->open_count))
32310 return -EINVAL;
32311
32312 return set_control_lines(tty, set, clear);
32313 @@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty, struct file *file,
32314 if (!tty)
32315 return -ENODEV;
32316
32317 - if (!tty->open_count)
32318 + if (!local_read(&tty->open_count))
32319 return -EINVAL;
32320
32321 /* FIXME: Exactly how is the tty object locked here .. */
32322 @@ -591,7 +590,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
32323 against a parallel ioctl etc */
32324 mutex_lock(&ttyj->ipw_tty_mutex);
32325 }
32326 - while (ttyj->open_count)
32327 + while (local_read(&ttyj->open_count))
32328 do_ipw_close(ttyj);
32329 ipwireless_disassociate_network_ttys(network,
32330 ttyj->channel_idx);
32331 diff --git a/drivers/char/pty.c b/drivers/char/pty.c
32332 index 62f282e..e45c45c 100644
32333 --- a/drivers/char/pty.c
32334 +++ b/drivers/char/pty.c
32335 @@ -736,8 +736,10 @@ static void __init unix98_pty_init(void)
32336 register_sysctl_table(pty_root_table);
32337
32338 /* Now create the /dev/ptmx special device */
32339 + pax_open_kernel();
32340 tty_default_fops(&ptmx_fops);
32341 - ptmx_fops.open = ptmx_open;
32342 + *(void **)&ptmx_fops.open = ptmx_open;
32343 + pax_close_kernel();
32344
32345 cdev_init(&ptmx_cdev, &ptmx_fops);
32346 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
32347 diff --git a/drivers/char/random.c b/drivers/char/random.c
32348 index 3a19e2d..6ed09d3 100644
32349 --- a/drivers/char/random.c
32350 +++ b/drivers/char/random.c
32351 @@ -254,8 +254,13 @@
32352 /*
32353 * Configuration information
32354 */
32355 +#ifdef CONFIG_GRKERNSEC_RANDNET
32356 +#define INPUT_POOL_WORDS 512
32357 +#define OUTPUT_POOL_WORDS 128
32358 +#else
32359 #define INPUT_POOL_WORDS 128
32360 #define OUTPUT_POOL_WORDS 32
32361 +#endif
32362 #define SEC_XFER_SIZE 512
32363
32364 /*
32365 @@ -292,10 +297,17 @@ static struct poolinfo {
32366 int poolwords;
32367 int tap1, tap2, tap3, tap4, tap5;
32368 } poolinfo_table[] = {
32369 +#ifdef CONFIG_GRKERNSEC_RANDNET
32370 + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
32371 + { 512, 411, 308, 208, 104, 1 },
32372 + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
32373 + { 128, 103, 76, 51, 25, 1 },
32374 +#else
32375 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
32376 { 128, 103, 76, 51, 25, 1 },
32377 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
32378 { 32, 26, 20, 14, 7, 1 },
32379 +#endif
32380 #if 0
32381 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
32382 { 2048, 1638, 1231, 819, 411, 1 },
32383 @@ -1209,7 +1221,7 @@ EXPORT_SYMBOL(generate_random_uuid);
32384 #include <linux/sysctl.h>
32385
32386 static int min_read_thresh = 8, min_write_thresh;
32387 -static int max_read_thresh = INPUT_POOL_WORDS * 32;
32388 +static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
32389 static int max_write_thresh = INPUT_POOL_WORDS * 32;
32390 static char sysctl_bootid[16];
32391
32392 diff --git a/drivers/char/rocket.c b/drivers/char/rocket.c
32393 index 0e29a23..0efc2c2 100644
32394 --- a/drivers/char/rocket.c
32395 +++ b/drivers/char/rocket.c
32396 @@ -1266,6 +1266,8 @@ static int get_ports(struct r_port *info, struct rocket_ports __user *retports)
32397 struct rocket_ports tmp;
32398 int board;
32399
32400 + pax_track_stack();
32401 +
32402 if (!retports)
32403 return -EFAULT;
32404 memset(&tmp, 0, sizeof (tmp));
32405 diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
32406 index 8c262aa..4d3b058 100644
32407 --- a/drivers/char/sonypi.c
32408 +++ b/drivers/char/sonypi.c
32409 @@ -55,6 +55,7 @@
32410 #include <asm/uaccess.h>
32411 #include <asm/io.h>
32412 #include <asm/system.h>
32413 +#include <asm/local.h>
32414
32415 #include <linux/sonypi.h>
32416
32417 @@ -491,7 +492,7 @@ static struct sonypi_device {
32418 spinlock_t fifo_lock;
32419 wait_queue_head_t fifo_proc_list;
32420 struct fasync_struct *fifo_async;
32421 - int open_count;
32422 + local_t open_count;
32423 int model;
32424 struct input_dev *input_jog_dev;
32425 struct input_dev *input_key_dev;
32426 @@ -895,7 +896,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
32427 static int sonypi_misc_release(struct inode *inode, struct file *file)
32428 {
32429 mutex_lock(&sonypi_device.lock);
32430 - sonypi_device.open_count--;
32431 + local_dec(&sonypi_device.open_count);
32432 mutex_unlock(&sonypi_device.lock);
32433 return 0;
32434 }
32435 @@ -905,9 +906,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
32436 lock_kernel();
32437 mutex_lock(&sonypi_device.lock);
32438 /* Flush input queue on first open */
32439 - if (!sonypi_device.open_count)
32440 + if (!local_read(&sonypi_device.open_count))
32441 kfifo_reset(sonypi_device.fifo);
32442 - sonypi_device.open_count++;
32443 + local_inc(&sonypi_device.open_count);
32444 mutex_unlock(&sonypi_device.lock);
32445 unlock_kernel();
32446 return 0;
32447 diff --git a/drivers/char/stallion.c b/drivers/char/stallion.c
32448 index db6dcfa..13834cb 100644
32449 --- a/drivers/char/stallion.c
32450 +++ b/drivers/char/stallion.c
32451 @@ -2448,6 +2448,8 @@ static int stl_getportstruct(struct stlport __user *arg)
32452 struct stlport stl_dummyport;
32453 struct stlport *portp;
32454
32455 + pax_track_stack();
32456 +
32457 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
32458 return -EFAULT;
32459 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
32460 diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
32461 index a0789f6..cea3902 100644
32462 --- a/drivers/char/tpm/tpm.c
32463 +++ b/drivers/char/tpm/tpm.c
32464 @@ -405,7 +405,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
32465 chip->vendor.req_complete_val)
32466 goto out_recv;
32467
32468 - if ((status == chip->vendor.req_canceled)) {
32469 + if (status == chip->vendor.req_canceled) {
32470 dev_err(chip->dev, "Operation Canceled\n");
32471 rc = -ECANCELED;
32472 goto out;
32473 @@ -824,6 +824,8 @@ ssize_t tpm_show_pubek(struct device *dev, struct device_attribute *attr,
32474
32475 struct tpm_chip *chip = dev_get_drvdata(dev);
32476
32477 + pax_track_stack();
32478 +
32479 tpm_cmd.header.in = tpm_readpubek_header;
32480 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
32481 "attempting to read the PUBEK");
32482 diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
32483 index bf2170f..ce8cab9 100644
32484 --- a/drivers/char/tpm/tpm_bios.c
32485 +++ b/drivers/char/tpm/tpm_bios.c
32486 @@ -172,7 +172,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
32487 event = addr;
32488
32489 if ((event->event_type == 0 && event->event_size == 0) ||
32490 - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
32491 + (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
32492 return NULL;
32493
32494 return addr;
32495 @@ -197,7 +197,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
32496 return NULL;
32497
32498 if ((event->event_type == 0 && event->event_size == 0) ||
32499 - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
32500 + (event->event_size >= limit - v - sizeof(struct tcpa_event)))
32501 return NULL;
32502
32503 (*pos)++;
32504 @@ -290,7 +290,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
32505 int i;
32506
32507 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
32508 - seq_putc(m, data[i]);
32509 + if (!seq_putc(m, data[i]))
32510 + return -EFAULT;
32511
32512 return 0;
32513 }
32514 @@ -409,8 +410,13 @@ static int read_log(struct tpm_bios_log *log)
32515 log->bios_event_log_end = log->bios_event_log + len;
32516
32517 virt = acpi_os_map_memory(start, len);
32518 + if (!virt) {
32519 + kfree(log->bios_event_log);
32520 + log->bios_event_log = NULL;
32521 + return -EFAULT;
32522 + }
32523
32524 - memcpy(log->bios_event_log, virt, len);
32525 + memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
32526
32527 acpi_os_unmap_memory(virt, len);
32528 return 0;
32529 diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
32530 index 123cedf..6664cb4 100644
32531 --- a/drivers/char/tty_io.c
32532 +++ b/drivers/char/tty_io.c
32533 @@ -146,7 +146,7 @@ static int tty_open(struct inode *, struct file *);
32534 static int tty_release(struct inode *, struct file *);
32535 long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
32536 #ifdef CONFIG_COMPAT
32537 -static long tty_compat_ioctl(struct file *file, unsigned int cmd,
32538 +long tty_compat_ioctl(struct file *file, unsigned int cmd,
32539 unsigned long arg);
32540 #else
32541 #define tty_compat_ioctl NULL
32542 @@ -1774,6 +1774,7 @@ got_driver:
32543
32544 if (IS_ERR(tty)) {
32545 mutex_unlock(&tty_mutex);
32546 + tty_driver_kref_put(driver);
32547 return PTR_ERR(tty);
32548 }
32549 }
32550 @@ -2603,8 +2604,10 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
32551 return retval;
32552 }
32553
32554 +EXPORT_SYMBOL(tty_ioctl);
32555 +
32556 #ifdef CONFIG_COMPAT
32557 -static long tty_compat_ioctl(struct file *file, unsigned int cmd,
32558 +long tty_compat_ioctl(struct file *file, unsigned int cmd,
32559 unsigned long arg)
32560 {
32561 struct inode *inode = file->f_dentry->d_inode;
32562 @@ -2628,6 +2631,8 @@ static long tty_compat_ioctl(struct file *file, unsigned int cmd,
32563
32564 return retval;
32565 }
32566 +
32567 +EXPORT_SYMBOL(tty_compat_ioctl);
32568 #endif
32569
32570 /*
32571 @@ -3073,7 +3078,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
32572
32573 void tty_default_fops(struct file_operations *fops)
32574 {
32575 - *fops = tty_fops;
32576 + memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
32577 }
32578
32579 /*
32580 diff --git a/drivers/char/tty_ldisc.c b/drivers/char/tty_ldisc.c
32581 index d814a3d..b55b9c9 100644
32582 --- a/drivers/char/tty_ldisc.c
32583 +++ b/drivers/char/tty_ldisc.c
32584 @@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *ld)
32585 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
32586 struct tty_ldisc_ops *ldo = ld->ops;
32587
32588 - ldo->refcount--;
32589 + atomic_dec(&ldo->refcount);
32590 module_put(ldo->owner);
32591 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
32592
32593 @@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
32594 spin_lock_irqsave(&tty_ldisc_lock, flags);
32595 tty_ldiscs[disc] = new_ldisc;
32596 new_ldisc->num = disc;
32597 - new_ldisc->refcount = 0;
32598 + atomic_set(&new_ldisc->refcount, 0);
32599 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
32600
32601 return ret;
32602 @@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
32603 return -EINVAL;
32604
32605 spin_lock_irqsave(&tty_ldisc_lock, flags);
32606 - if (tty_ldiscs[disc]->refcount)
32607 + if (atomic_read(&tty_ldiscs[disc]->refcount))
32608 ret = -EBUSY;
32609 else
32610 tty_ldiscs[disc] = NULL;
32611 @@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
32612 if (ldops) {
32613 ret = ERR_PTR(-EAGAIN);
32614 if (try_module_get(ldops->owner)) {
32615 - ldops->refcount++;
32616 + atomic_inc(&ldops->refcount);
32617 ret = ldops;
32618 }
32619 }
32620 @@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
32621 unsigned long flags;
32622
32623 spin_lock_irqsave(&tty_ldisc_lock, flags);
32624 - ldops->refcount--;
32625 + atomic_dec(&ldops->refcount);
32626 module_put(ldops->owner);
32627 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
32628 }
32629 diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
32630 index a035ae3..c27fe2c 100644
32631 --- a/drivers/char/virtio_console.c
32632 +++ b/drivers/char/virtio_console.c
32633 @@ -133,7 +133,9 @@ static int get_chars(u32 vtermno, char *buf, int count)
32634 * virtqueue, so we let the drivers do some boutique early-output thing. */
32635 int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int))
32636 {
32637 - virtio_cons.put_chars = put_chars;
32638 + pax_open_kernel();
32639 + *(void **)&virtio_cons.put_chars = put_chars;
32640 + pax_close_kernel();
32641 return hvc_instantiate(0, 0, &virtio_cons);
32642 }
32643
32644 @@ -213,11 +215,13 @@ static int __devinit virtcons_probe(struct virtio_device *dev)
32645 out_vq = vqs[1];
32646
32647 /* Start using the new console output. */
32648 - virtio_cons.get_chars = get_chars;
32649 - virtio_cons.put_chars = put_chars;
32650 - virtio_cons.notifier_add = notifier_add_vio;
32651 - virtio_cons.notifier_del = notifier_del_vio;
32652 - virtio_cons.notifier_hangup = notifier_del_vio;
32653 + pax_open_kernel();
32654 + *(void **)&virtio_cons.get_chars = get_chars;
32655 + *(void **)&virtio_cons.put_chars = put_chars;
32656 + *(void **)&virtio_cons.notifier_add = notifier_add_vio;
32657 + *(void **)&virtio_cons.notifier_del = notifier_del_vio;
32658 + *(void **)&virtio_cons.notifier_hangup = notifier_del_vio;
32659 + pax_close_kernel();
32660
32661 /* The first argument of hvc_alloc() is the virtual console number, so
32662 * we use zero. The second argument is the parameter for the
32663 diff --git a/drivers/char/vt.c b/drivers/char/vt.c
32664 index 0c80c68..53d59c1 100644
32665 --- a/drivers/char/vt.c
32666 +++ b/drivers/char/vt.c
32667 @@ -243,7 +243,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier);
32668
32669 static void notify_write(struct vc_data *vc, unsigned int unicode)
32670 {
32671 - struct vt_notifier_param param = { .vc = vc, unicode = unicode };
32672 + struct vt_notifier_param param = { .vc = vc, .c = unicode };
32673 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
32674 }
32675
32676 diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
32677 index 6351a26..999af95 100644
32678 --- a/drivers/char/vt_ioctl.c
32679 +++ b/drivers/char/vt_ioctl.c
32680 @@ -210,9 +210,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
32681 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
32682 return -EFAULT;
32683
32684 - if (!capable(CAP_SYS_TTY_CONFIG))
32685 - perm = 0;
32686 -
32687 switch (cmd) {
32688 case KDGKBENT:
32689 key_map = key_maps[s];
32690 @@ -224,8 +221,12 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
32691 val = (i ? K_HOLE : K_NOSUCHMAP);
32692 return put_user(val, &user_kbe->kb_value);
32693 case KDSKBENT:
32694 + if (!capable(CAP_SYS_TTY_CONFIG))
32695 + perm = 0;
32696 +
32697 if (!perm)
32698 return -EPERM;
32699 +
32700 if (!i && v == K_NOSUCHMAP) {
32701 /* deallocate map */
32702 key_map = key_maps[s];
32703 @@ -325,9 +326,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
32704 int i, j, k;
32705 int ret;
32706
32707 - if (!capable(CAP_SYS_TTY_CONFIG))
32708 - perm = 0;
32709 -
32710 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
32711 if (!kbs) {
32712 ret = -ENOMEM;
32713 @@ -361,6 +359,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
32714 kfree(kbs);
32715 return ((p && *p) ? -EOVERFLOW : 0);
32716 case KDSKBSENT:
32717 + if (!capable(CAP_SYS_TTY_CONFIG))
32718 + perm = 0;
32719 +
32720 if (!perm) {
32721 ret = -EPERM;
32722 goto reterr;
32723 diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
32724 index c7ae026..1769c1d 100644
32725 --- a/drivers/cpufreq/cpufreq.c
32726 +++ b/drivers/cpufreq/cpufreq.c
32727 @@ -750,7 +750,7 @@ static void cpufreq_sysfs_release(struct kobject *kobj)
32728 complete(&policy->kobj_unregister);
32729 }
32730
32731 -static struct sysfs_ops sysfs_ops = {
32732 +static const struct sysfs_ops sysfs_ops = {
32733 .show = show,
32734 .store = store,
32735 };
32736 diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
32737 index 97b0038..2056670 100644
32738 --- a/drivers/cpuidle/sysfs.c
32739 +++ b/drivers/cpuidle/sysfs.c
32740 @@ -191,7 +191,7 @@ static ssize_t cpuidle_store(struct kobject * kobj, struct attribute * attr,
32741 return ret;
32742 }
32743
32744 -static struct sysfs_ops cpuidle_sysfs_ops = {
32745 +static const struct sysfs_ops cpuidle_sysfs_ops = {
32746 .show = cpuidle_show,
32747 .store = cpuidle_store,
32748 };
32749 @@ -277,7 +277,7 @@ static ssize_t cpuidle_state_show(struct kobject * kobj,
32750 return ret;
32751 }
32752
32753 -static struct sysfs_ops cpuidle_state_sysfs_ops = {
32754 +static const struct sysfs_ops cpuidle_state_sysfs_ops = {
32755 .show = cpuidle_state_show,
32756 };
32757
32758 @@ -294,7 +294,7 @@ static struct kobj_type ktype_state_cpuidle = {
32759 .release = cpuidle_state_sysfs_release,
32760 };
32761
32762 -static void inline cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
32763 +static inline void cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
32764 {
32765 kobject_put(&device->kobjs[i]->kobj);
32766 wait_for_completion(&device->kobjs[i]->kobj_unregister);
32767 diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
32768 index 5f753fc..0377ae9 100644
32769 --- a/drivers/crypto/hifn_795x.c
32770 +++ b/drivers/crypto/hifn_795x.c
32771 @@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device *dev, int encdec, u8 snum)
32772 0xCA, 0x34, 0x2B, 0x2E};
32773 struct scatterlist sg;
32774
32775 + pax_track_stack();
32776 +
32777 memset(src, 0, sizeof(src));
32778 memset(ctx.key, 0, sizeof(ctx.key));
32779
32780 diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
32781 index 71e6482..de8d96c 100644
32782 --- a/drivers/crypto/padlock-aes.c
32783 +++ b/drivers/crypto/padlock-aes.c
32784 @@ -108,6 +108,8 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
32785 struct crypto_aes_ctx gen_aes;
32786 int cpu;
32787
32788 + pax_track_stack();
32789 +
32790 if (key_len % 8) {
32791 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
32792 return -EINVAL;
32793 diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
32794 index dcc4ab7..cc834bb 100644
32795 --- a/drivers/dma/ioat/dma.c
32796 +++ b/drivers/dma/ioat/dma.c
32797 @@ -1146,7 +1146,7 @@ ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
32798 return entry->show(&chan->common, page);
32799 }
32800
32801 -struct sysfs_ops ioat_sysfs_ops = {
32802 +const struct sysfs_ops ioat_sysfs_ops = {
32803 .show = ioat_attr_show,
32804 };
32805
32806 diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
32807 index bbc3e78..f2db62c 100644
32808 --- a/drivers/dma/ioat/dma.h
32809 +++ b/drivers/dma/ioat/dma.h
32810 @@ -347,7 +347,7 @@ bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
32811 unsigned long *phys_complete);
32812 void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
32813 void ioat_kobject_del(struct ioatdma_device *device);
32814 -extern struct sysfs_ops ioat_sysfs_ops;
32815 +extern const struct sysfs_ops ioat_sysfs_ops;
32816 extern struct ioat_sysfs_entry ioat_version_attr;
32817 extern struct ioat_sysfs_entry ioat_cap_attr;
32818 #endif /* IOATDMA_H */
32819 diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
32820 index 9908c9e..3ceb0e5 100644
32821 --- a/drivers/dma/ioat/dma_v3.c
32822 +++ b/drivers/dma/ioat/dma_v3.c
32823 @@ -71,10 +71,10 @@
32824 /* provide a lookup table for setting the source address in the base or
32825 * extended descriptor of an xor or pq descriptor
32826 */
32827 -static const u8 xor_idx_to_desc __read_mostly = 0xd0;
32828 -static const u8 xor_idx_to_field[] __read_mostly = { 1, 4, 5, 6, 7, 0, 1, 2 };
32829 -static const u8 pq_idx_to_desc __read_mostly = 0xf8;
32830 -static const u8 pq_idx_to_field[] __read_mostly = { 1, 4, 5, 0, 1, 2, 4, 5 };
32831 +static const u8 xor_idx_to_desc = 0xd0;
32832 +static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
32833 +static const u8 pq_idx_to_desc = 0xf8;
32834 +static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
32835
32836 static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx)
32837 {
32838 diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
32839 index 85c464a..afd1e73 100644
32840 --- a/drivers/edac/amd64_edac.c
32841 +++ b/drivers/edac/amd64_edac.c
32842 @@ -3099,7 +3099,7 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
32843 * PCI core identifies what devices are on a system during boot, and then
32844 * inquiry this table to see if this driver is for a given device found.
32845 */
32846 -static const struct pci_device_id amd64_pci_table[] __devinitdata = {
32847 +static const struct pci_device_id amd64_pci_table[] __devinitconst = {
32848 {
32849 .vendor = PCI_VENDOR_ID_AMD,
32850 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
32851 diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
32852 index 2b95f1a..4f52793 100644
32853 --- a/drivers/edac/amd76x_edac.c
32854 +++ b/drivers/edac/amd76x_edac.c
32855 @@ -322,7 +322,7 @@ static void __devexit amd76x_remove_one(struct pci_dev *pdev)
32856 edac_mc_free(mci);
32857 }
32858
32859 -static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
32860 +static const struct pci_device_id amd76x_pci_tbl[] __devinitconst = {
32861 {
32862 PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32863 AMD762},
32864 diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
32865 index d205d49..74c9672 100644
32866 --- a/drivers/edac/e752x_edac.c
32867 +++ b/drivers/edac/e752x_edac.c
32868 @@ -1282,7 +1282,7 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev)
32869 edac_mc_free(mci);
32870 }
32871
32872 -static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
32873 +static const struct pci_device_id e752x_pci_tbl[] __devinitconst = {
32874 {
32875 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32876 E7520},
32877 diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
32878 index c7d11cc..c59c1ca 100644
32879 --- a/drivers/edac/e7xxx_edac.c
32880 +++ b/drivers/edac/e7xxx_edac.c
32881 @@ -526,7 +526,7 @@ static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
32882 edac_mc_free(mci);
32883 }
32884
32885 -static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
32886 +static const struct pci_device_id e7xxx_pci_tbl[] __devinitconst = {
32887 {
32888 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32889 E7205},
32890 diff --git a/drivers/edac/edac_device_sysfs.c b/drivers/edac/edac_device_sysfs.c
32891 index 5376457..5fdedbc 100644
32892 --- a/drivers/edac/edac_device_sysfs.c
32893 +++ b/drivers/edac/edac_device_sysfs.c
32894 @@ -137,7 +137,7 @@ static ssize_t edac_dev_ctl_info_store(struct kobject *kobj,
32895 }
32896
32897 /* edac_dev file operations for an 'ctl_info' */
32898 -static struct sysfs_ops device_ctl_info_ops = {
32899 +static const struct sysfs_ops device_ctl_info_ops = {
32900 .show = edac_dev_ctl_info_show,
32901 .store = edac_dev_ctl_info_store
32902 };
32903 @@ -373,7 +373,7 @@ static ssize_t edac_dev_instance_store(struct kobject *kobj,
32904 }
32905
32906 /* edac_dev file operations for an 'instance' */
32907 -static struct sysfs_ops device_instance_ops = {
32908 +static const struct sysfs_ops device_instance_ops = {
32909 .show = edac_dev_instance_show,
32910 .store = edac_dev_instance_store
32911 };
32912 @@ -476,7 +476,7 @@ static ssize_t edac_dev_block_store(struct kobject *kobj,
32913 }
32914
32915 /* edac_dev file operations for a 'block' */
32916 -static struct sysfs_ops device_block_ops = {
32917 +static const struct sysfs_ops device_block_ops = {
32918 .show = edac_dev_block_show,
32919 .store = edac_dev_block_store
32920 };
32921 diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
32922 index e1d4ce0..88840e9 100644
32923 --- a/drivers/edac/edac_mc_sysfs.c
32924 +++ b/drivers/edac/edac_mc_sysfs.c
32925 @@ -245,7 +245,7 @@ static ssize_t csrowdev_store(struct kobject *kobj, struct attribute *attr,
32926 return -EIO;
32927 }
32928
32929 -static struct sysfs_ops csrowfs_ops = {
32930 +static const struct sysfs_ops csrowfs_ops = {
32931 .show = csrowdev_show,
32932 .store = csrowdev_store
32933 };
32934 @@ -575,7 +575,7 @@ static ssize_t mcidev_store(struct kobject *kobj, struct attribute *attr,
32935 }
32936
32937 /* Intermediate show/store table */
32938 -static struct sysfs_ops mci_ops = {
32939 +static const struct sysfs_ops mci_ops = {
32940 .show = mcidev_show,
32941 .store = mcidev_store
32942 };
32943 diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
32944 index 422728c..d8d9c88 100644
32945 --- a/drivers/edac/edac_pci_sysfs.c
32946 +++ b/drivers/edac/edac_pci_sysfs.c
32947 @@ -25,8 +25,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
32948 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
32949 static int edac_pci_poll_msec = 1000; /* one second workq period */
32950
32951 -static atomic_t pci_parity_count = ATOMIC_INIT(0);
32952 -static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
32953 +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
32954 +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
32955
32956 static struct kobject *edac_pci_top_main_kobj;
32957 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
32958 @@ -121,7 +121,7 @@ static ssize_t edac_pci_instance_store(struct kobject *kobj,
32959 }
32960
32961 /* fs_ops table */
32962 -static struct sysfs_ops pci_instance_ops = {
32963 +static const struct sysfs_ops pci_instance_ops = {
32964 .show = edac_pci_instance_show,
32965 .store = edac_pci_instance_store
32966 };
32967 @@ -261,7 +261,7 @@ static ssize_t edac_pci_dev_store(struct kobject *kobj,
32968 return -EIO;
32969 }
32970
32971 -static struct sysfs_ops edac_pci_sysfs_ops = {
32972 +static const struct sysfs_ops edac_pci_sysfs_ops = {
32973 .show = edac_pci_dev_show,
32974 .store = edac_pci_dev_store
32975 };
32976 @@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32977 edac_printk(KERN_CRIT, EDAC_PCI,
32978 "Signaled System Error on %s\n",
32979 pci_name(dev));
32980 - atomic_inc(&pci_nonparity_count);
32981 + atomic_inc_unchecked(&pci_nonparity_count);
32982 }
32983
32984 if (status & (PCI_STATUS_PARITY)) {
32985 @@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32986 "Master Data Parity Error on %s\n",
32987 pci_name(dev));
32988
32989 - atomic_inc(&pci_parity_count);
32990 + atomic_inc_unchecked(&pci_parity_count);
32991 }
32992
32993 if (status & (PCI_STATUS_DETECTED_PARITY)) {
32994 @@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32995 "Detected Parity Error on %s\n",
32996 pci_name(dev));
32997
32998 - atomic_inc(&pci_parity_count);
32999 + atomic_inc_unchecked(&pci_parity_count);
33000 }
33001 }
33002
33003 @@ -616,7 +616,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
33004 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
33005 "Signaled System Error on %s\n",
33006 pci_name(dev));
33007 - atomic_inc(&pci_nonparity_count);
33008 + atomic_inc_unchecked(&pci_nonparity_count);
33009 }
33010
33011 if (status & (PCI_STATUS_PARITY)) {
33012 @@ -624,7 +624,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
33013 "Master Data Parity Error on "
33014 "%s\n", pci_name(dev));
33015
33016 - atomic_inc(&pci_parity_count);
33017 + atomic_inc_unchecked(&pci_parity_count);
33018 }
33019
33020 if (status & (PCI_STATUS_DETECTED_PARITY)) {
33021 @@ -632,7 +632,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
33022 "Detected Parity Error on %s\n",
33023 pci_name(dev));
33024
33025 - atomic_inc(&pci_parity_count);
33026 + atomic_inc_unchecked(&pci_parity_count);
33027 }
33028 }
33029 }
33030 @@ -674,7 +674,7 @@ void edac_pci_do_parity_check(void)
33031 if (!check_pci_errors)
33032 return;
33033
33034 - before_count = atomic_read(&pci_parity_count);
33035 + before_count = atomic_read_unchecked(&pci_parity_count);
33036
33037 /* scan all PCI devices looking for a Parity Error on devices and
33038 * bridges.
33039 @@ -686,7 +686,7 @@ void edac_pci_do_parity_check(void)
33040 /* Only if operator has selected panic on PCI Error */
33041 if (edac_pci_get_panic_on_pe()) {
33042 /* If the count is different 'after' from 'before' */
33043 - if (before_count != atomic_read(&pci_parity_count))
33044 + if (before_count != atomic_read_unchecked(&pci_parity_count))
33045 panic("EDAC: PCI Parity Error");
33046 }
33047 }
33048 diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
33049 index 6c9a0f2..9c1cf7e 100644
33050 --- a/drivers/edac/i3000_edac.c
33051 +++ b/drivers/edac/i3000_edac.c
33052 @@ -471,7 +471,7 @@ static void __devexit i3000_remove_one(struct pci_dev *pdev)
33053 edac_mc_free(mci);
33054 }
33055
33056 -static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
33057 +static const struct pci_device_id i3000_pci_tbl[] __devinitconst = {
33058 {
33059 PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
33060 I3000},
33061 diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
33062 index fde4db9..fe108f9 100644
33063 --- a/drivers/edac/i3200_edac.c
33064 +++ b/drivers/edac/i3200_edac.c
33065 @@ -444,7 +444,7 @@ static void __devexit i3200_remove_one(struct pci_dev *pdev)
33066 edac_mc_free(mci);
33067 }
33068
33069 -static const struct pci_device_id i3200_pci_tbl[] __devinitdata = {
33070 +static const struct pci_device_id i3200_pci_tbl[] __devinitconst = {
33071 {
33072 PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
33073 I3200},
33074 diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
33075 index adc10a2..57d4ccf 100644
33076 --- a/drivers/edac/i5000_edac.c
33077 +++ b/drivers/edac/i5000_edac.c
33078 @@ -1516,7 +1516,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev)
33079 *
33080 * The "E500P" device is the first device supported.
33081 */
33082 -static const struct pci_device_id i5000_pci_tbl[] __devinitdata = {
33083 +static const struct pci_device_id i5000_pci_tbl[] __devinitconst = {
33084 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
33085 .driver_data = I5000P},
33086
33087 diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
33088 index 22db05a..b2b5503 100644
33089 --- a/drivers/edac/i5100_edac.c
33090 +++ b/drivers/edac/i5100_edac.c
33091 @@ -944,7 +944,7 @@ static void __devexit i5100_remove_one(struct pci_dev *pdev)
33092 edac_mc_free(mci);
33093 }
33094
33095 -static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
33096 +static const struct pci_device_id i5100_pci_tbl[] __devinitconst = {
33097 /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
33098 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
33099 { 0, }
33100 diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
33101 index f99d106..f050710 100644
33102 --- a/drivers/edac/i5400_edac.c
33103 +++ b/drivers/edac/i5400_edac.c
33104 @@ -1383,7 +1383,7 @@ static void __devexit i5400_remove_one(struct pci_dev *pdev)
33105 *
33106 * The "E500P" device is the first device supported.
33107 */
33108 -static const struct pci_device_id i5400_pci_tbl[] __devinitdata = {
33109 +static const struct pci_device_id i5400_pci_tbl[] __devinitconst = {
33110 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
33111 {0,} /* 0 terminated list. */
33112 };
33113 diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
33114 index 577760a..9ce16ce 100644
33115 --- a/drivers/edac/i82443bxgx_edac.c
33116 +++ b/drivers/edac/i82443bxgx_edac.c
33117 @@ -381,7 +381,7 @@ static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
33118
33119 EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
33120
33121 -static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = {
33122 +static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitconst = {
33123 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
33124 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
33125 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
33126 diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
33127 index c0088ba..64a7b98 100644
33128 --- a/drivers/edac/i82860_edac.c
33129 +++ b/drivers/edac/i82860_edac.c
33130 @@ -271,7 +271,7 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev)
33131 edac_mc_free(mci);
33132 }
33133
33134 -static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
33135 +static const struct pci_device_id i82860_pci_tbl[] __devinitconst = {
33136 {
33137 PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
33138 I82860},
33139 diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
33140 index b2d83b9..a34357b 100644
33141 --- a/drivers/edac/i82875p_edac.c
33142 +++ b/drivers/edac/i82875p_edac.c
33143 @@ -512,7 +512,7 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev)
33144 edac_mc_free(mci);
33145 }
33146
33147 -static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
33148 +static const struct pci_device_id i82875p_pci_tbl[] __devinitconst = {
33149 {
33150 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
33151 I82875P},
33152 diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
33153 index 2eed3ea..87bbbd1 100644
33154 --- a/drivers/edac/i82975x_edac.c
33155 +++ b/drivers/edac/i82975x_edac.c
33156 @@ -586,7 +586,7 @@ static void __devexit i82975x_remove_one(struct pci_dev *pdev)
33157 edac_mc_free(mci);
33158 }
33159
33160 -static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = {
33161 +static const struct pci_device_id i82975x_pci_tbl[] __devinitconst = {
33162 {
33163 PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
33164 I82975X
33165 diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
33166 index 9900675..78ac2b6 100644
33167 --- a/drivers/edac/r82600_edac.c
33168 +++ b/drivers/edac/r82600_edac.c
33169 @@ -374,7 +374,7 @@ static void __devexit r82600_remove_one(struct pci_dev *pdev)
33170 edac_mc_free(mci);
33171 }
33172
33173 -static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
33174 +static const struct pci_device_id r82600_pci_tbl[] __devinitconst = {
33175 {
33176 PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
33177 },
33178 diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
33179 index d4ec605..4cfec4e 100644
33180 --- a/drivers/edac/x38_edac.c
33181 +++ b/drivers/edac/x38_edac.c
33182 @@ -441,7 +441,7 @@ static void __devexit x38_remove_one(struct pci_dev *pdev)
33183 edac_mc_free(mci);
33184 }
33185
33186 -static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
33187 +static const struct pci_device_id x38_pci_tbl[] __devinitconst = {
33188 {
33189 PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
33190 X38},
33191 diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
33192 index 3fc2ceb..daf098f 100644
33193 --- a/drivers/firewire/core-card.c
33194 +++ b/drivers/firewire/core-card.c
33195 @@ -558,7 +558,7 @@ void fw_card_release(struct kref *kref)
33196
33197 void fw_core_remove_card(struct fw_card *card)
33198 {
33199 - struct fw_card_driver dummy_driver = dummy_driver_template;
33200 + fw_card_driver_no_const dummy_driver = dummy_driver_template;
33201
33202 card->driver->update_phy_reg(card, 4,
33203 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
33204 diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
33205 index 4560d8f..36db24a 100644
33206 --- a/drivers/firewire/core-cdev.c
33207 +++ b/drivers/firewire/core-cdev.c
33208 @@ -1141,8 +1141,7 @@ static int init_iso_resource(struct client *client,
33209 int ret;
33210
33211 if ((request->channels == 0 && request->bandwidth == 0) ||
33212 - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
33213 - request->bandwidth < 0)
33214 + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
33215 return -EINVAL;
33216
33217 r = kmalloc(sizeof(*r), GFP_KERNEL);
33218 diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
33219 index da628c7..cf54a2c 100644
33220 --- a/drivers/firewire/core-transaction.c
33221 +++ b/drivers/firewire/core-transaction.c
33222 @@ -36,6 +36,7 @@
33223 #include <linux/string.h>
33224 #include <linux/timer.h>
33225 #include <linux/types.h>
33226 +#include <linux/sched.h>
33227
33228 #include <asm/byteorder.h>
33229
33230 @@ -344,6 +345,8 @@ int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
33231 struct transaction_callback_data d;
33232 struct fw_transaction t;
33233
33234 + pax_track_stack();
33235 +
33236 init_completion(&d.done);
33237 d.payload = payload;
33238 fw_send_request(card, &t, tcode, destination_id, generation, speed,
33239 diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
33240 index 7ff6e75..a2965d9 100644
33241 --- a/drivers/firewire/core.h
33242 +++ b/drivers/firewire/core.h
33243 @@ -86,6 +86,7 @@ struct fw_card_driver {
33244
33245 int (*stop_iso)(struct fw_iso_context *ctx);
33246 };
33247 +typedef struct fw_card_driver __no_const fw_card_driver_no_const;
33248
33249 void fw_card_initialize(struct fw_card *card,
33250 const struct fw_card_driver *driver, struct device *device);
33251 diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
33252 index 3a2ccb0..82fd7c4 100644
33253 --- a/drivers/firmware/dmi_scan.c
33254 +++ b/drivers/firmware/dmi_scan.c
33255 @@ -391,11 +391,6 @@ void __init dmi_scan_machine(void)
33256 }
33257 }
33258 else {
33259 - /*
33260 - * no iounmap() for that ioremap(); it would be a no-op, but
33261 - * it's so early in setup that sucker gets confused into doing
33262 - * what it shouldn't if we actually call it.
33263 - */
33264 p = dmi_ioremap(0xF0000, 0x10000);
33265 if (p == NULL)
33266 goto error;
33267 @@ -667,7 +662,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
33268 if (buf == NULL)
33269 return -1;
33270
33271 - dmi_table(buf, dmi_len, dmi_num, decode, private_data);
33272 + dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
33273
33274 iounmap(buf);
33275 return 0;
33276 diff --git a/drivers/firmware/edd.c b/drivers/firmware/edd.c
33277 index 9e4f59d..110e24e 100644
33278 --- a/drivers/firmware/edd.c
33279 +++ b/drivers/firmware/edd.c
33280 @@ -122,7 +122,7 @@ edd_attr_show(struct kobject * kobj, struct attribute *attr, char *buf)
33281 return ret;
33282 }
33283
33284 -static struct sysfs_ops edd_attr_ops = {
33285 +static const struct sysfs_ops edd_attr_ops = {
33286 .show = edd_attr_show,
33287 };
33288
33289 diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
33290 index f4f709d..082f06e 100644
33291 --- a/drivers/firmware/efivars.c
33292 +++ b/drivers/firmware/efivars.c
33293 @@ -362,7 +362,7 @@ static ssize_t efivar_attr_store(struct kobject *kobj, struct attribute *attr,
33294 return ret;
33295 }
33296
33297 -static struct sysfs_ops efivar_attr_ops = {
33298 +static const struct sysfs_ops efivar_attr_ops = {
33299 .show = efivar_attr_show,
33300 .store = efivar_attr_store,
33301 };
33302 diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
33303 index 051d1eb..0a5d4e7 100644
33304 --- a/drivers/firmware/iscsi_ibft.c
33305 +++ b/drivers/firmware/iscsi_ibft.c
33306 @@ -525,7 +525,7 @@ static ssize_t ibft_show_attribute(struct kobject *kobj,
33307 return ret;
33308 }
33309
33310 -static struct sysfs_ops ibft_attr_ops = {
33311 +static const struct sysfs_ops ibft_attr_ops = {
33312 .show = ibft_show_attribute,
33313 };
33314
33315 diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
33316 index 56f9234..8c58c7b 100644
33317 --- a/drivers/firmware/memmap.c
33318 +++ b/drivers/firmware/memmap.c
33319 @@ -74,7 +74,7 @@ static struct attribute *def_attrs[] = {
33320 NULL
33321 };
33322
33323 -static struct sysfs_ops memmap_attr_ops = {
33324 +static const struct sysfs_ops memmap_attr_ops = {
33325 .show = memmap_attr_show,
33326 };
33327
33328 diff --git a/drivers/gpio/vr41xx_giu.c b/drivers/gpio/vr41xx_giu.c
33329 index b16c9a8..2af7d3f 100644
33330 --- a/drivers/gpio/vr41xx_giu.c
33331 +++ b/drivers/gpio/vr41xx_giu.c
33332 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
33333 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
33334 maskl, pendl, maskh, pendh);
33335
33336 - atomic_inc(&irq_err_count);
33337 + atomic_inc_unchecked(&irq_err_count);
33338
33339 return -EINVAL;
33340 }
33341 diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
33342 index bea6efc..3dc0f42 100644
33343 --- a/drivers/gpu/drm/drm_crtc.c
33344 +++ b/drivers/gpu/drm/drm_crtc.c
33345 @@ -1323,7 +1323,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
33346 */
33347 if ((out_resp->count_modes >= mode_count) && mode_count) {
33348 copied = 0;
33349 - mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
33350 + mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
33351 list_for_each_entry(mode, &connector->modes, head) {
33352 drm_crtc_convert_to_umode(&u_mode, mode);
33353 if (copy_to_user(mode_ptr + copied,
33354 @@ -1338,8 +1338,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
33355
33356 if ((out_resp->count_props >= props_count) && props_count) {
33357 copied = 0;
33358 - prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
33359 - prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
33360 + prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
33361 + prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
33362 for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
33363 if (connector->property_ids[i] != 0) {
33364 if (put_user(connector->property_ids[i],
33365 @@ -1361,7 +1361,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
33366
33367 if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
33368 copied = 0;
33369 - encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
33370 + encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
33371 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
33372 if (connector->encoder_ids[i] != 0) {
33373 if (put_user(connector->encoder_ids[i],
33374 @@ -1513,7 +1513,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
33375 }
33376
33377 for (i = 0; i < crtc_req->count_connectors; i++) {
33378 - set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
33379 + set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
33380 if (get_user(out_id, &set_connectors_ptr[i])) {
33381 ret = -EFAULT;
33382 goto out;
33383 @@ -2118,7 +2118,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
33384 out_resp->flags = property->flags;
33385
33386 if ((out_resp->count_values >= value_count) && value_count) {
33387 - values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
33388 + values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
33389 for (i = 0; i < value_count; i++) {
33390 if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
33391 ret = -EFAULT;
33392 @@ -2131,7 +2131,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
33393 if (property->flags & DRM_MODE_PROP_ENUM) {
33394 if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
33395 copied = 0;
33396 - enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
33397 + enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
33398 list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
33399
33400 if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
33401 @@ -2154,7 +2154,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
33402 if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
33403 copied = 0;
33404 blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
33405 - blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
33406 + blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
33407
33408 list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
33409 if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
33410 @@ -2226,7 +2226,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
33411 blob = obj_to_blob(obj);
33412
33413 if (out_resp->length == blob->length) {
33414 - blob_ptr = (void *)(unsigned long)out_resp->data;
33415 + blob_ptr = (void __user *)(unsigned long)out_resp->data;
33416 if (copy_to_user(blob_ptr, blob->data, blob->length)){
33417 ret = -EFAULT;
33418 goto done;
33419 diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
33420 index 1b8745d..92fdbf6 100644
33421 --- a/drivers/gpu/drm/drm_crtc_helper.c
33422 +++ b/drivers/gpu/drm/drm_crtc_helper.c
33423 @@ -573,7 +573,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
33424 struct drm_crtc *tmp;
33425 int crtc_mask = 1;
33426
33427 - WARN(!crtc, "checking null crtc?");
33428 + BUG_ON(!crtc);
33429
33430 dev = crtc->dev;
33431
33432 @@ -642,6 +642,8 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
33433
33434 adjusted_mode = drm_mode_duplicate(dev, mode);
33435
33436 + pax_track_stack();
33437 +
33438 crtc->enabled = drm_helper_crtc_in_use(crtc);
33439
33440 if (!crtc->enabled)
33441 diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
33442 index 0e27d98..dec8768 100644
33443 --- a/drivers/gpu/drm/drm_drv.c
33444 +++ b/drivers/gpu/drm/drm_drv.c
33445 @@ -417,7 +417,7 @@ int drm_ioctl(struct inode *inode, struct file *filp,
33446 char *kdata = NULL;
33447
33448 atomic_inc(&dev->ioctl_count);
33449 - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
33450 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
33451 ++file_priv->ioctl_count;
33452
33453 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
33454 diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
33455 index 519161e..98c840c 100644
33456 --- a/drivers/gpu/drm/drm_fops.c
33457 +++ b/drivers/gpu/drm/drm_fops.c
33458 @@ -66,7 +66,7 @@ static int drm_setup(struct drm_device * dev)
33459 }
33460
33461 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
33462 - atomic_set(&dev->counts[i], 0);
33463 + atomic_set_unchecked(&dev->counts[i], 0);
33464
33465 dev->sigdata.lock = NULL;
33466
33467 @@ -130,9 +130,9 @@ int drm_open(struct inode *inode, struct file *filp)
33468
33469 retcode = drm_open_helper(inode, filp, dev);
33470 if (!retcode) {
33471 - atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
33472 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
33473 spin_lock(&dev->count_lock);
33474 - if (!dev->open_count++) {
33475 + if (local_inc_return(&dev->open_count) == 1) {
33476 spin_unlock(&dev->count_lock);
33477 retcode = drm_setup(dev);
33478 goto out;
33479 @@ -435,7 +435,7 @@ int drm_release(struct inode *inode, struct file *filp)
33480
33481 lock_kernel();
33482
33483 - DRM_DEBUG("open_count = %d\n", dev->open_count);
33484 + DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
33485
33486 if (dev->driver->preclose)
33487 dev->driver->preclose(dev, file_priv);
33488 @@ -447,7 +447,7 @@ int drm_release(struct inode *inode, struct file *filp)
33489 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
33490 task_pid_nr(current),
33491 (long)old_encode_dev(file_priv->minor->device),
33492 - dev->open_count);
33493 + local_read(&dev->open_count));
33494
33495 /* Release any auth tokens that might point to this file_priv,
33496 (do that under the drm_global_mutex) */
33497 @@ -529,9 +529,9 @@ int drm_release(struct inode *inode, struct file *filp)
33498 * End inline drm_release
33499 */
33500
33501 - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
33502 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
33503 spin_lock(&dev->count_lock);
33504 - if (!--dev->open_count) {
33505 + if (local_dec_and_test(&dev->open_count)) {
33506 if (atomic_read(&dev->ioctl_count)) {
33507 DRM_ERROR("Device busy: %d\n",
33508 atomic_read(&dev->ioctl_count));
33509 diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
33510 index 8bf3770..79422805 100644
33511 --- a/drivers/gpu/drm/drm_gem.c
33512 +++ b/drivers/gpu/drm/drm_gem.c
33513 @@ -83,11 +83,11 @@ drm_gem_init(struct drm_device *dev)
33514 spin_lock_init(&dev->object_name_lock);
33515 idr_init(&dev->object_name_idr);
33516 atomic_set(&dev->object_count, 0);
33517 - atomic_set(&dev->object_memory, 0);
33518 + atomic_set_unchecked(&dev->object_memory, 0);
33519 atomic_set(&dev->pin_count, 0);
33520 - atomic_set(&dev->pin_memory, 0);
33521 + atomic_set_unchecked(&dev->pin_memory, 0);
33522 atomic_set(&dev->gtt_count, 0);
33523 - atomic_set(&dev->gtt_memory, 0);
33524 + atomic_set_unchecked(&dev->gtt_memory, 0);
33525
33526 mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
33527 if (!mm) {
33528 @@ -150,7 +150,7 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
33529 goto fput;
33530 }
33531 atomic_inc(&dev->object_count);
33532 - atomic_add(obj->size, &dev->object_memory);
33533 + atomic_add_unchecked(obj->size, &dev->object_memory);
33534 return obj;
33535 fput:
33536 fput(obj->filp);
33537 @@ -429,7 +429,7 @@ drm_gem_object_free(struct kref *kref)
33538
33539 fput(obj->filp);
33540 atomic_dec(&dev->object_count);
33541 - atomic_sub(obj->size, &dev->object_memory);
33542 + atomic_sub_unchecked(obj->size, &dev->object_memory);
33543 kfree(obj);
33544 }
33545 EXPORT_SYMBOL(drm_gem_object_free);
33546 diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
33547 index f0f6c6b..34af322 100644
33548 --- a/drivers/gpu/drm/drm_info.c
33549 +++ b/drivers/gpu/drm/drm_info.c
33550 @@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
33551 struct drm_local_map *map;
33552 struct drm_map_list *r_list;
33553
33554 - /* Hardcoded from _DRM_FRAME_BUFFER,
33555 - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
33556 - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
33557 - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
33558 + static const char * const types[] = {
33559 + [_DRM_FRAME_BUFFER] = "FB",
33560 + [_DRM_REGISTERS] = "REG",
33561 + [_DRM_SHM] = "SHM",
33562 + [_DRM_AGP] = "AGP",
33563 + [_DRM_SCATTER_GATHER] = "SG",
33564 + [_DRM_CONSISTENT] = "PCI",
33565 + [_DRM_GEM] = "GEM" };
33566 const char *type;
33567 int i;
33568
33569 @@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
33570 map = r_list->map;
33571 if (!map)
33572 continue;
33573 - if (map->type < 0 || map->type > 5)
33574 + if (map->type >= ARRAY_SIZE(types))
33575 type = "??";
33576 else
33577 type = types[map->type];
33578 @@ -265,10 +269,10 @@ int drm_gem_object_info(struct seq_file *m, void* data)
33579 struct drm_device *dev = node->minor->dev;
33580
33581 seq_printf(m, "%d objects\n", atomic_read(&dev->object_count));
33582 - seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory));
33583 + seq_printf(m, "%d object bytes\n", atomic_read_unchecked(&dev->object_memory));
33584 seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count));
33585 - seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory));
33586 - seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory));
33587 + seq_printf(m, "%d pin bytes\n", atomic_read_unchecked(&dev->pin_memory));
33588 + seq_printf(m, "%d gtt bytes\n", atomic_read_unchecked(&dev->gtt_memory));
33589 seq_printf(m, "%d gtt total\n", dev->gtt_total);
33590 return 0;
33591 }
33592 @@ -288,7 +292,11 @@ int drm_vma_info(struct seq_file *m, void *data)
33593 mutex_lock(&dev->struct_mutex);
33594 seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n",
33595 atomic_read(&dev->vma_count),
33596 +#ifdef CONFIG_GRKERNSEC_HIDESYM
33597 + NULL, 0);
33598 +#else
33599 high_memory, (u64)virt_to_phys(high_memory));
33600 +#endif
33601
33602 list_for_each_entry(pt, &dev->vmalist, head) {
33603 vma = pt->vma;
33604 @@ -296,14 +304,23 @@ int drm_vma_info(struct seq_file *m, void *data)
33605 continue;
33606 seq_printf(m,
33607 "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
33608 - pt->pid, vma->vm_start, vma->vm_end,
33609 + pt->pid,
33610 +#ifdef CONFIG_GRKERNSEC_HIDESYM
33611 + 0, 0,
33612 +#else
33613 + vma->vm_start, vma->vm_end,
33614 +#endif
33615 vma->vm_flags & VM_READ ? 'r' : '-',
33616 vma->vm_flags & VM_WRITE ? 'w' : '-',
33617 vma->vm_flags & VM_EXEC ? 'x' : '-',
33618 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
33619 vma->vm_flags & VM_LOCKED ? 'l' : '-',
33620 vma->vm_flags & VM_IO ? 'i' : '-',
33621 +#ifdef CONFIG_GRKERNSEC_HIDESYM
33622 + 0);
33623 +#else
33624 vma->vm_pgoff);
33625 +#endif
33626
33627 #if defined(__i386__)
33628 pgprot = pgprot_val(vma->vm_page_prot);
33629 diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
33630 index 282d9fd..71e5f11 100644
33631 --- a/drivers/gpu/drm/drm_ioc32.c
33632 +++ b/drivers/gpu/drm/drm_ioc32.c
33633 @@ -463,7 +463,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
33634 request = compat_alloc_user_space(nbytes);
33635 if (!access_ok(VERIFY_WRITE, request, nbytes))
33636 return -EFAULT;
33637 - list = (struct drm_buf_desc *) (request + 1);
33638 + list = (struct drm_buf_desc __user *) (request + 1);
33639
33640 if (__put_user(count, &request->count)
33641 || __put_user(list, &request->list))
33642 @@ -525,7 +525,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
33643 request = compat_alloc_user_space(nbytes);
33644 if (!access_ok(VERIFY_WRITE, request, nbytes))
33645 return -EFAULT;
33646 - list = (struct drm_buf_pub *) (request + 1);
33647 + list = (struct drm_buf_pub __user *) (request + 1);
33648
33649 if (__put_user(count, &request->count)
33650 || __put_user(list, &request->list))
33651 diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
33652 index 9b9ff46..4ea724c 100644
33653 --- a/drivers/gpu/drm/drm_ioctl.c
33654 +++ b/drivers/gpu/drm/drm_ioctl.c
33655 @@ -283,7 +283,7 @@ int drm_getstats(struct drm_device *dev, void *data,
33656 stats->data[i].value =
33657 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
33658 else
33659 - stats->data[i].value = atomic_read(&dev->counts[i]);
33660 + stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
33661 stats->data[i].type = dev->types[i];
33662 }
33663
33664 diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
33665 index e2f70a5..c703e86 100644
33666 --- a/drivers/gpu/drm/drm_lock.c
33667 +++ b/drivers/gpu/drm/drm_lock.c
33668 @@ -87,7 +87,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
33669 if (drm_lock_take(&master->lock, lock->context)) {
33670 master->lock.file_priv = file_priv;
33671 master->lock.lock_time = jiffies;
33672 - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
33673 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
33674 break; /* Got lock */
33675 }
33676
33677 @@ -165,7 +165,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
33678 return -EINVAL;
33679 }
33680
33681 - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
33682 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
33683
33684 /* kernel_context_switch isn't used by any of the x86 drm
33685 * modules but is required by the Sparc driver.
33686 diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
33687 index 7d1d88c..b9131b2 100644
33688 --- a/drivers/gpu/drm/i810/i810_dma.c
33689 +++ b/drivers/gpu/drm/i810/i810_dma.c
33690 @@ -952,8 +952,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
33691 dma->buflist[vertex->idx],
33692 vertex->discard, vertex->used);
33693
33694 - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
33695 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
33696 + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
33697 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
33698 sarea_priv->last_enqueue = dev_priv->counter - 1;
33699 sarea_priv->last_dispatch = (int)hw_status[5];
33700
33701 @@ -1115,8 +1115,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
33702 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
33703 mc->last_render);
33704
33705 - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
33706 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
33707 + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
33708 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
33709 sarea_priv->last_enqueue = dev_priv->counter - 1;
33710 sarea_priv->last_dispatch = (int)hw_status[5];
33711
33712 diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
33713 index 21e2691..7321edd 100644
33714 --- a/drivers/gpu/drm/i810/i810_drv.h
33715 +++ b/drivers/gpu/drm/i810/i810_drv.h
33716 @@ -108,8 +108,8 @@ typedef struct drm_i810_private {
33717 int page_flipping;
33718
33719 wait_queue_head_t irq_queue;
33720 - atomic_t irq_received;
33721 - atomic_t irq_emitted;
33722 + atomic_unchecked_t irq_received;
33723 + atomic_unchecked_t irq_emitted;
33724
33725 int front_offset;
33726 } drm_i810_private_t;
33727 diff --git a/drivers/gpu/drm/i830/i830_drv.h b/drivers/gpu/drm/i830/i830_drv.h
33728 index da82afe..48a45de 100644
33729 --- a/drivers/gpu/drm/i830/i830_drv.h
33730 +++ b/drivers/gpu/drm/i830/i830_drv.h
33731 @@ -115,8 +115,8 @@ typedef struct drm_i830_private {
33732 int page_flipping;
33733
33734 wait_queue_head_t irq_queue;
33735 - atomic_t irq_received;
33736 - atomic_t irq_emitted;
33737 + atomic_unchecked_t irq_received;
33738 + atomic_unchecked_t irq_emitted;
33739
33740 int use_mi_batchbuffer_start;
33741
33742 diff --git a/drivers/gpu/drm/i830/i830_irq.c b/drivers/gpu/drm/i830/i830_irq.c
33743 index 91ec2bb..6f21fab 100644
33744 --- a/drivers/gpu/drm/i830/i830_irq.c
33745 +++ b/drivers/gpu/drm/i830/i830_irq.c
33746 @@ -47,7 +47,7 @@ irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS)
33747
33748 I830_WRITE16(I830REG_INT_IDENTITY_R, temp);
33749
33750 - atomic_inc(&dev_priv->irq_received);
33751 + atomic_inc_unchecked(&dev_priv->irq_received);
33752 wake_up_interruptible(&dev_priv->irq_queue);
33753
33754 return IRQ_HANDLED;
33755 @@ -60,14 +60,14 @@ static int i830_emit_irq(struct drm_device * dev)
33756
33757 DRM_DEBUG("%s\n", __func__);
33758
33759 - atomic_inc(&dev_priv->irq_emitted);
33760 + atomic_inc_unchecked(&dev_priv->irq_emitted);
33761
33762 BEGIN_LP_RING(2);
33763 OUT_RING(0);
33764 OUT_RING(GFX_OP_USER_INTERRUPT);
33765 ADVANCE_LP_RING();
33766
33767 - return atomic_read(&dev_priv->irq_emitted);
33768 + return atomic_read_unchecked(&dev_priv->irq_emitted);
33769 }
33770
33771 static int i830_wait_irq(struct drm_device * dev, int irq_nr)
33772 @@ -79,7 +79,7 @@ static int i830_wait_irq(struct drm_device * dev, int irq_nr)
33773
33774 DRM_DEBUG("%s\n", __func__);
33775
33776 - if (atomic_read(&dev_priv->irq_received) >= irq_nr)
33777 + if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
33778 return 0;
33779
33780 dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
33781 @@ -88,7 +88,7 @@ static int i830_wait_irq(struct drm_device * dev, int irq_nr)
33782
33783 for (;;) {
33784 __set_current_state(TASK_INTERRUPTIBLE);
33785 - if (atomic_read(&dev_priv->irq_received) >= irq_nr)
33786 + if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
33787 break;
33788 if ((signed)(end - jiffies) <= 0) {
33789 DRM_ERROR("timeout iir %x imr %x ier %x hwstam %x\n",
33790 @@ -163,8 +163,8 @@ void i830_driver_irq_preinstall(struct drm_device * dev)
33791 I830_WRITE16(I830REG_HWSTAM, 0xffff);
33792 I830_WRITE16(I830REG_INT_MASK_R, 0x0);
33793 I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
33794 - atomic_set(&dev_priv->irq_received, 0);
33795 - atomic_set(&dev_priv->irq_emitted, 0);
33796 + atomic_set_unchecked(&dev_priv->irq_received, 0);
33797 + atomic_set_unchecked(&dev_priv->irq_emitted, 0);
33798 init_waitqueue_head(&dev_priv->irq_queue);
33799 }
33800
33801 diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
33802 index 288fc50..c6092055 100644
33803 --- a/drivers/gpu/drm/i915/dvo.h
33804 +++ b/drivers/gpu/drm/i915/dvo.h
33805 @@ -135,23 +135,23 @@ struct intel_dvo_dev_ops {
33806 *
33807 * \return singly-linked list of modes or NULL if no modes found.
33808 */
33809 - struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo);
33810 + struct drm_display_mode *(* const get_modes)(struct intel_dvo_device *dvo);
33811
33812 /**
33813 * Clean up driver-specific bits of the output
33814 */
33815 - void (*destroy) (struct intel_dvo_device *dvo);
33816 + void (* const destroy) (struct intel_dvo_device *dvo);
33817
33818 /**
33819 * Debugging hook to dump device registers to log file
33820 */
33821 - void (*dump_regs)(struct intel_dvo_device *dvo);
33822 + void (* const dump_regs)(struct intel_dvo_device *dvo);
33823 };
33824
33825 -extern struct intel_dvo_dev_ops sil164_ops;
33826 -extern struct intel_dvo_dev_ops ch7xxx_ops;
33827 -extern struct intel_dvo_dev_ops ivch_ops;
33828 -extern struct intel_dvo_dev_ops tfp410_ops;
33829 -extern struct intel_dvo_dev_ops ch7017_ops;
33830 +extern const struct intel_dvo_dev_ops sil164_ops;
33831 +extern const struct intel_dvo_dev_ops ch7xxx_ops;
33832 +extern const struct intel_dvo_dev_ops ivch_ops;
33833 +extern const struct intel_dvo_dev_ops tfp410_ops;
33834 +extern const struct intel_dvo_dev_ops ch7017_ops;
33835
33836 #endif /* _INTEL_DVO_H */
33837 diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
33838 index 621815b..499d82e 100644
33839 --- a/drivers/gpu/drm/i915/dvo_ch7017.c
33840 +++ b/drivers/gpu/drm/i915/dvo_ch7017.c
33841 @@ -443,7 +443,7 @@ static void ch7017_destroy(struct intel_dvo_device *dvo)
33842 }
33843 }
33844
33845 -struct intel_dvo_dev_ops ch7017_ops = {
33846 +const struct intel_dvo_dev_ops ch7017_ops = {
33847 .init = ch7017_init,
33848 .detect = ch7017_detect,
33849 .mode_valid = ch7017_mode_valid,
33850 diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
33851 index a9b8962..ac769ba 100644
33852 --- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
33853 +++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
33854 @@ -356,7 +356,7 @@ static void ch7xxx_destroy(struct intel_dvo_device *dvo)
33855 }
33856 }
33857
33858 -struct intel_dvo_dev_ops ch7xxx_ops = {
33859 +const struct intel_dvo_dev_ops ch7xxx_ops = {
33860 .init = ch7xxx_init,
33861 .detect = ch7xxx_detect,
33862 .mode_valid = ch7xxx_mode_valid,
33863 diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
33864 index aa176f9..ed2930c 100644
33865 --- a/drivers/gpu/drm/i915/dvo_ivch.c
33866 +++ b/drivers/gpu/drm/i915/dvo_ivch.c
33867 @@ -430,7 +430,7 @@ static void ivch_destroy(struct intel_dvo_device *dvo)
33868 }
33869 }
33870
33871 -struct intel_dvo_dev_ops ivch_ops= {
33872 +const struct intel_dvo_dev_ops ivch_ops= {
33873 .init = ivch_init,
33874 .dpms = ivch_dpms,
33875 .save = ivch_save,
33876 diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
33877 index e1c1f73..7dbebcf 100644
33878 --- a/drivers/gpu/drm/i915/dvo_sil164.c
33879 +++ b/drivers/gpu/drm/i915/dvo_sil164.c
33880 @@ -290,7 +290,7 @@ static void sil164_destroy(struct intel_dvo_device *dvo)
33881 }
33882 }
33883
33884 -struct intel_dvo_dev_ops sil164_ops = {
33885 +const struct intel_dvo_dev_ops sil164_ops = {
33886 .init = sil164_init,
33887 .detect = sil164_detect,
33888 .mode_valid = sil164_mode_valid,
33889 diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
33890 index 16dce84..7e1b6f8 100644
33891 --- a/drivers/gpu/drm/i915/dvo_tfp410.c
33892 +++ b/drivers/gpu/drm/i915/dvo_tfp410.c
33893 @@ -323,7 +323,7 @@ static void tfp410_destroy(struct intel_dvo_device *dvo)
33894 }
33895 }
33896
33897 -struct intel_dvo_dev_ops tfp410_ops = {
33898 +const struct intel_dvo_dev_ops tfp410_ops = {
33899 .init = tfp410_init,
33900 .detect = tfp410_detect,
33901 .mode_valid = tfp410_mode_valid,
33902 diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
33903 index 7e859d6..7d1cf2b 100644
33904 --- a/drivers/gpu/drm/i915/i915_debugfs.c
33905 +++ b/drivers/gpu/drm/i915/i915_debugfs.c
33906 @@ -192,7 +192,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
33907 I915_READ(GTIMR));
33908 }
33909 seq_printf(m, "Interrupts received: %d\n",
33910 - atomic_read(&dev_priv->irq_received));
33911 + atomic_read_unchecked(&dev_priv->irq_received));
33912 if (dev_priv->hw_status_page != NULL) {
33913 seq_printf(m, "Current sequence: %d\n",
33914 i915_get_gem_seqno(dev));
33915 diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
33916 index 5449239..7e4f68d 100644
33917 --- a/drivers/gpu/drm/i915/i915_drv.c
33918 +++ b/drivers/gpu/drm/i915/i915_drv.c
33919 @@ -285,7 +285,7 @@ i915_pci_resume(struct pci_dev *pdev)
33920 return i915_resume(dev);
33921 }
33922
33923 -static struct vm_operations_struct i915_gem_vm_ops = {
33924 +static const struct vm_operations_struct i915_gem_vm_ops = {
33925 .fault = i915_gem_fault,
33926 .open = drm_gem_vm_open,
33927 .close = drm_gem_vm_close,
33928 diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
33929 index 97163f7..c24c7c7 100644
33930 --- a/drivers/gpu/drm/i915/i915_drv.h
33931 +++ b/drivers/gpu/drm/i915/i915_drv.h
33932 @@ -168,7 +168,7 @@ struct drm_i915_display_funcs {
33933 /* display clock increase/decrease */
33934 /* pll clock increase/decrease */
33935 /* clock gating init */
33936 -};
33937 +} __no_const;
33938
33939 typedef struct drm_i915_private {
33940 struct drm_device *dev;
33941 @@ -197,7 +197,7 @@ typedef struct drm_i915_private {
33942 int page_flipping;
33943
33944 wait_queue_head_t irq_queue;
33945 - atomic_t irq_received;
33946 + atomic_unchecked_t irq_received;
33947 /** Protects user_irq_refcount and irq_mask_reg */
33948 spinlock_t user_irq_lock;
33949 /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
33950 diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
33951 index 27a3074..eb3f959 100644
33952 --- a/drivers/gpu/drm/i915/i915_gem.c
33953 +++ b/drivers/gpu/drm/i915/i915_gem.c
33954 @@ -102,7 +102,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
33955
33956 args->aper_size = dev->gtt_total;
33957 args->aper_available_size = (args->aper_size -
33958 - atomic_read(&dev->pin_memory));
33959 + atomic_read_unchecked(&dev->pin_memory));
33960
33961 return 0;
33962 }
33963 @@ -2058,7 +2058,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
33964
33965 if (obj_priv->gtt_space) {
33966 atomic_dec(&dev->gtt_count);
33967 - atomic_sub(obj->size, &dev->gtt_memory);
33968 + atomic_sub_unchecked(obj->size, &dev->gtt_memory);
33969
33970 drm_mm_put_block(obj_priv->gtt_space);
33971 obj_priv->gtt_space = NULL;
33972 @@ -2701,7 +2701,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
33973 goto search_free;
33974 }
33975 atomic_inc(&dev->gtt_count);
33976 - atomic_add(obj->size, &dev->gtt_memory);
33977 + atomic_add_unchecked(obj->size, &dev->gtt_memory);
33978
33979 /* Assert that the object is not currently in any GPU domain. As it
33980 * wasn't in the GTT, there shouldn't be any way it could have been in
33981 @@ -3755,9 +3755,9 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
33982 "%d/%d gtt bytes\n",
33983 atomic_read(&dev->object_count),
33984 atomic_read(&dev->pin_count),
33985 - atomic_read(&dev->object_memory),
33986 - atomic_read(&dev->pin_memory),
33987 - atomic_read(&dev->gtt_memory),
33988 + atomic_read_unchecked(&dev->object_memory),
33989 + atomic_read_unchecked(&dev->pin_memory),
33990 + atomic_read_unchecked(&dev->gtt_memory),
33991 dev->gtt_total);
33992 }
33993 goto err;
33994 @@ -3989,7 +3989,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
33995 */
33996 if (obj_priv->pin_count == 1) {
33997 atomic_inc(&dev->pin_count);
33998 - atomic_add(obj->size, &dev->pin_memory);
33999 + atomic_add_unchecked(obj->size, &dev->pin_memory);
34000 if (!obj_priv->active &&
34001 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
34002 !list_empty(&obj_priv->list))
34003 @@ -4022,7 +4022,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
34004 list_move_tail(&obj_priv->list,
34005 &dev_priv->mm.inactive_list);
34006 atomic_dec(&dev->pin_count);
34007 - atomic_sub(obj->size, &dev->pin_memory);
34008 + atomic_sub_unchecked(obj->size, &dev->pin_memory);
34009 }
34010 i915_verify_inactive(dev, __FILE__, __LINE__);
34011 }
34012 diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
34013 index 63f28ad..f5469da 100644
34014 --- a/drivers/gpu/drm/i915/i915_irq.c
34015 +++ b/drivers/gpu/drm/i915/i915_irq.c
34016 @@ -528,7 +528,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
34017 int irq_received;
34018 int ret = IRQ_NONE;
34019
34020 - atomic_inc(&dev_priv->irq_received);
34021 + atomic_inc_unchecked(&dev_priv->irq_received);
34022
34023 if (IS_IGDNG(dev))
34024 return igdng_irq_handler(dev);
34025 @@ -1021,7 +1021,7 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
34026 {
34027 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
34028
34029 - atomic_set(&dev_priv->irq_received, 0);
34030 + atomic_set_unchecked(&dev_priv->irq_received, 0);
34031
34032 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
34033 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
34034 diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
34035 index 5d9c6a7..d1b0e29 100644
34036 --- a/drivers/gpu/drm/i915/intel_sdvo.c
34037 +++ b/drivers/gpu/drm/i915/intel_sdvo.c
34038 @@ -2795,7 +2795,9 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
34039 sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, output_device);
34040
34041 /* Save the bit-banging i2c functionality for use by the DDC wrapper */
34042 - intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
34043 + pax_open_kernel();
34044 + *(void **)&intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
34045 + pax_close_kernel();
34046
34047 /* Read the regs to test if we can talk to the device */
34048 for (i = 0; i < 0x40; i++) {
34049 diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
34050 index be6c6b9..8615d9c 100644
34051 --- a/drivers/gpu/drm/mga/mga_drv.h
34052 +++ b/drivers/gpu/drm/mga/mga_drv.h
34053 @@ -120,9 +120,9 @@ typedef struct drm_mga_private {
34054 u32 clear_cmd;
34055 u32 maccess;
34056
34057 - atomic_t vbl_received; /**< Number of vblanks received. */
34058 + atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
34059 wait_queue_head_t fence_queue;
34060 - atomic_t last_fence_retired;
34061 + atomic_unchecked_t last_fence_retired;
34062 u32 next_fence_to_post;
34063
34064 unsigned int fb_cpp;
34065 diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
34066 index daa6041..a28a5da 100644
34067 --- a/drivers/gpu/drm/mga/mga_irq.c
34068 +++ b/drivers/gpu/drm/mga/mga_irq.c
34069 @@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
34070 if (crtc != 0)
34071 return 0;
34072
34073 - return atomic_read(&dev_priv->vbl_received);
34074 + return atomic_read_unchecked(&dev_priv->vbl_received);
34075 }
34076
34077
34078 @@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
34079 /* VBLANK interrupt */
34080 if (status & MGA_VLINEPEN) {
34081 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
34082 - atomic_inc(&dev_priv->vbl_received);
34083 + atomic_inc_unchecked(&dev_priv->vbl_received);
34084 drm_handle_vblank(dev, 0);
34085 handled = 1;
34086 }
34087 @@ -80,7 +80,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
34088 MGA_WRITE(MGA_PRIMEND, prim_end);
34089 }
34090
34091 - atomic_inc(&dev_priv->last_fence_retired);
34092 + atomic_inc_unchecked(&dev_priv->last_fence_retired);
34093 DRM_WAKEUP(&dev_priv->fence_queue);
34094 handled = 1;
34095 }
34096 @@ -131,7 +131,7 @@ int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence)
34097 * using fences.
34098 */
34099 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
34100 - (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
34101 + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
34102 - *sequence) <= (1 << 23)));
34103
34104 *sequence = cur_fence;
34105 diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
34106 index 4c39a40..b22a9ea 100644
34107 --- a/drivers/gpu/drm/r128/r128_cce.c
34108 +++ b/drivers/gpu/drm/r128/r128_cce.c
34109 @@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
34110
34111 /* GH: Simple idle check.
34112 */
34113 - atomic_set(&dev_priv->idle_count, 0);
34114 + atomic_set_unchecked(&dev_priv->idle_count, 0);
34115
34116 /* We don't support anything other than bus-mastering ring mode,
34117 * but the ring can be in either AGP or PCI space for the ring
34118 diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
34119 index 3c60829..4faf484 100644
34120 --- a/drivers/gpu/drm/r128/r128_drv.h
34121 +++ b/drivers/gpu/drm/r128/r128_drv.h
34122 @@ -90,14 +90,14 @@ typedef struct drm_r128_private {
34123 int is_pci;
34124 unsigned long cce_buffers_offset;
34125
34126 - atomic_t idle_count;
34127 + atomic_unchecked_t idle_count;
34128
34129 int page_flipping;
34130 int current_page;
34131 u32 crtc_offset;
34132 u32 crtc_offset_cntl;
34133
34134 - atomic_t vbl_received;
34135 + atomic_unchecked_t vbl_received;
34136
34137 u32 color_fmt;
34138 unsigned int front_offset;
34139 diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
34140 index 69810fb..97bf17a 100644
34141 --- a/drivers/gpu/drm/r128/r128_irq.c
34142 +++ b/drivers/gpu/drm/r128/r128_irq.c
34143 @@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
34144 if (crtc != 0)
34145 return 0;
34146
34147 - return atomic_read(&dev_priv->vbl_received);
34148 + return atomic_read_unchecked(&dev_priv->vbl_received);
34149 }
34150
34151 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
34152 @@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
34153 /* VBLANK interrupt */
34154 if (status & R128_CRTC_VBLANK_INT) {
34155 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
34156 - atomic_inc(&dev_priv->vbl_received);
34157 + atomic_inc_unchecked(&dev_priv->vbl_received);
34158 drm_handle_vblank(dev, 0);
34159 return IRQ_HANDLED;
34160 }
34161 diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
34162 index af2665c..51922d2 100644
34163 --- a/drivers/gpu/drm/r128/r128_state.c
34164 +++ b/drivers/gpu/drm/r128/r128_state.c
34165 @@ -323,10 +323,10 @@ static void r128_clear_box(drm_r128_private_t * dev_priv,
34166
34167 static void r128_cce_performance_boxes(drm_r128_private_t * dev_priv)
34168 {
34169 - if (atomic_read(&dev_priv->idle_count) == 0) {
34170 + if (atomic_read_unchecked(&dev_priv->idle_count) == 0) {
34171 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
34172 } else {
34173 - atomic_set(&dev_priv->idle_count, 0);
34174 + atomic_set_unchecked(&dev_priv->idle_count, 0);
34175 }
34176 }
34177
34178 diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
34179 index dd72b91..8644b3c 100644
34180 --- a/drivers/gpu/drm/radeon/atom.c
34181 +++ b/drivers/gpu/drm/radeon/atom.c
34182 @@ -1115,6 +1115,8 @@ struct atom_context *atom_parse(struct card_info *card, void *bios)
34183 char name[512];
34184 int i;
34185
34186 + pax_track_stack();
34187 +
34188 ctx->card = card;
34189 ctx->bios = bios;
34190
34191 diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
34192 index 0d79577..efaa7a5 100644
34193 --- a/drivers/gpu/drm/radeon/mkregtable.c
34194 +++ b/drivers/gpu/drm/radeon/mkregtable.c
34195 @@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
34196 regex_t mask_rex;
34197 regmatch_t match[4];
34198 char buf[1024];
34199 - size_t end;
34200 + long end;
34201 int len;
34202 int done = 0;
34203 int r;
34204 unsigned o;
34205 struct offset *offset;
34206 char last_reg_s[10];
34207 - int last_reg;
34208 + unsigned long last_reg;
34209
34210 if (regcomp
34211 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
34212 diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
34213 index 6735213..38c2c67 100644
34214 --- a/drivers/gpu/drm/radeon/radeon.h
34215 +++ b/drivers/gpu/drm/radeon/radeon.h
34216 @@ -149,7 +149,7 @@ int radeon_pm_init(struct radeon_device *rdev);
34217 */
34218 struct radeon_fence_driver {
34219 uint32_t scratch_reg;
34220 - atomic_t seq;
34221 + atomic_unchecked_t seq;
34222 uint32_t last_seq;
34223 unsigned long count_timeout;
34224 wait_queue_head_t queue;
34225 @@ -640,7 +640,7 @@ struct radeon_asic {
34226 uint32_t offset, uint32_t obj_size);
34227 int (*clear_surface_reg)(struct radeon_device *rdev, int reg);
34228 void (*bandwidth_update)(struct radeon_device *rdev);
34229 -};
34230 +} __no_const;
34231
34232 /*
34233 * Asic structures
34234 diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
34235 index 4e928b9..d8b6008 100644
34236 --- a/drivers/gpu/drm/radeon/radeon_atombios.c
34237 +++ b/drivers/gpu/drm/radeon/radeon_atombios.c
34238 @@ -275,6 +275,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
34239 bool linkb;
34240 struct radeon_i2c_bus_rec ddc_bus;
34241
34242 + pax_track_stack();
34243 +
34244 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
34245
34246 if (data_offset == 0)
34247 @@ -520,13 +522,13 @@ static uint16_t atombios_get_connector_object_id(struct drm_device *dev,
34248 }
34249 }
34250
34251 -struct bios_connector {
34252 +static struct bios_connector {
34253 bool valid;
34254 uint16_t line_mux;
34255 uint16_t devices;
34256 int connector_type;
34257 struct radeon_i2c_bus_rec ddc_bus;
34258 -};
34259 +} bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
34260
34261 bool radeon_get_atom_connector_info_from_supported_devices_table(struct
34262 drm_device
34263 @@ -542,7 +544,6 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
34264 uint8_t dac;
34265 union atom_supported_devices *supported_devices;
34266 int i, j;
34267 - struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
34268
34269 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
34270
34271 diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
34272 index 083a181..ccccae0 100644
34273 --- a/drivers/gpu/drm/radeon/radeon_display.c
34274 +++ b/drivers/gpu/drm/radeon/radeon_display.c
34275 @@ -482,7 +482,7 @@ void radeon_compute_pll(struct radeon_pll *pll,
34276
34277 if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
34278 error = freq - current_freq;
34279 - error = error < 0 ? 0xffffffff : error;
34280 + error = (int32_t)error < 0 ? 0xffffffff : error;
34281 } else
34282 error = abs(current_freq - freq);
34283 vco_diff = abs(vco - best_vco);
34284 diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
34285 index 76e4070..193fa7f 100644
34286 --- a/drivers/gpu/drm/radeon/radeon_drv.h
34287 +++ b/drivers/gpu/drm/radeon/radeon_drv.h
34288 @@ -253,7 +253,7 @@ typedef struct drm_radeon_private {
34289
34290 /* SW interrupt */
34291 wait_queue_head_t swi_queue;
34292 - atomic_t swi_emitted;
34293 + atomic_unchecked_t swi_emitted;
34294 int vblank_crtc;
34295 uint32_t irq_enable_reg;
34296 uint32_t r500_disp_irq_reg;
34297 diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
34298 index 3beb26d..6ce9c4a 100644
34299 --- a/drivers/gpu/drm/radeon/radeon_fence.c
34300 +++ b/drivers/gpu/drm/radeon/radeon_fence.c
34301 @@ -47,7 +47,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
34302 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
34303 return 0;
34304 }
34305 - fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
34306 + fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
34307 if (!rdev->cp.ready) {
34308 /* FIXME: cp is not running assume everythings is done right
34309 * away
34310 @@ -364,7 +364,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
34311 return r;
34312 }
34313 WREG32(rdev->fence_drv.scratch_reg, 0);
34314 - atomic_set(&rdev->fence_drv.seq, 0);
34315 + atomic_set_unchecked(&rdev->fence_drv.seq, 0);
34316 INIT_LIST_HEAD(&rdev->fence_drv.created);
34317 INIT_LIST_HEAD(&rdev->fence_drv.emited);
34318 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
34319 diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
34320 index a1bf11d..4a123c0 100644
34321 --- a/drivers/gpu/drm/radeon/radeon_ioc32.c
34322 +++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
34323 @@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
34324 request = compat_alloc_user_space(sizeof(*request));
34325 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
34326 || __put_user(req32.param, &request->param)
34327 - || __put_user((void __user *)(unsigned long)req32.value,
34328 + || __put_user((unsigned long)req32.value,
34329 &request->value))
34330 return -EFAULT;
34331
34332 diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
34333 index b79ecc4..8dab92d 100644
34334 --- a/drivers/gpu/drm/radeon/radeon_irq.c
34335 +++ b/drivers/gpu/drm/radeon/radeon_irq.c
34336 @@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_device * dev)
34337 unsigned int ret;
34338 RING_LOCALS;
34339
34340 - atomic_inc(&dev_priv->swi_emitted);
34341 - ret = atomic_read(&dev_priv->swi_emitted);
34342 + atomic_inc_unchecked(&dev_priv->swi_emitted);
34343 + ret = atomic_read_unchecked(&dev_priv->swi_emitted);
34344
34345 BEGIN_RING(4);
34346 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
34347 @@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
34348 drm_radeon_private_t *dev_priv =
34349 (drm_radeon_private_t *) dev->dev_private;
34350
34351 - atomic_set(&dev_priv->swi_emitted, 0);
34352 + atomic_set_unchecked(&dev_priv->swi_emitted, 0);
34353 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
34354
34355 dev->max_vblank_count = 0x001fffff;
34356 diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
34357 index 4747910..48ca4b3 100644
34358 --- a/drivers/gpu/drm/radeon/radeon_state.c
34359 +++ b/drivers/gpu/drm/radeon/radeon_state.c
34360 @@ -3021,7 +3021,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
34361 {
34362 drm_radeon_private_t *dev_priv = dev->dev_private;
34363 drm_radeon_getparam_t *param = data;
34364 - int value;
34365 + int value = 0;
34366
34367 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
34368
34369 diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
34370 index 1381e06..0e53b17 100644
34371 --- a/drivers/gpu/drm/radeon/radeon_ttm.c
34372 +++ b/drivers/gpu/drm/radeon/radeon_ttm.c
34373 @@ -535,27 +535,10 @@ void radeon_ttm_fini(struct radeon_device *rdev)
34374 DRM_INFO("radeon: ttm finalized\n");
34375 }
34376
34377 -static struct vm_operations_struct radeon_ttm_vm_ops;
34378 -static const struct vm_operations_struct *ttm_vm_ops = NULL;
34379 -
34380 -static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
34381 -{
34382 - struct ttm_buffer_object *bo;
34383 - int r;
34384 -
34385 - bo = (struct ttm_buffer_object *)vma->vm_private_data;
34386 - if (bo == NULL) {
34387 - return VM_FAULT_NOPAGE;
34388 - }
34389 - r = ttm_vm_ops->fault(vma, vmf);
34390 - return r;
34391 -}
34392 -
34393 int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
34394 {
34395 struct drm_file *file_priv;
34396 struct radeon_device *rdev;
34397 - int r;
34398
34399 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
34400 return drm_mmap(filp, vma);
34401 @@ -563,20 +546,9 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
34402
34403 file_priv = (struct drm_file *)filp->private_data;
34404 rdev = file_priv->minor->dev->dev_private;
34405 - if (rdev == NULL) {
34406 + if (!rdev)
34407 return -EINVAL;
34408 - }
34409 - r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
34410 - if (unlikely(r != 0)) {
34411 - return r;
34412 - }
34413 - if (unlikely(ttm_vm_ops == NULL)) {
34414 - ttm_vm_ops = vma->vm_ops;
34415 - radeon_ttm_vm_ops = *ttm_vm_ops;
34416 - radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
34417 - }
34418 - vma->vm_ops = &radeon_ttm_vm_ops;
34419 - return 0;
34420 + return ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
34421 }
34422
34423
34424 diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
34425 index b12ff76..0bd0c6e 100644
34426 --- a/drivers/gpu/drm/radeon/rs690.c
34427 +++ b/drivers/gpu/drm/radeon/rs690.c
34428 @@ -302,9 +302,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
34429 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
34430 rdev->pm.sideport_bandwidth.full)
34431 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
34432 - read_delay_latency.full = rfixed_const(370 * 800 * 1000);
34433 + read_delay_latency.full = rfixed_const(800 * 1000);
34434 read_delay_latency.full = rfixed_div(read_delay_latency,
34435 rdev->pm.igp_sideport_mclk);
34436 + a.full = rfixed_const(370);
34437 + read_delay_latency.full = rfixed_mul(read_delay_latency, a);
34438 } else {
34439 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
34440 rdev->pm.k8_bandwidth.full)
34441 diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
34442 index 0ed436e..e6e7ce3 100644
34443 --- a/drivers/gpu/drm/ttm/ttm_bo.c
34444 +++ b/drivers/gpu/drm/ttm/ttm_bo.c
34445 @@ -67,7 +67,7 @@ static struct attribute *ttm_bo_global_attrs[] = {
34446 NULL
34447 };
34448
34449 -static struct sysfs_ops ttm_bo_global_ops = {
34450 +static const struct sysfs_ops ttm_bo_global_ops = {
34451 .show = &ttm_bo_global_show
34452 };
34453
34454 diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
34455 index 1c040d0..f9e4af8 100644
34456 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
34457 +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
34458 @@ -73,7 +73,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
34459 {
34460 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
34461 vma->vm_private_data;
34462 - struct ttm_bo_device *bdev = bo->bdev;
34463 + struct ttm_bo_device *bdev;
34464 unsigned long bus_base;
34465 unsigned long bus_offset;
34466 unsigned long bus_size;
34467 @@ -88,6 +88,10 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
34468 unsigned long address = (unsigned long)vmf->virtual_address;
34469 int retval = VM_FAULT_NOPAGE;
34470
34471 + if (!bo)
34472 + return VM_FAULT_NOPAGE;
34473 + bdev = bo->bdev;
34474 +
34475 /*
34476 * Work around locking order reversal in fault / nopfn
34477 * between mmap_sem and bo_reserve: Perform a trylock operation
34478 diff --git a/drivers/gpu/drm/ttm/ttm_global.c b/drivers/gpu/drm/ttm/ttm_global.c
34479 index b170071..28ae90e 100644
34480 --- a/drivers/gpu/drm/ttm/ttm_global.c
34481 +++ b/drivers/gpu/drm/ttm/ttm_global.c
34482 @@ -36,7 +36,7 @@
34483 struct ttm_global_item {
34484 struct mutex mutex;
34485 void *object;
34486 - int refcount;
34487 + atomic_t refcount;
34488 };
34489
34490 static struct ttm_global_item glob[TTM_GLOBAL_NUM];
34491 @@ -49,7 +49,7 @@ void ttm_global_init(void)
34492 struct ttm_global_item *item = &glob[i];
34493 mutex_init(&item->mutex);
34494 item->object = NULL;
34495 - item->refcount = 0;
34496 + atomic_set(&item->refcount, 0);
34497 }
34498 }
34499
34500 @@ -59,7 +59,7 @@ void ttm_global_release(void)
34501 for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
34502 struct ttm_global_item *item = &glob[i];
34503 BUG_ON(item->object != NULL);
34504 - BUG_ON(item->refcount != 0);
34505 + BUG_ON(atomic_read(&item->refcount) != 0);
34506 }
34507 }
34508
34509 @@ -70,7 +70,7 @@ int ttm_global_item_ref(struct ttm_global_reference *ref)
34510 void *object;
34511
34512 mutex_lock(&item->mutex);
34513 - if (item->refcount == 0) {
34514 + if (atomic_read(&item->refcount) == 0) {
34515 item->object = kzalloc(ref->size, GFP_KERNEL);
34516 if (unlikely(item->object == NULL)) {
34517 ret = -ENOMEM;
34518 @@ -83,7 +83,7 @@ int ttm_global_item_ref(struct ttm_global_reference *ref)
34519 goto out_err;
34520
34521 }
34522 - ++item->refcount;
34523 + atomic_inc(&item->refcount);
34524 ref->object = item->object;
34525 object = item->object;
34526 mutex_unlock(&item->mutex);
34527 @@ -100,9 +100,9 @@ void ttm_global_item_unref(struct ttm_global_reference *ref)
34528 struct ttm_global_item *item = &glob[ref->global_type];
34529
34530 mutex_lock(&item->mutex);
34531 - BUG_ON(item->refcount == 0);
34532 + BUG_ON(atomic_read(&item->refcount) == 0);
34533 BUG_ON(ref->object != item->object);
34534 - if (--item->refcount == 0) {
34535 + if (atomic_dec_and_test(&item->refcount)) {
34536 ref->release(ref);
34537 item->object = NULL;
34538 }
34539 diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
34540 index 072c281..d8ef483 100644
34541 --- a/drivers/gpu/drm/ttm/ttm_memory.c
34542 +++ b/drivers/gpu/drm/ttm/ttm_memory.c
34543 @@ -152,7 +152,7 @@ static struct attribute *ttm_mem_zone_attrs[] = {
34544 NULL
34545 };
34546
34547 -static struct sysfs_ops ttm_mem_zone_ops = {
34548 +static const struct sysfs_ops ttm_mem_zone_ops = {
34549 .show = &ttm_mem_zone_show,
34550 .store = &ttm_mem_zone_store
34551 };
34552 diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
34553 index cafcb84..b8e66cc 100644
34554 --- a/drivers/gpu/drm/via/via_drv.h
34555 +++ b/drivers/gpu/drm/via/via_drv.h
34556 @@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
34557 typedef uint32_t maskarray_t[5];
34558
34559 typedef struct drm_via_irq {
34560 - atomic_t irq_received;
34561 + atomic_unchecked_t irq_received;
34562 uint32_t pending_mask;
34563 uint32_t enable_mask;
34564 wait_queue_head_t irq_queue;
34565 @@ -75,7 +75,7 @@ typedef struct drm_via_private {
34566 struct timeval last_vblank;
34567 int last_vblank_valid;
34568 unsigned usec_per_vblank;
34569 - atomic_t vbl_received;
34570 + atomic_unchecked_t vbl_received;
34571 drm_via_state_t hc_state;
34572 char pci_buf[VIA_PCI_BUF_SIZE];
34573 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
34574 diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
34575 index 5935b88..127a8a6 100644
34576 --- a/drivers/gpu/drm/via/via_irq.c
34577 +++ b/drivers/gpu/drm/via/via_irq.c
34578 @@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
34579 if (crtc != 0)
34580 return 0;
34581
34582 - return atomic_read(&dev_priv->vbl_received);
34583 + return atomic_read_unchecked(&dev_priv->vbl_received);
34584 }
34585
34586 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
34587 @@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
34588
34589 status = VIA_READ(VIA_REG_INTERRUPT);
34590 if (status & VIA_IRQ_VBLANK_PENDING) {
34591 - atomic_inc(&dev_priv->vbl_received);
34592 - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
34593 + atomic_inc_unchecked(&dev_priv->vbl_received);
34594 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
34595 do_gettimeofday(&cur_vblank);
34596 if (dev_priv->last_vblank_valid) {
34597 dev_priv->usec_per_vblank =
34598 @@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
34599 dev_priv->last_vblank = cur_vblank;
34600 dev_priv->last_vblank_valid = 1;
34601 }
34602 - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
34603 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
34604 DRM_DEBUG("US per vblank is: %u\n",
34605 dev_priv->usec_per_vblank);
34606 }
34607 @@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
34608
34609 for (i = 0; i < dev_priv->num_irqs; ++i) {
34610 if (status & cur_irq->pending_mask) {
34611 - atomic_inc(&cur_irq->irq_received);
34612 + atomic_inc_unchecked(&cur_irq->irq_received);
34613 DRM_WAKEUP(&cur_irq->irq_queue);
34614 handled = 1;
34615 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) {
34616 @@ -244,11 +244,11 @@ via_driver_irq_wait(struct drm_device * dev, unsigned int irq, int force_sequenc
34617 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
34618 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
34619 masks[irq][4]));
34620 - cur_irq_sequence = atomic_read(&cur_irq->irq_received);
34621 + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
34622 } else {
34623 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
34624 (((cur_irq_sequence =
34625 - atomic_read(&cur_irq->irq_received)) -
34626 + atomic_read_unchecked(&cur_irq->irq_received)) -
34627 *sequence) <= (1 << 23)));
34628 }
34629 *sequence = cur_irq_sequence;
34630 @@ -286,7 +286,7 @@ void via_driver_irq_preinstall(struct drm_device * dev)
34631 }
34632
34633 for (i = 0; i < dev_priv->num_irqs; ++i) {
34634 - atomic_set(&cur_irq->irq_received, 0);
34635 + atomic_set_unchecked(&cur_irq->irq_received, 0);
34636 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
34637 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
34638 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
34639 @@ -368,7 +368,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
34640 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
34641 case VIA_IRQ_RELATIVE:
34642 irqwait->request.sequence +=
34643 - atomic_read(&cur_irq->irq_received);
34644 + atomic_read_unchecked(&cur_irq->irq_received);
34645 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
34646 case VIA_IRQ_ABSOLUTE:
34647 break;
34648 diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
34649 index aa8688d..6a0140c 100644
34650 --- a/drivers/gpu/vga/vgaarb.c
34651 +++ b/drivers/gpu/vga/vgaarb.c
34652 @@ -894,14 +894,20 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
34653 uc = &priv->cards[i];
34654 }
34655
34656 - if (!uc)
34657 - return -EINVAL;
34658 + if (!uc) {
34659 + ret_val = -EINVAL;
34660 + goto done;
34661 + }
34662
34663 - if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0)
34664 - return -EINVAL;
34665 + if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0) {
34666 + ret_val = -EINVAL;
34667 + goto done;
34668 + }
34669
34670 - if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0)
34671 - return -EINVAL;
34672 + if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0) {
34673 + ret_val = -EINVAL;
34674 + goto done;
34675 + }
34676
34677 vga_put(pdev, io_state);
34678
34679 diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
34680 index 11f8069..4783396 100644
34681 --- a/drivers/hid/hid-core.c
34682 +++ b/drivers/hid/hid-core.c
34683 @@ -1752,7 +1752,7 @@ static bool hid_ignore(struct hid_device *hdev)
34684
34685 int hid_add_device(struct hid_device *hdev)
34686 {
34687 - static atomic_t id = ATOMIC_INIT(0);
34688 + static atomic_unchecked_t id = ATOMIC_INIT(0);
34689 int ret;
34690
34691 if (WARN_ON(hdev->status & HID_STAT_ADDED))
34692 @@ -1766,7 +1766,7 @@ int hid_add_device(struct hid_device *hdev)
34693 /* XXX hack, any other cleaner solution after the driver core
34694 * is converted to allow more than 20 bytes as the device name? */
34695 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
34696 - hdev->vendor, hdev->product, atomic_inc_return(&id));
34697 + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
34698
34699 ret = device_add(&hdev->dev);
34700 if (!ret)
34701 diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
34702 index 8b6ee24..70f657d 100644
34703 --- a/drivers/hid/usbhid/hiddev.c
34704 +++ b/drivers/hid/usbhid/hiddev.c
34705 @@ -617,7 +617,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
34706 return put_user(HID_VERSION, (int __user *)arg);
34707
34708 case HIDIOCAPPLICATION:
34709 - if (arg < 0 || arg >= hid->maxapplication)
34710 + if (arg >= hid->maxapplication)
34711 return -EINVAL;
34712
34713 for (i = 0; i < hid->maxcollection; i++)
34714 diff --git a/drivers/hwmon/lis3lv02d.c b/drivers/hwmon/lis3lv02d.c
34715 index 5d5ed69..f40533e 100644
34716 --- a/drivers/hwmon/lis3lv02d.c
34717 +++ b/drivers/hwmon/lis3lv02d.c
34718 @@ -146,7 +146,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *dummy)
34719 * the lid is closed. This leads to interrupts as soon as a little move
34720 * is done.
34721 */
34722 - atomic_inc(&lis3_dev.count);
34723 + atomic_inc_unchecked(&lis3_dev.count);
34724
34725 wake_up_interruptible(&lis3_dev.misc_wait);
34726 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
34727 @@ -160,7 +160,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
34728 if (test_and_set_bit(0, &lis3_dev.misc_opened))
34729 return -EBUSY; /* already open */
34730
34731 - atomic_set(&lis3_dev.count, 0);
34732 + atomic_set_unchecked(&lis3_dev.count, 0);
34733
34734 /*
34735 * The sensor can generate interrupts for free-fall and direction
34736 @@ -206,7 +206,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
34737 add_wait_queue(&lis3_dev.misc_wait, &wait);
34738 while (true) {
34739 set_current_state(TASK_INTERRUPTIBLE);
34740 - data = atomic_xchg(&lis3_dev.count, 0);
34741 + data = atomic_xchg_unchecked(&lis3_dev.count, 0);
34742 if (data)
34743 break;
34744
34745 @@ -244,7 +244,7 @@ out:
34746 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
34747 {
34748 poll_wait(file, &lis3_dev.misc_wait, wait);
34749 - if (atomic_read(&lis3_dev.count))
34750 + if (atomic_read_unchecked(&lis3_dev.count))
34751 return POLLIN | POLLRDNORM;
34752 return 0;
34753 }
34754 diff --git a/drivers/hwmon/lis3lv02d.h b/drivers/hwmon/lis3lv02d.h
34755 index 7cdd76f..fe0efdf 100644
34756 --- a/drivers/hwmon/lis3lv02d.h
34757 +++ b/drivers/hwmon/lis3lv02d.h
34758 @@ -201,7 +201,7 @@ struct lis3lv02d {
34759
34760 struct input_polled_dev *idev; /* input device */
34761 struct platform_device *pdev; /* platform device */
34762 - atomic_t count; /* interrupt count after last read */
34763 + atomic_unchecked_t count; /* interrupt count after last read */
34764 int xcalib; /* calibrated null value for x */
34765 int ycalib; /* calibrated null value for y */
34766 int zcalib; /* calibrated null value for z */
34767 diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
34768 index 740785e..5a5c6c6 100644
34769 --- a/drivers/hwmon/sht15.c
34770 +++ b/drivers/hwmon/sht15.c
34771 @@ -112,7 +112,7 @@ struct sht15_data {
34772 int supply_uV;
34773 int supply_uV_valid;
34774 struct work_struct update_supply_work;
34775 - atomic_t interrupt_handled;
34776 + atomic_unchecked_t interrupt_handled;
34777 };
34778
34779 /**
34780 @@ -245,13 +245,13 @@ static inline int sht15_update_single_val(struct sht15_data *data,
34781 return ret;
34782
34783 gpio_direction_input(data->pdata->gpio_data);
34784 - atomic_set(&data->interrupt_handled, 0);
34785 + atomic_set_unchecked(&data->interrupt_handled, 0);
34786
34787 enable_irq(gpio_to_irq(data->pdata->gpio_data));
34788 if (gpio_get_value(data->pdata->gpio_data) == 0) {
34789 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
34790 /* Only relevant if the interrupt hasn't occured. */
34791 - if (!atomic_read(&data->interrupt_handled))
34792 + if (!atomic_read_unchecked(&data->interrupt_handled))
34793 schedule_work(&data->read_work);
34794 }
34795 ret = wait_event_timeout(data->wait_queue,
34796 @@ -398,7 +398,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
34797 struct sht15_data *data = d;
34798 /* First disable the interrupt */
34799 disable_irq_nosync(irq);
34800 - atomic_inc(&data->interrupt_handled);
34801 + atomic_inc_unchecked(&data->interrupt_handled);
34802 /* Then schedule a reading work struct */
34803 if (data->flag != SHT15_READING_NOTHING)
34804 schedule_work(&data->read_work);
34805 @@ -449,11 +449,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
34806 here as could have gone low in meantime so verify
34807 it hasn't!
34808 */
34809 - atomic_set(&data->interrupt_handled, 0);
34810 + atomic_set_unchecked(&data->interrupt_handled, 0);
34811 enable_irq(gpio_to_irq(data->pdata->gpio_data));
34812 /* If still not occured or another handler has been scheduled */
34813 if (gpio_get_value(data->pdata->gpio_data)
34814 - || atomic_read(&data->interrupt_handled))
34815 + || atomic_read_unchecked(&data->interrupt_handled))
34816 return;
34817 }
34818 /* Read the data back from the device */
34819 diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c
34820 index 97851c5..cb40626 100644
34821 --- a/drivers/hwmon/w83791d.c
34822 +++ b/drivers/hwmon/w83791d.c
34823 @@ -330,8 +330,8 @@ static int w83791d_detect(struct i2c_client *client, int kind,
34824 struct i2c_board_info *info);
34825 static int w83791d_remove(struct i2c_client *client);
34826
34827 -static int w83791d_read(struct i2c_client *client, u8 register);
34828 -static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
34829 +static int w83791d_read(struct i2c_client *client, u8 reg);
34830 +static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
34831 static struct w83791d_data *w83791d_update_device(struct device *dev);
34832
34833 #ifdef DEBUG
34834 diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
34835 index 378fcb5..5e91fa8 100644
34836 --- a/drivers/i2c/busses/i2c-amd756-s4882.c
34837 +++ b/drivers/i2c/busses/i2c-amd756-s4882.c
34838 @@ -43,7 +43,7 @@
34839 extern struct i2c_adapter amd756_smbus;
34840
34841 static struct i2c_adapter *s4882_adapter;
34842 -static struct i2c_algorithm *s4882_algo;
34843 +static i2c_algorithm_no_const *s4882_algo;
34844
34845 /* Wrapper access functions for multiplexed SMBus */
34846 static DEFINE_MUTEX(amd756_lock);
34847 diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
34848 index 29015eb..af2d8e9 100644
34849 --- a/drivers/i2c/busses/i2c-nforce2-s4985.c
34850 +++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
34851 @@ -41,7 +41,7 @@
34852 extern struct i2c_adapter *nforce2_smbus;
34853
34854 static struct i2c_adapter *s4985_adapter;
34855 -static struct i2c_algorithm *s4985_algo;
34856 +static i2c_algorithm_no_const *s4985_algo;
34857
34858 /* Wrapper access functions for multiplexed SMBus */
34859 static DEFINE_MUTEX(nforce2_lock);
34860 diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
34861 index 878f8ec..12376fc 100644
34862 --- a/drivers/ide/aec62xx.c
34863 +++ b/drivers/ide/aec62xx.c
34864 @@ -180,7 +180,7 @@ static const struct ide_port_ops atp86x_port_ops = {
34865 .cable_detect = atp86x_cable_detect,
34866 };
34867
34868 -static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
34869 +static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
34870 { /* 0: AEC6210 */
34871 .name = DRV_NAME,
34872 .init_chipset = init_chipset_aec62xx,
34873 diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
34874 index e59b6de..4b4fc65 100644
34875 --- a/drivers/ide/alim15x3.c
34876 +++ b/drivers/ide/alim15x3.c
34877 @@ -509,7 +509,7 @@ static const struct ide_dma_ops ali_dma_ops = {
34878 .dma_sff_read_status = ide_dma_sff_read_status,
34879 };
34880
34881 -static const struct ide_port_info ali15x3_chipset __devinitdata = {
34882 +static const struct ide_port_info ali15x3_chipset __devinitconst = {
34883 .name = DRV_NAME,
34884 .init_chipset = init_chipset_ali15x3,
34885 .init_hwif = init_hwif_ali15x3,
34886 diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
34887 index 628cd2e..087a414 100644
34888 --- a/drivers/ide/amd74xx.c
34889 +++ b/drivers/ide/amd74xx.c
34890 @@ -221,7 +221,7 @@ static const struct ide_port_ops amd_port_ops = {
34891 .udma_mask = udma, \
34892 }
34893
34894 -static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
34895 +static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
34896 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
34897 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
34898 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
34899 diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
34900 index 837322b..837fd71 100644
34901 --- a/drivers/ide/atiixp.c
34902 +++ b/drivers/ide/atiixp.c
34903 @@ -137,7 +137,7 @@ static const struct ide_port_ops atiixp_port_ops = {
34904 .cable_detect = atiixp_cable_detect,
34905 };
34906
34907 -static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
34908 +static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
34909 { /* 0: IXP200/300/400/700 */
34910 .name = DRV_NAME,
34911 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
34912 diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
34913 index ca0c46f..d55318a 100644
34914 --- a/drivers/ide/cmd64x.c
34915 +++ b/drivers/ide/cmd64x.c
34916 @@ -372,7 +372,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = {
34917 .dma_sff_read_status = ide_dma_sff_read_status,
34918 };
34919
34920 -static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
34921 +static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
34922 { /* 0: CMD643 */
34923 .name = DRV_NAME,
34924 .init_chipset = init_chipset_cmd64x,
34925 diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
34926 index 09f98ed..cebc5bc 100644
34927 --- a/drivers/ide/cs5520.c
34928 +++ b/drivers/ide/cs5520.c
34929 @@ -93,7 +93,7 @@ static const struct ide_port_ops cs5520_port_ops = {
34930 .set_dma_mode = cs5520_set_dma_mode,
34931 };
34932
34933 -static const struct ide_port_info cyrix_chipset __devinitdata = {
34934 +static const struct ide_port_info cyrix_chipset __devinitconst = {
34935 .name = DRV_NAME,
34936 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
34937 .port_ops = &cs5520_port_ops,
34938 diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
34939 index 40bf05e..7d58ca0 100644
34940 --- a/drivers/ide/cs5530.c
34941 +++ b/drivers/ide/cs5530.c
34942 @@ -244,7 +244,7 @@ static const struct ide_port_ops cs5530_port_ops = {
34943 .udma_filter = cs5530_udma_filter,
34944 };
34945
34946 -static const struct ide_port_info cs5530_chipset __devinitdata = {
34947 +static const struct ide_port_info cs5530_chipset __devinitconst = {
34948 .name = DRV_NAME,
34949 .init_chipset = init_chipset_cs5530,
34950 .init_hwif = init_hwif_cs5530,
34951 diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
34952 index 983d957..53e6172 100644
34953 --- a/drivers/ide/cs5535.c
34954 +++ b/drivers/ide/cs5535.c
34955 @@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = {
34956 .cable_detect = cs5535_cable_detect,
34957 };
34958
34959 -static const struct ide_port_info cs5535_chipset __devinitdata = {
34960 +static const struct ide_port_info cs5535_chipset __devinitconst = {
34961 .name = DRV_NAME,
34962 .port_ops = &cs5535_port_ops,
34963 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
34964 diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
34965 index 74fc540..8e933d8 100644
34966 --- a/drivers/ide/cy82c693.c
34967 +++ b/drivers/ide/cy82c693.c
34968 @@ -288,7 +288,7 @@ static const struct ide_port_ops cy82c693_port_ops = {
34969 .set_dma_mode = cy82c693_set_dma_mode,
34970 };
34971
34972 -static const struct ide_port_info cy82c693_chipset __devinitdata = {
34973 +static const struct ide_port_info cy82c693_chipset __devinitconst = {
34974 .name = DRV_NAME,
34975 .init_iops = init_iops_cy82c693,
34976 .port_ops = &cy82c693_port_ops,
34977 diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
34978 index 7ce68ef..e78197d 100644
34979 --- a/drivers/ide/hpt366.c
34980 +++ b/drivers/ide/hpt366.c
34981 @@ -507,7 +507,7 @@ static struct hpt_timings hpt37x_timings = {
34982 }
34983 };
34984
34985 -static const struct hpt_info hpt36x __devinitdata = {
34986 +static const struct hpt_info hpt36x __devinitconst = {
34987 .chip_name = "HPT36x",
34988 .chip_type = HPT36x,
34989 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
34990 @@ -515,7 +515,7 @@ static const struct hpt_info hpt36x __devinitdata = {
34991 .timings = &hpt36x_timings
34992 };
34993
34994 -static const struct hpt_info hpt370 __devinitdata = {
34995 +static const struct hpt_info hpt370 __devinitconst = {
34996 .chip_name = "HPT370",
34997 .chip_type = HPT370,
34998 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
34999 @@ -523,7 +523,7 @@ static const struct hpt_info hpt370 __devinitdata = {
35000 .timings = &hpt37x_timings
35001 };
35002
35003 -static const struct hpt_info hpt370a __devinitdata = {
35004 +static const struct hpt_info hpt370a __devinitconst = {
35005 .chip_name = "HPT370A",
35006 .chip_type = HPT370A,
35007 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
35008 @@ -531,7 +531,7 @@ static const struct hpt_info hpt370a __devinitdata = {
35009 .timings = &hpt37x_timings
35010 };
35011
35012 -static const struct hpt_info hpt374 __devinitdata = {
35013 +static const struct hpt_info hpt374 __devinitconst = {
35014 .chip_name = "HPT374",
35015 .chip_type = HPT374,
35016 .udma_mask = ATA_UDMA5,
35017 @@ -539,7 +539,7 @@ static const struct hpt_info hpt374 __devinitdata = {
35018 .timings = &hpt37x_timings
35019 };
35020
35021 -static const struct hpt_info hpt372 __devinitdata = {
35022 +static const struct hpt_info hpt372 __devinitconst = {
35023 .chip_name = "HPT372",
35024 .chip_type = HPT372,
35025 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
35026 @@ -547,7 +547,7 @@ static const struct hpt_info hpt372 __devinitdata = {
35027 .timings = &hpt37x_timings
35028 };
35029
35030 -static const struct hpt_info hpt372a __devinitdata = {
35031 +static const struct hpt_info hpt372a __devinitconst = {
35032 .chip_name = "HPT372A",
35033 .chip_type = HPT372A,
35034 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
35035 @@ -555,7 +555,7 @@ static const struct hpt_info hpt372a __devinitdata = {
35036 .timings = &hpt37x_timings
35037 };
35038
35039 -static const struct hpt_info hpt302 __devinitdata = {
35040 +static const struct hpt_info hpt302 __devinitconst = {
35041 .chip_name = "HPT302",
35042 .chip_type = HPT302,
35043 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
35044 @@ -563,7 +563,7 @@ static const struct hpt_info hpt302 __devinitdata = {
35045 .timings = &hpt37x_timings
35046 };
35047
35048 -static const struct hpt_info hpt371 __devinitdata = {
35049 +static const struct hpt_info hpt371 __devinitconst = {
35050 .chip_name = "HPT371",
35051 .chip_type = HPT371,
35052 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
35053 @@ -571,7 +571,7 @@ static const struct hpt_info hpt371 __devinitdata = {
35054 .timings = &hpt37x_timings
35055 };
35056
35057 -static const struct hpt_info hpt372n __devinitdata = {
35058 +static const struct hpt_info hpt372n __devinitconst = {
35059 .chip_name = "HPT372N",
35060 .chip_type = HPT372N,
35061 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
35062 @@ -579,7 +579,7 @@ static const struct hpt_info hpt372n __devinitdata = {
35063 .timings = &hpt37x_timings
35064 };
35065
35066 -static const struct hpt_info hpt302n __devinitdata = {
35067 +static const struct hpt_info hpt302n __devinitconst = {
35068 .chip_name = "HPT302N",
35069 .chip_type = HPT302N,
35070 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
35071 @@ -587,7 +587,7 @@ static const struct hpt_info hpt302n __devinitdata = {
35072 .timings = &hpt37x_timings
35073 };
35074
35075 -static const struct hpt_info hpt371n __devinitdata = {
35076 +static const struct hpt_info hpt371n __devinitconst = {
35077 .chip_name = "HPT371N",
35078 .chip_type = HPT371N,
35079 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
35080 @@ -1422,7 +1422,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = {
35081 .dma_sff_read_status = ide_dma_sff_read_status,
35082 };
35083
35084 -static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
35085 +static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
35086 { /* 0: HPT36x */
35087 .name = DRV_NAME,
35088 .init_chipset = init_chipset_hpt366,
35089 diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
35090 index 2de76cc..74186a1 100644
35091 --- a/drivers/ide/ide-cd.c
35092 +++ b/drivers/ide/ide-cd.c
35093 @@ -774,7 +774,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
35094 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
35095 if ((unsigned long)buf & alignment
35096 || blk_rq_bytes(rq) & q->dma_pad_mask
35097 - || object_is_on_stack(buf))
35098 + || object_starts_on_stack(buf))
35099 drive->dma = 0;
35100 }
35101 }
35102 diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
35103 index fefbdfc..62ff465 100644
35104 --- a/drivers/ide/ide-floppy.c
35105 +++ b/drivers/ide/ide-floppy.c
35106 @@ -373,6 +373,8 @@ static int ide_floppy_get_capacity(ide_drive_t *drive)
35107 u8 pc_buf[256], header_len, desc_cnt;
35108 int i, rc = 1, blocks, length;
35109
35110 + pax_track_stack();
35111 +
35112 ide_debug_log(IDE_DBG_FUNC, "enter");
35113
35114 drive->bios_cyl = 0;
35115 diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
35116 index 39d4e01..11538ce 100644
35117 --- a/drivers/ide/ide-pci-generic.c
35118 +++ b/drivers/ide/ide-pci-generic.c
35119 @@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = {
35120 .udma_mask = ATA_UDMA6, \
35121 }
35122
35123 -static const struct ide_port_info generic_chipsets[] __devinitdata = {
35124 +static const struct ide_port_info generic_chipsets[] __devinitconst = {
35125 /* 0: Unknown */
35126 DECLARE_GENERIC_PCI_DEV(0),
35127
35128 diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
35129 index 0d266a5..aaca790 100644
35130 --- a/drivers/ide/it8172.c
35131 +++ b/drivers/ide/it8172.c
35132 @@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = {
35133 .set_dma_mode = it8172_set_dma_mode,
35134 };
35135
35136 -static const struct ide_port_info it8172_port_info __devinitdata = {
35137 +static const struct ide_port_info it8172_port_info __devinitconst = {
35138 .name = DRV_NAME,
35139 .port_ops = &it8172_port_ops,
35140 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
35141 diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
35142 index 4797616..4be488a 100644
35143 --- a/drivers/ide/it8213.c
35144 +++ b/drivers/ide/it8213.c
35145 @@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = {
35146 .cable_detect = it8213_cable_detect,
35147 };
35148
35149 -static const struct ide_port_info it8213_chipset __devinitdata = {
35150 +static const struct ide_port_info it8213_chipset __devinitconst = {
35151 .name = DRV_NAME,
35152 .enablebits = { {0x41, 0x80, 0x80} },
35153 .port_ops = &it8213_port_ops,
35154 diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
35155 index 51aa745..146ee60 100644
35156 --- a/drivers/ide/it821x.c
35157 +++ b/drivers/ide/it821x.c
35158 @@ -627,7 +627,7 @@ static const struct ide_port_ops it821x_port_ops = {
35159 .cable_detect = it821x_cable_detect,
35160 };
35161
35162 -static const struct ide_port_info it821x_chipset __devinitdata = {
35163 +static const struct ide_port_info it821x_chipset __devinitconst = {
35164 .name = DRV_NAME,
35165 .init_chipset = init_chipset_it821x,
35166 .init_hwif = init_hwif_it821x,
35167 diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
35168 index bf2be64..9270098 100644
35169 --- a/drivers/ide/jmicron.c
35170 +++ b/drivers/ide/jmicron.c
35171 @@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = {
35172 .cable_detect = jmicron_cable_detect,
35173 };
35174
35175 -static const struct ide_port_info jmicron_chipset __devinitdata = {
35176 +static const struct ide_port_info jmicron_chipset __devinitconst = {
35177 .name = DRV_NAME,
35178 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
35179 .port_ops = &jmicron_port_ops,
35180 diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
35181 index 95327a2..73f78d8 100644
35182 --- a/drivers/ide/ns87415.c
35183 +++ b/drivers/ide/ns87415.c
35184 @@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = {
35185 .dma_sff_read_status = superio_dma_sff_read_status,
35186 };
35187
35188 -static const struct ide_port_info ns87415_chipset __devinitdata = {
35189 +static const struct ide_port_info ns87415_chipset __devinitconst = {
35190 .name = DRV_NAME,
35191 .init_hwif = init_hwif_ns87415,
35192 .tp_ops = &ns87415_tp_ops,
35193 diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
35194 index f1d70d6..e1de05b 100644
35195 --- a/drivers/ide/opti621.c
35196 +++ b/drivers/ide/opti621.c
35197 @@ -202,7 +202,7 @@ static const struct ide_port_ops opti621_port_ops = {
35198 .set_pio_mode = opti621_set_pio_mode,
35199 };
35200
35201 -static const struct ide_port_info opti621_chipset __devinitdata = {
35202 +static const struct ide_port_info opti621_chipset __devinitconst = {
35203 .name = DRV_NAME,
35204 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
35205 .port_ops = &opti621_port_ops,
35206 diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
35207 index 65ba823..7311f4d 100644
35208 --- a/drivers/ide/pdc202xx_new.c
35209 +++ b/drivers/ide/pdc202xx_new.c
35210 @@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = {
35211 .udma_mask = udma, \
35212 }
35213
35214 -static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
35215 +static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
35216 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
35217 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
35218 };
35219 diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
35220 index cb812f3..af816ef 100644
35221 --- a/drivers/ide/pdc202xx_old.c
35222 +++ b/drivers/ide/pdc202xx_old.c
35223 @@ -285,7 +285,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
35224 .max_sectors = sectors, \
35225 }
35226
35227 -static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
35228 +static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
35229 { /* 0: PDC20246 */
35230 .name = DRV_NAME,
35231 .init_chipset = init_chipset_pdc202xx,
35232 diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
35233 index bf14f39..15c4b98 100644
35234 --- a/drivers/ide/piix.c
35235 +++ b/drivers/ide/piix.c
35236 @@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = {
35237 .udma_mask = udma, \
35238 }
35239
35240 -static const struct ide_port_info piix_pci_info[] __devinitdata = {
35241 +static const struct ide_port_info piix_pci_info[] __devinitconst = {
35242 /* 0: MPIIX */
35243 { /*
35244 * MPIIX actually has only a single IDE channel mapped to
35245 diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
35246 index a6414a8..c04173e 100644
35247 --- a/drivers/ide/rz1000.c
35248 +++ b/drivers/ide/rz1000.c
35249 @@ -38,7 +38,7 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
35250 }
35251 }
35252
35253 -static const struct ide_port_info rz1000_chipset __devinitdata = {
35254 +static const struct ide_port_info rz1000_chipset __devinitconst = {
35255 .name = DRV_NAME,
35256 .host_flags = IDE_HFLAG_NO_DMA,
35257 };
35258 diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
35259 index d467478..9203942 100644
35260 --- a/drivers/ide/sc1200.c
35261 +++ b/drivers/ide/sc1200.c
35262 @@ -290,7 +290,7 @@ static const struct ide_dma_ops sc1200_dma_ops = {
35263 .dma_sff_read_status = ide_dma_sff_read_status,
35264 };
35265
35266 -static const struct ide_port_info sc1200_chipset __devinitdata = {
35267 +static const struct ide_port_info sc1200_chipset __devinitconst = {
35268 .name = DRV_NAME,
35269 .port_ops = &sc1200_port_ops,
35270 .dma_ops = &sc1200_dma_ops,
35271 diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
35272 index 1104bb3..59c5194 100644
35273 --- a/drivers/ide/scc_pata.c
35274 +++ b/drivers/ide/scc_pata.c
35275 @@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_ops = {
35276 .dma_sff_read_status = scc_dma_sff_read_status,
35277 };
35278
35279 -static const struct ide_port_info scc_chipset __devinitdata = {
35280 +static const struct ide_port_info scc_chipset __devinitconst = {
35281 .name = "sccIDE",
35282 .init_iops = init_iops_scc,
35283 .init_dma = scc_init_dma,
35284 diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
35285 index b6554ef..6cc2cc3 100644
35286 --- a/drivers/ide/serverworks.c
35287 +++ b/drivers/ide/serverworks.c
35288 @@ -353,7 +353,7 @@ static const struct ide_port_ops svwks_port_ops = {
35289 .cable_detect = svwks_cable_detect,
35290 };
35291
35292 -static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
35293 +static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
35294 { /* 0: OSB4 */
35295 .name = DRV_NAME,
35296 .init_chipset = init_chipset_svwks,
35297 diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c
35298 index ab3db61..afed580 100644
35299 --- a/drivers/ide/setup-pci.c
35300 +++ b/drivers/ide/setup-pci.c
35301 @@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
35302 int ret, i, n_ports = dev2 ? 4 : 2;
35303 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
35304
35305 + pax_track_stack();
35306 +
35307 for (i = 0; i < n_ports / 2; i++) {
35308 ret = ide_setup_pci_controller(pdev[i], d, !i);
35309 if (ret < 0)
35310 diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
35311 index d95df52..0b03a39 100644
35312 --- a/drivers/ide/siimage.c
35313 +++ b/drivers/ide/siimage.c
35314 @@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = {
35315 .udma_mask = ATA_UDMA6, \
35316 }
35317
35318 -static const struct ide_port_info siimage_chipsets[] __devinitdata = {
35319 +static const struct ide_port_info siimage_chipsets[] __devinitconst = {
35320 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
35321 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
35322 };
35323 diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
35324 index 3b88eba..ca8699d 100644
35325 --- a/drivers/ide/sis5513.c
35326 +++ b/drivers/ide/sis5513.c
35327 @@ -561,7 +561,7 @@ static const struct ide_port_ops sis_ata133_port_ops = {
35328 .cable_detect = sis_cable_detect,
35329 };
35330
35331 -static const struct ide_port_info sis5513_chipset __devinitdata = {
35332 +static const struct ide_port_info sis5513_chipset __devinitconst = {
35333 .name = DRV_NAME,
35334 .init_chipset = init_chipset_sis5513,
35335 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
35336 diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
35337 index d698da4..fca42a4 100644
35338 --- a/drivers/ide/sl82c105.c
35339 +++ b/drivers/ide/sl82c105.c
35340 @@ -319,7 +319,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = {
35341 .dma_sff_read_status = ide_dma_sff_read_status,
35342 };
35343
35344 -static const struct ide_port_info sl82c105_chipset __devinitdata = {
35345 +static const struct ide_port_info sl82c105_chipset __devinitconst = {
35346 .name = DRV_NAME,
35347 .init_chipset = init_chipset_sl82c105,
35348 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
35349 diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
35350 index 1ccfb40..83d5779 100644
35351 --- a/drivers/ide/slc90e66.c
35352 +++ b/drivers/ide/slc90e66.c
35353 @@ -131,7 +131,7 @@ static const struct ide_port_ops slc90e66_port_ops = {
35354 .cable_detect = slc90e66_cable_detect,
35355 };
35356
35357 -static const struct ide_port_info slc90e66_chipset __devinitdata = {
35358 +static const struct ide_port_info slc90e66_chipset __devinitconst = {
35359 .name = DRV_NAME,
35360 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
35361 .port_ops = &slc90e66_port_ops,
35362 diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
35363 index 05a93d6..5f9e325 100644
35364 --- a/drivers/ide/tc86c001.c
35365 +++ b/drivers/ide/tc86c001.c
35366 @@ -190,7 +190,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = {
35367 .dma_sff_read_status = ide_dma_sff_read_status,
35368 };
35369
35370 -static const struct ide_port_info tc86c001_chipset __devinitdata = {
35371 +static const struct ide_port_info tc86c001_chipset __devinitconst = {
35372 .name = DRV_NAME,
35373 .init_hwif = init_hwif_tc86c001,
35374 .port_ops = &tc86c001_port_ops,
35375 diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
35376 index 8773c3b..7907d6c 100644
35377 --- a/drivers/ide/triflex.c
35378 +++ b/drivers/ide/triflex.c
35379 @@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = {
35380 .set_dma_mode = triflex_set_mode,
35381 };
35382
35383 -static const struct ide_port_info triflex_device __devinitdata = {
35384 +static const struct ide_port_info triflex_device __devinitconst = {
35385 .name = DRV_NAME,
35386 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
35387 .port_ops = &triflex_port_ops,
35388 diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
35389 index 4b42ca0..e494a98 100644
35390 --- a/drivers/ide/trm290.c
35391 +++ b/drivers/ide/trm290.c
35392 @@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = {
35393 .dma_check = trm290_dma_check,
35394 };
35395
35396 -static const struct ide_port_info trm290_chipset __devinitdata = {
35397 +static const struct ide_port_info trm290_chipset __devinitconst = {
35398 .name = DRV_NAME,
35399 .init_hwif = init_hwif_trm290,
35400 .tp_ops = &trm290_tp_ops,
35401 diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
35402 index 028de26..520d5d5 100644
35403 --- a/drivers/ide/via82cxxx.c
35404 +++ b/drivers/ide/via82cxxx.c
35405 @@ -374,7 +374,7 @@ static const struct ide_port_ops via_port_ops = {
35406 .cable_detect = via82cxxx_cable_detect,
35407 };
35408
35409 -static const struct ide_port_info via82cxxx_chipset __devinitdata = {
35410 +static const struct ide_port_info via82cxxx_chipset __devinitconst = {
35411 .name = DRV_NAME,
35412 .init_chipset = init_chipset_via82cxxx,
35413 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
35414 diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c
35415 index 2cd00b5..14de699 100644
35416 --- a/drivers/ieee1394/dv1394.c
35417 +++ b/drivers/ieee1394/dv1394.c
35418 @@ -739,7 +739,7 @@ static void frame_prepare(struct video_card *video, unsigned int this_frame)
35419 based upon DIF section and sequence
35420 */
35421
35422 -static void inline
35423 +static inline void
35424 frame_put_packet (struct frame *f, struct packet *p)
35425 {
35426 int section_type = p->data[0] >> 5; /* section type is in bits 5 - 7 */
35427 diff --git a/drivers/ieee1394/hosts.c b/drivers/ieee1394/hosts.c
35428 index e947d8f..6a966b9 100644
35429 --- a/drivers/ieee1394/hosts.c
35430 +++ b/drivers/ieee1394/hosts.c
35431 @@ -78,6 +78,7 @@ static int dummy_isoctl(struct hpsb_iso *iso, enum isoctl_cmd command,
35432 }
35433
35434 static struct hpsb_host_driver dummy_driver = {
35435 + .name = "dummy",
35436 .transmit_packet = dummy_transmit_packet,
35437 .devctl = dummy_devctl,
35438 .isoctl = dummy_isoctl
35439 diff --git a/drivers/ieee1394/init_ohci1394_dma.c b/drivers/ieee1394/init_ohci1394_dma.c
35440 index ddaab6e..8d37435 100644
35441 --- a/drivers/ieee1394/init_ohci1394_dma.c
35442 +++ b/drivers/ieee1394/init_ohci1394_dma.c
35443 @@ -257,7 +257,7 @@ void __init init_ohci1394_dma_on_all_controllers(void)
35444 for (func = 0; func < 8; func++) {
35445 u32 class = read_pci_config(num,slot,func,
35446 PCI_CLASS_REVISION);
35447 - if ((class == 0xffffffff))
35448 + if (class == 0xffffffff)
35449 continue; /* No device at this func */
35450
35451 if (class>>8 != PCI_CLASS_SERIAL_FIREWIRE_OHCI)
35452 diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c
35453 index 65c1429..5d8c11f 100644
35454 --- a/drivers/ieee1394/ohci1394.c
35455 +++ b/drivers/ieee1394/ohci1394.c
35456 @@ -147,9 +147,9 @@ printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)
35457 printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
35458
35459 /* Module Parameters */
35460 -static int phys_dma = 1;
35461 +static int phys_dma;
35462 module_param(phys_dma, int, 0444);
35463 -MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 1).");
35464 +MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 0).");
35465
35466 static void dma_trm_tasklet(unsigned long data);
35467 static void dma_trm_reset(struct dma_trm_ctx *d);
35468 diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
35469 index f199896..78c9fc8 100644
35470 --- a/drivers/ieee1394/sbp2.c
35471 +++ b/drivers/ieee1394/sbp2.c
35472 @@ -2111,7 +2111,7 @@ MODULE_DESCRIPTION("IEEE-1394 SBP-2 protocol driver");
35473 MODULE_SUPPORTED_DEVICE(SBP2_DEVICE_NAME);
35474 MODULE_LICENSE("GPL");
35475
35476 -static int sbp2_module_init(void)
35477 +static int __init sbp2_module_init(void)
35478 {
35479 int ret;
35480
35481 diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
35482 index a5dea6b..0cefe8f 100644
35483 --- a/drivers/infiniband/core/cm.c
35484 +++ b/drivers/infiniband/core/cm.c
35485 @@ -112,7 +112,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
35486
35487 struct cm_counter_group {
35488 struct kobject obj;
35489 - atomic_long_t counter[CM_ATTR_COUNT];
35490 + atomic_long_unchecked_t counter[CM_ATTR_COUNT];
35491 };
35492
35493 struct cm_counter_attribute {
35494 @@ -1386,7 +1386,7 @@ static void cm_dup_req_handler(struct cm_work *work,
35495 struct ib_mad_send_buf *msg = NULL;
35496 int ret;
35497
35498 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
35499 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
35500 counter[CM_REQ_COUNTER]);
35501
35502 /* Quick state check to discard duplicate REQs. */
35503 @@ -1764,7 +1764,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
35504 if (!cm_id_priv)
35505 return;
35506
35507 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
35508 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
35509 counter[CM_REP_COUNTER]);
35510 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
35511 if (ret)
35512 @@ -1931,7 +1931,7 @@ static int cm_rtu_handler(struct cm_work *work)
35513 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
35514 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
35515 spin_unlock_irq(&cm_id_priv->lock);
35516 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
35517 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
35518 counter[CM_RTU_COUNTER]);
35519 goto out;
35520 }
35521 @@ -2110,7 +2110,7 @@ static int cm_dreq_handler(struct cm_work *work)
35522 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
35523 dreq_msg->local_comm_id);
35524 if (!cm_id_priv) {
35525 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
35526 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
35527 counter[CM_DREQ_COUNTER]);
35528 cm_issue_drep(work->port, work->mad_recv_wc);
35529 return -EINVAL;
35530 @@ -2131,7 +2131,7 @@ static int cm_dreq_handler(struct cm_work *work)
35531 case IB_CM_MRA_REP_RCVD:
35532 break;
35533 case IB_CM_TIMEWAIT:
35534 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
35535 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
35536 counter[CM_DREQ_COUNTER]);
35537 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
35538 goto unlock;
35539 @@ -2145,7 +2145,7 @@ static int cm_dreq_handler(struct cm_work *work)
35540 cm_free_msg(msg);
35541 goto deref;
35542 case IB_CM_DREQ_RCVD:
35543 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
35544 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
35545 counter[CM_DREQ_COUNTER]);
35546 goto unlock;
35547 default:
35548 @@ -2501,7 +2501,7 @@ static int cm_mra_handler(struct cm_work *work)
35549 ib_modify_mad(cm_id_priv->av.port->mad_agent,
35550 cm_id_priv->msg, timeout)) {
35551 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
35552 - atomic_long_inc(&work->port->
35553 + atomic_long_inc_unchecked(&work->port->
35554 counter_group[CM_RECV_DUPLICATES].
35555 counter[CM_MRA_COUNTER]);
35556 goto out;
35557 @@ -2510,7 +2510,7 @@ static int cm_mra_handler(struct cm_work *work)
35558 break;
35559 case IB_CM_MRA_REQ_RCVD:
35560 case IB_CM_MRA_REP_RCVD:
35561 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
35562 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
35563 counter[CM_MRA_COUNTER]);
35564 /* fall through */
35565 default:
35566 @@ -2672,7 +2672,7 @@ static int cm_lap_handler(struct cm_work *work)
35567 case IB_CM_LAP_IDLE:
35568 break;
35569 case IB_CM_MRA_LAP_SENT:
35570 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
35571 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
35572 counter[CM_LAP_COUNTER]);
35573 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
35574 goto unlock;
35575 @@ -2688,7 +2688,7 @@ static int cm_lap_handler(struct cm_work *work)
35576 cm_free_msg(msg);
35577 goto deref;
35578 case IB_CM_LAP_RCVD:
35579 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
35580 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
35581 counter[CM_LAP_COUNTER]);
35582 goto unlock;
35583 default:
35584 @@ -2972,7 +2972,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
35585 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
35586 if (cur_cm_id_priv) {
35587 spin_unlock_irq(&cm.lock);
35588 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
35589 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
35590 counter[CM_SIDR_REQ_COUNTER]);
35591 goto out; /* Duplicate message. */
35592 }
35593 @@ -3184,10 +3184,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
35594 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
35595 msg->retries = 1;
35596
35597 - atomic_long_add(1 + msg->retries,
35598 + atomic_long_add_unchecked(1 + msg->retries,
35599 &port->counter_group[CM_XMIT].counter[attr_index]);
35600 if (msg->retries)
35601 - atomic_long_add(msg->retries,
35602 + atomic_long_add_unchecked(msg->retries,
35603 &port->counter_group[CM_XMIT_RETRIES].
35604 counter[attr_index]);
35605
35606 @@ -3397,7 +3397,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
35607 }
35608
35609 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
35610 - atomic_long_inc(&port->counter_group[CM_RECV].
35611 + atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
35612 counter[attr_id - CM_ATTR_ID_OFFSET]);
35613
35614 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
35615 @@ -3595,10 +3595,10 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
35616 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
35617
35618 return sprintf(buf, "%ld\n",
35619 - atomic_long_read(&group->counter[cm_attr->index]));
35620 + atomic_long_read_unchecked(&group->counter[cm_attr->index]));
35621 }
35622
35623 -static struct sysfs_ops cm_counter_ops = {
35624 +static const struct sysfs_ops cm_counter_ops = {
35625 .show = cm_show_counter
35626 };
35627
35628 diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
35629 index 8fd3a6f..61d8075 100644
35630 --- a/drivers/infiniband/core/cma.c
35631 +++ b/drivers/infiniband/core/cma.c
35632 @@ -2267,6 +2267,9 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
35633
35634 req.private_data_len = sizeof(struct cma_hdr) +
35635 conn_param->private_data_len;
35636 + if (req.private_data_len < conn_param->private_data_len)
35637 + return -EINVAL;
35638 +
35639 req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
35640 if (!req.private_data)
35641 return -ENOMEM;
35642 @@ -2314,6 +2317,9 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
35643 memset(&req, 0, sizeof req);
35644 offset = cma_user_data_offset(id_priv->id.ps);
35645 req.private_data_len = offset + conn_param->private_data_len;
35646 + if (req.private_data_len < conn_param->private_data_len)
35647 + return -EINVAL;
35648 +
35649 private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
35650 if (!private_data)
35651 return -ENOMEM;
35652 diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
35653 index 4507043..14ad522 100644
35654 --- a/drivers/infiniband/core/fmr_pool.c
35655 +++ b/drivers/infiniband/core/fmr_pool.c
35656 @@ -97,8 +97,8 @@ struct ib_fmr_pool {
35657
35658 struct task_struct *thread;
35659
35660 - atomic_t req_ser;
35661 - atomic_t flush_ser;
35662 + atomic_unchecked_t req_ser;
35663 + atomic_unchecked_t flush_ser;
35664
35665 wait_queue_head_t force_wait;
35666 };
35667 @@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
35668 struct ib_fmr_pool *pool = pool_ptr;
35669
35670 do {
35671 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
35672 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
35673 ib_fmr_batch_release(pool);
35674
35675 - atomic_inc(&pool->flush_ser);
35676 + atomic_inc_unchecked(&pool->flush_ser);
35677 wake_up_interruptible(&pool->force_wait);
35678
35679 if (pool->flush_function)
35680 @@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
35681 }
35682
35683 set_current_state(TASK_INTERRUPTIBLE);
35684 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
35685 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
35686 !kthread_should_stop())
35687 schedule();
35688 __set_current_state(TASK_RUNNING);
35689 @@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
35690 pool->dirty_watermark = params->dirty_watermark;
35691 pool->dirty_len = 0;
35692 spin_lock_init(&pool->pool_lock);
35693 - atomic_set(&pool->req_ser, 0);
35694 - atomic_set(&pool->flush_ser, 0);
35695 + atomic_set_unchecked(&pool->req_ser, 0);
35696 + atomic_set_unchecked(&pool->flush_ser, 0);
35697 init_waitqueue_head(&pool->force_wait);
35698
35699 pool->thread = kthread_run(ib_fmr_cleanup_thread,
35700 @@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
35701 }
35702 spin_unlock_irq(&pool->pool_lock);
35703
35704 - serial = atomic_inc_return(&pool->req_ser);
35705 + serial = atomic_inc_return_unchecked(&pool->req_ser);
35706 wake_up_process(pool->thread);
35707
35708 if (wait_event_interruptible(pool->force_wait,
35709 - atomic_read(&pool->flush_ser) - serial >= 0))
35710 + atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
35711 return -EINTR;
35712
35713 return 0;
35714 @@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
35715 } else {
35716 list_add_tail(&fmr->list, &pool->dirty_list);
35717 if (++pool->dirty_len >= pool->dirty_watermark) {
35718 - atomic_inc(&pool->req_ser);
35719 + atomic_inc_unchecked(&pool->req_ser);
35720 wake_up_process(pool->thread);
35721 }
35722 }
35723 diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
35724 index 158a214..1558bb7 100644
35725 --- a/drivers/infiniband/core/sysfs.c
35726 +++ b/drivers/infiniband/core/sysfs.c
35727 @@ -79,7 +79,7 @@ static ssize_t port_attr_show(struct kobject *kobj,
35728 return port_attr->show(p, port_attr, buf);
35729 }
35730
35731 -static struct sysfs_ops port_sysfs_ops = {
35732 +static const struct sysfs_ops port_sysfs_ops = {
35733 .show = port_attr_show
35734 };
35735
35736 diff --git a/drivers/infiniband/core/uverbs_marshall.c b/drivers/infiniband/core/uverbs_marshall.c
35737 index 5440da0..1194ecb 100644
35738 --- a/drivers/infiniband/core/uverbs_marshall.c
35739 +++ b/drivers/infiniband/core/uverbs_marshall.c
35740 @@ -40,18 +40,21 @@ void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst,
35741 dst->grh.sgid_index = src->grh.sgid_index;
35742 dst->grh.hop_limit = src->grh.hop_limit;
35743 dst->grh.traffic_class = src->grh.traffic_class;
35744 + memset(&dst->grh.reserved, 0, sizeof(dst->grh.reserved));
35745 dst->dlid = src->dlid;
35746 dst->sl = src->sl;
35747 dst->src_path_bits = src->src_path_bits;
35748 dst->static_rate = src->static_rate;
35749 dst->is_global = src->ah_flags & IB_AH_GRH ? 1 : 0;
35750 dst->port_num = src->port_num;
35751 + dst->reserved = 0;
35752 }
35753 EXPORT_SYMBOL(ib_copy_ah_attr_to_user);
35754
35755 void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
35756 struct ib_qp_attr *src)
35757 {
35758 + dst->qp_state = src->qp_state;
35759 dst->cur_qp_state = src->cur_qp_state;
35760 dst->path_mtu = src->path_mtu;
35761 dst->path_mig_state = src->path_mig_state;
35762 @@ -83,6 +86,7 @@ void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
35763 dst->rnr_retry = src->rnr_retry;
35764 dst->alt_port_num = src->alt_port_num;
35765 dst->alt_timeout = src->alt_timeout;
35766 + memset(dst->reserved, 0, sizeof(dst->reserved));
35767 }
35768 EXPORT_SYMBOL(ib_copy_qp_attr_to_user);
35769
35770 diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
35771 index 100da85..62e6b88 100644
35772 --- a/drivers/infiniband/hw/ipath/ipath_fs.c
35773 +++ b/drivers/infiniband/hw/ipath/ipath_fs.c
35774 @@ -110,6 +110,8 @@ static ssize_t atomic_counters_read(struct file *file, char __user *buf,
35775 struct infinipath_counters counters;
35776 struct ipath_devdata *dd;
35777
35778 + pax_track_stack();
35779 +
35780 dd = file->f_path.dentry->d_inode->i_private;
35781 dd->ipath_f_read_counters(dd, &counters);
35782
35783 diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
35784 index cbde0cf..afaf55c 100644
35785 --- a/drivers/infiniband/hw/nes/nes.c
35786 +++ b/drivers/infiniband/hw/nes/nes.c
35787 @@ -102,7 +102,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
35788 LIST_HEAD(nes_adapter_list);
35789 static LIST_HEAD(nes_dev_list);
35790
35791 -atomic_t qps_destroyed;
35792 +atomic_unchecked_t qps_destroyed;
35793
35794 static unsigned int ee_flsh_adapter;
35795 static unsigned int sysfs_nonidx_addr;
35796 @@ -259,7 +259,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
35797 struct nes_adapter *nesadapter = nesdev->nesadapter;
35798 u32 qp_id;
35799
35800 - atomic_inc(&qps_destroyed);
35801 + atomic_inc_unchecked(&qps_destroyed);
35802
35803 /* Free the control structures */
35804
35805 diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
35806 index bcc6abc..9c76b2f 100644
35807 --- a/drivers/infiniband/hw/nes/nes.h
35808 +++ b/drivers/infiniband/hw/nes/nes.h
35809 @@ -174,17 +174,17 @@ extern unsigned int nes_debug_level;
35810 extern unsigned int wqm_quanta;
35811 extern struct list_head nes_adapter_list;
35812
35813 -extern atomic_t cm_connects;
35814 -extern atomic_t cm_accepts;
35815 -extern atomic_t cm_disconnects;
35816 -extern atomic_t cm_closes;
35817 -extern atomic_t cm_connecteds;
35818 -extern atomic_t cm_connect_reqs;
35819 -extern atomic_t cm_rejects;
35820 -extern atomic_t mod_qp_timouts;
35821 -extern atomic_t qps_created;
35822 -extern atomic_t qps_destroyed;
35823 -extern atomic_t sw_qps_destroyed;
35824 +extern atomic_unchecked_t cm_connects;
35825 +extern atomic_unchecked_t cm_accepts;
35826 +extern atomic_unchecked_t cm_disconnects;
35827 +extern atomic_unchecked_t cm_closes;
35828 +extern atomic_unchecked_t cm_connecteds;
35829 +extern atomic_unchecked_t cm_connect_reqs;
35830 +extern atomic_unchecked_t cm_rejects;
35831 +extern atomic_unchecked_t mod_qp_timouts;
35832 +extern atomic_unchecked_t qps_created;
35833 +extern atomic_unchecked_t qps_destroyed;
35834 +extern atomic_unchecked_t sw_qps_destroyed;
35835 extern u32 mh_detected;
35836 extern u32 mh_pauses_sent;
35837 extern u32 cm_packets_sent;
35838 @@ -196,11 +196,11 @@ extern u32 cm_packets_retrans;
35839 extern u32 cm_listens_created;
35840 extern u32 cm_listens_destroyed;
35841 extern u32 cm_backlog_drops;
35842 -extern atomic_t cm_loopbacks;
35843 -extern atomic_t cm_nodes_created;
35844 -extern atomic_t cm_nodes_destroyed;
35845 -extern atomic_t cm_accel_dropped_pkts;
35846 -extern atomic_t cm_resets_recvd;
35847 +extern atomic_unchecked_t cm_loopbacks;
35848 +extern atomic_unchecked_t cm_nodes_created;
35849 +extern atomic_unchecked_t cm_nodes_destroyed;
35850 +extern atomic_unchecked_t cm_accel_dropped_pkts;
35851 +extern atomic_unchecked_t cm_resets_recvd;
35852
35853 extern u32 int_mod_timer_init;
35854 extern u32 int_mod_cq_depth_256;
35855 diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
35856 index 73473db..5ed06e8 100644
35857 --- a/drivers/infiniband/hw/nes/nes_cm.c
35858 +++ b/drivers/infiniband/hw/nes/nes_cm.c
35859 @@ -69,11 +69,11 @@ u32 cm_packets_received;
35860 u32 cm_listens_created;
35861 u32 cm_listens_destroyed;
35862 u32 cm_backlog_drops;
35863 -atomic_t cm_loopbacks;
35864 -atomic_t cm_nodes_created;
35865 -atomic_t cm_nodes_destroyed;
35866 -atomic_t cm_accel_dropped_pkts;
35867 -atomic_t cm_resets_recvd;
35868 +atomic_unchecked_t cm_loopbacks;
35869 +atomic_unchecked_t cm_nodes_created;
35870 +atomic_unchecked_t cm_nodes_destroyed;
35871 +atomic_unchecked_t cm_accel_dropped_pkts;
35872 +atomic_unchecked_t cm_resets_recvd;
35873
35874 static inline int mini_cm_accelerated(struct nes_cm_core *,
35875 struct nes_cm_node *);
35876 @@ -149,13 +149,13 @@ static struct nes_cm_ops nes_cm_api = {
35877
35878 static struct nes_cm_core *g_cm_core;
35879
35880 -atomic_t cm_connects;
35881 -atomic_t cm_accepts;
35882 -atomic_t cm_disconnects;
35883 -atomic_t cm_closes;
35884 -atomic_t cm_connecteds;
35885 -atomic_t cm_connect_reqs;
35886 -atomic_t cm_rejects;
35887 +atomic_unchecked_t cm_connects;
35888 +atomic_unchecked_t cm_accepts;
35889 +atomic_unchecked_t cm_disconnects;
35890 +atomic_unchecked_t cm_closes;
35891 +atomic_unchecked_t cm_connecteds;
35892 +atomic_unchecked_t cm_connect_reqs;
35893 +atomic_unchecked_t cm_rejects;
35894
35895
35896 /**
35897 @@ -1195,7 +1195,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
35898 cm_node->rem_mac);
35899
35900 add_hte_node(cm_core, cm_node);
35901 - atomic_inc(&cm_nodes_created);
35902 + atomic_inc_unchecked(&cm_nodes_created);
35903
35904 return cm_node;
35905 }
35906 @@ -1253,7 +1253,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
35907 }
35908
35909 atomic_dec(&cm_core->node_cnt);
35910 - atomic_inc(&cm_nodes_destroyed);
35911 + atomic_inc_unchecked(&cm_nodes_destroyed);
35912 nesqp = cm_node->nesqp;
35913 if (nesqp) {
35914 nesqp->cm_node = NULL;
35915 @@ -1320,7 +1320,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
35916
35917 static void drop_packet(struct sk_buff *skb)
35918 {
35919 - atomic_inc(&cm_accel_dropped_pkts);
35920 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
35921 dev_kfree_skb_any(skb);
35922 }
35923
35924 @@ -1377,7 +1377,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
35925
35926 int reset = 0; /* whether to send reset in case of err.. */
35927 int passive_state;
35928 - atomic_inc(&cm_resets_recvd);
35929 + atomic_inc_unchecked(&cm_resets_recvd);
35930 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
35931 " refcnt=%d\n", cm_node, cm_node->state,
35932 atomic_read(&cm_node->ref_count));
35933 @@ -2000,7 +2000,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
35934 rem_ref_cm_node(cm_node->cm_core, cm_node);
35935 return NULL;
35936 }
35937 - atomic_inc(&cm_loopbacks);
35938 + atomic_inc_unchecked(&cm_loopbacks);
35939 loopbackremotenode->loopbackpartner = cm_node;
35940 loopbackremotenode->tcp_cntxt.rcv_wscale =
35941 NES_CM_DEFAULT_RCV_WND_SCALE;
35942 @@ -2262,7 +2262,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
35943 add_ref_cm_node(cm_node);
35944 } else if (cm_node->state == NES_CM_STATE_TSA) {
35945 rem_ref_cm_node(cm_core, cm_node);
35946 - atomic_inc(&cm_accel_dropped_pkts);
35947 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
35948 dev_kfree_skb_any(skb);
35949 break;
35950 }
35951 @@ -2568,7 +2568,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
35952
35953 if ((cm_id) && (cm_id->event_handler)) {
35954 if (issue_disconn) {
35955 - atomic_inc(&cm_disconnects);
35956 + atomic_inc_unchecked(&cm_disconnects);
35957 cm_event.event = IW_CM_EVENT_DISCONNECT;
35958 cm_event.status = disconn_status;
35959 cm_event.local_addr = cm_id->local_addr;
35960 @@ -2590,7 +2590,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
35961 }
35962
35963 if (issue_close) {
35964 - atomic_inc(&cm_closes);
35965 + atomic_inc_unchecked(&cm_closes);
35966 nes_disconnect(nesqp, 1);
35967
35968 cm_id->provider_data = nesqp;
35969 @@ -2710,7 +2710,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
35970
35971 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
35972 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
35973 - atomic_inc(&cm_accepts);
35974 + atomic_inc_unchecked(&cm_accepts);
35975
35976 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
35977 atomic_read(&nesvnic->netdev->refcnt));
35978 @@ -2919,7 +2919,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
35979
35980 struct nes_cm_core *cm_core;
35981
35982 - atomic_inc(&cm_rejects);
35983 + atomic_inc_unchecked(&cm_rejects);
35984 cm_node = (struct nes_cm_node *) cm_id->provider_data;
35985 loopback = cm_node->loopbackpartner;
35986 cm_core = cm_node->cm_core;
35987 @@ -2982,7 +2982,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
35988 ntohl(cm_id->local_addr.sin_addr.s_addr),
35989 ntohs(cm_id->local_addr.sin_port));
35990
35991 - atomic_inc(&cm_connects);
35992 + atomic_inc_unchecked(&cm_connects);
35993 nesqp->active_conn = 1;
35994
35995 /* cache the cm_id in the qp */
35996 @@ -3195,7 +3195,7 @@ static void cm_event_connected(struct nes_cm_event *event)
35997 if (nesqp->destroyed) {
35998 return;
35999 }
36000 - atomic_inc(&cm_connecteds);
36001 + atomic_inc_unchecked(&cm_connecteds);
36002 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
36003 " local port 0x%04X. jiffies = %lu.\n",
36004 nesqp->hwqp.qp_id,
36005 @@ -3403,7 +3403,7 @@ static void cm_event_reset(struct nes_cm_event *event)
36006
36007 ret = cm_id->event_handler(cm_id, &cm_event);
36008 cm_id->add_ref(cm_id);
36009 - atomic_inc(&cm_closes);
36010 + atomic_inc_unchecked(&cm_closes);
36011 cm_event.event = IW_CM_EVENT_CLOSE;
36012 cm_event.status = IW_CM_EVENT_STATUS_OK;
36013 cm_event.provider_data = cm_id->provider_data;
36014 @@ -3439,7 +3439,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
36015 return;
36016 cm_id = cm_node->cm_id;
36017
36018 - atomic_inc(&cm_connect_reqs);
36019 + atomic_inc_unchecked(&cm_connect_reqs);
36020 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
36021 cm_node, cm_id, jiffies);
36022
36023 @@ -3477,7 +3477,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
36024 return;
36025 cm_id = cm_node->cm_id;
36026
36027 - atomic_inc(&cm_connect_reqs);
36028 + atomic_inc_unchecked(&cm_connect_reqs);
36029 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
36030 cm_node, cm_id, jiffies);
36031
36032 diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
36033 index e593af3..870694a 100644
36034 --- a/drivers/infiniband/hw/nes/nes_nic.c
36035 +++ b/drivers/infiniband/hw/nes/nes_nic.c
36036 @@ -1210,17 +1210,17 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
36037 target_stat_values[++index] = mh_detected;
36038 target_stat_values[++index] = mh_pauses_sent;
36039 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
36040 - target_stat_values[++index] = atomic_read(&cm_connects);
36041 - target_stat_values[++index] = atomic_read(&cm_accepts);
36042 - target_stat_values[++index] = atomic_read(&cm_disconnects);
36043 - target_stat_values[++index] = atomic_read(&cm_connecteds);
36044 - target_stat_values[++index] = atomic_read(&cm_connect_reqs);
36045 - target_stat_values[++index] = atomic_read(&cm_rejects);
36046 - target_stat_values[++index] = atomic_read(&mod_qp_timouts);
36047 - target_stat_values[++index] = atomic_read(&qps_created);
36048 - target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
36049 - target_stat_values[++index] = atomic_read(&qps_destroyed);
36050 - target_stat_values[++index] = atomic_read(&cm_closes);
36051 + target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
36052 + target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
36053 + target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
36054 + target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
36055 + target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
36056 + target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
36057 + target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
36058 + target_stat_values[++index] = atomic_read_unchecked(&qps_created);
36059 + target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
36060 + target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
36061 + target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
36062 target_stat_values[++index] = cm_packets_sent;
36063 target_stat_values[++index] = cm_packets_bounced;
36064 target_stat_values[++index] = cm_packets_created;
36065 @@ -1230,11 +1230,11 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
36066 target_stat_values[++index] = cm_listens_created;
36067 target_stat_values[++index] = cm_listens_destroyed;
36068 target_stat_values[++index] = cm_backlog_drops;
36069 - target_stat_values[++index] = atomic_read(&cm_loopbacks);
36070 - target_stat_values[++index] = atomic_read(&cm_nodes_created);
36071 - target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
36072 - target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
36073 - target_stat_values[++index] = atomic_read(&cm_resets_recvd);
36074 + target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
36075 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
36076 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
36077 + target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
36078 + target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
36079 target_stat_values[++index] = int_mod_timer_init;
36080 target_stat_values[++index] = int_mod_cq_depth_1;
36081 target_stat_values[++index] = int_mod_cq_depth_4;
36082 diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
36083 index a680c42..f914deb 100644
36084 --- a/drivers/infiniband/hw/nes/nes_verbs.c
36085 +++ b/drivers/infiniband/hw/nes/nes_verbs.c
36086 @@ -45,9 +45,9 @@
36087
36088 #include <rdma/ib_umem.h>
36089
36090 -atomic_t mod_qp_timouts;
36091 -atomic_t qps_created;
36092 -atomic_t sw_qps_destroyed;
36093 +atomic_unchecked_t mod_qp_timouts;
36094 +atomic_unchecked_t qps_created;
36095 +atomic_unchecked_t sw_qps_destroyed;
36096
36097 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
36098
36099 @@ -1240,7 +1240,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
36100 if (init_attr->create_flags)
36101 return ERR_PTR(-EINVAL);
36102
36103 - atomic_inc(&qps_created);
36104 + atomic_inc_unchecked(&qps_created);
36105 switch (init_attr->qp_type) {
36106 case IB_QPT_RC:
36107 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
36108 @@ -1568,7 +1568,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
36109 struct iw_cm_event cm_event;
36110 int ret;
36111
36112 - atomic_inc(&sw_qps_destroyed);
36113 + atomic_inc_unchecked(&sw_qps_destroyed);
36114 nesqp->destroyed = 1;
36115
36116 /* Blow away the connection if it exists. */
36117 diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
36118 index ac11be0..3883c04 100644
36119 --- a/drivers/input/gameport/gameport.c
36120 +++ b/drivers/input/gameport/gameport.c
36121 @@ -515,13 +515,13 @@ EXPORT_SYMBOL(gameport_set_phys);
36122 */
36123 static void gameport_init_port(struct gameport *gameport)
36124 {
36125 - static atomic_t gameport_no = ATOMIC_INIT(0);
36126 + static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
36127
36128 __module_get(THIS_MODULE);
36129
36130 mutex_init(&gameport->drv_mutex);
36131 device_initialize(&gameport->dev);
36132 - dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return(&gameport_no) - 1);
36133 + dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
36134 gameport->dev.bus = &gameport_bus;
36135 gameport->dev.release = gameport_release_port;
36136 if (gameport->parent)
36137 diff --git a/drivers/input/input.c b/drivers/input/input.c
36138 index c82ae82..8cfb9cb 100644
36139 --- a/drivers/input/input.c
36140 +++ b/drivers/input/input.c
36141 @@ -1558,7 +1558,7 @@ EXPORT_SYMBOL(input_set_capability);
36142 */
36143 int input_register_device(struct input_dev *dev)
36144 {
36145 - static atomic_t input_no = ATOMIC_INIT(0);
36146 + static atomic_unchecked_t input_no = ATOMIC_INIT(0);
36147 struct input_handler *handler;
36148 const char *path;
36149 int error;
36150 @@ -1585,7 +1585,7 @@ int input_register_device(struct input_dev *dev)
36151 dev->setkeycode = input_default_setkeycode;
36152
36153 dev_set_name(&dev->dev, "input%ld",
36154 - (unsigned long) atomic_inc_return(&input_no) - 1);
36155 + (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
36156
36157 error = device_add(&dev->dev);
36158 if (error)
36159 diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
36160 index ca13a6b..b032b0c 100644
36161 --- a/drivers/input/joystick/sidewinder.c
36162 +++ b/drivers/input/joystick/sidewinder.c
36163 @@ -30,6 +30,7 @@
36164 #include <linux/kernel.h>
36165 #include <linux/module.h>
36166 #include <linux/slab.h>
36167 +#include <linux/sched.h>
36168 #include <linux/init.h>
36169 #include <linux/input.h>
36170 #include <linux/gameport.h>
36171 @@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
36172 unsigned char buf[SW_LENGTH];
36173 int i;
36174
36175 + pax_track_stack();
36176 +
36177 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
36178
36179 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
36180 diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
36181 index 79e3edc..01412b9 100644
36182 --- a/drivers/input/joystick/xpad.c
36183 +++ b/drivers/input/joystick/xpad.c
36184 @@ -621,7 +621,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
36185
36186 static int xpad_led_probe(struct usb_xpad *xpad)
36187 {
36188 - static atomic_t led_seq = ATOMIC_INIT(0);
36189 + static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
36190 long led_no;
36191 struct xpad_led *led;
36192 struct led_classdev *led_cdev;
36193 @@ -634,7 +634,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
36194 if (!led)
36195 return -ENOMEM;
36196
36197 - led_no = (long)atomic_inc_return(&led_seq) - 1;
36198 + led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
36199
36200 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
36201 led->xpad = xpad;
36202 diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
36203 index 0236f0d..c7327f1 100644
36204 --- a/drivers/input/serio/serio.c
36205 +++ b/drivers/input/serio/serio.c
36206 @@ -527,7 +527,7 @@ static void serio_release_port(struct device *dev)
36207 */
36208 static void serio_init_port(struct serio *serio)
36209 {
36210 - static atomic_t serio_no = ATOMIC_INIT(0);
36211 + static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
36212
36213 __module_get(THIS_MODULE);
36214
36215 @@ -536,7 +536,7 @@ static void serio_init_port(struct serio *serio)
36216 mutex_init(&serio->drv_mutex);
36217 device_initialize(&serio->dev);
36218 dev_set_name(&serio->dev, "serio%ld",
36219 - (long)atomic_inc_return(&serio_no) - 1);
36220 + (long)atomic_inc_return_unchecked(&serio_no) - 1);
36221 serio->dev.bus = &serio_bus;
36222 serio->dev.release = serio_release_port;
36223 if (serio->parent) {
36224 diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
36225 index 33dcd8d..2783d25 100644
36226 --- a/drivers/isdn/gigaset/common.c
36227 +++ b/drivers/isdn/gigaset/common.c
36228 @@ -712,7 +712,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
36229 cs->commands_pending = 0;
36230 cs->cur_at_seq = 0;
36231 cs->gotfwver = -1;
36232 - cs->open_count = 0;
36233 + local_set(&cs->open_count, 0);
36234 cs->dev = NULL;
36235 cs->tty = NULL;
36236 cs->tty_dev = NULL;
36237 diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
36238 index a2f6125..6a70677 100644
36239 --- a/drivers/isdn/gigaset/gigaset.h
36240 +++ b/drivers/isdn/gigaset/gigaset.h
36241 @@ -34,6 +34,7 @@
36242 #include <linux/tty_driver.h>
36243 #include <linux/list.h>
36244 #include <asm/atomic.h>
36245 +#include <asm/local.h>
36246
36247 #define GIG_VERSION {0,5,0,0}
36248 #define GIG_COMPAT {0,4,0,0}
36249 @@ -446,7 +447,7 @@ struct cardstate {
36250 spinlock_t cmdlock;
36251 unsigned curlen, cmdbytes;
36252
36253 - unsigned open_count;
36254 + local_t open_count;
36255 struct tty_struct *tty;
36256 struct tasklet_struct if_wake_tasklet;
36257 unsigned control_state;
36258 diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
36259 index b3065b8..c7e8cc9 100644
36260 --- a/drivers/isdn/gigaset/interface.c
36261 +++ b/drivers/isdn/gigaset/interface.c
36262 @@ -165,9 +165,7 @@ static int if_open(struct tty_struct *tty, struct file *filp)
36263 return -ERESTARTSYS; // FIXME -EINTR?
36264 tty->driver_data = cs;
36265
36266 - ++cs->open_count;
36267 -
36268 - if (cs->open_count == 1) {
36269 + if (local_inc_return(&cs->open_count) == 1) {
36270 spin_lock_irqsave(&cs->lock, flags);
36271 cs->tty = tty;
36272 spin_unlock_irqrestore(&cs->lock, flags);
36273 @@ -195,10 +193,10 @@ static void if_close(struct tty_struct *tty, struct file *filp)
36274
36275 if (!cs->connected)
36276 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
36277 - else if (!cs->open_count)
36278 + else if (!local_read(&cs->open_count))
36279 dev_warn(cs->dev, "%s: device not opened\n", __func__);
36280 else {
36281 - if (!--cs->open_count) {
36282 + if (!local_dec_return(&cs->open_count)) {
36283 spin_lock_irqsave(&cs->lock, flags);
36284 cs->tty = NULL;
36285 spin_unlock_irqrestore(&cs->lock, flags);
36286 @@ -233,7 +231,7 @@ static int if_ioctl(struct tty_struct *tty, struct file *file,
36287 if (!cs->connected) {
36288 gig_dbg(DEBUG_IF, "not connected");
36289 retval = -ENODEV;
36290 - } else if (!cs->open_count)
36291 + } else if (!local_read(&cs->open_count))
36292 dev_warn(cs->dev, "%s: device not opened\n", __func__);
36293 else {
36294 retval = 0;
36295 @@ -361,7 +359,7 @@ static int if_write(struct tty_struct *tty, const unsigned char *buf, int count)
36296 if (!cs->connected) {
36297 gig_dbg(DEBUG_IF, "not connected");
36298 retval = -ENODEV;
36299 - } else if (!cs->open_count)
36300 + } else if (!local_read(&cs->open_count))
36301 dev_warn(cs->dev, "%s: device not opened\n", __func__);
36302 else if (cs->mstate != MS_LOCKED) {
36303 dev_warn(cs->dev, "can't write to unlocked device\n");
36304 @@ -395,7 +393,7 @@ static int if_write_room(struct tty_struct *tty)
36305 if (!cs->connected) {
36306 gig_dbg(DEBUG_IF, "not connected");
36307 retval = -ENODEV;
36308 - } else if (!cs->open_count)
36309 + } else if (!local_read(&cs->open_count))
36310 dev_warn(cs->dev, "%s: device not opened\n", __func__);
36311 else if (cs->mstate != MS_LOCKED) {
36312 dev_warn(cs->dev, "can't write to unlocked device\n");
36313 @@ -425,7 +423,7 @@ static int if_chars_in_buffer(struct tty_struct *tty)
36314
36315 if (!cs->connected)
36316 gig_dbg(DEBUG_IF, "not connected");
36317 - else if (!cs->open_count)
36318 + else if (!local_read(&cs->open_count))
36319 dev_warn(cs->dev, "%s: device not opened\n", __func__);
36320 else if (cs->mstate != MS_LOCKED)
36321 dev_warn(cs->dev, "can't write to unlocked device\n");
36322 @@ -453,7 +451,7 @@ static void if_throttle(struct tty_struct *tty)
36323
36324 if (!cs->connected)
36325 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
36326 - else if (!cs->open_count)
36327 + else if (!local_read(&cs->open_count))
36328 dev_warn(cs->dev, "%s: device not opened\n", __func__);
36329 else {
36330 //FIXME
36331 @@ -478,7 +476,7 @@ static void if_unthrottle(struct tty_struct *tty)
36332
36333 if (!cs->connected)
36334 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
36335 - else if (!cs->open_count)
36336 + else if (!local_read(&cs->open_count))
36337 dev_warn(cs->dev, "%s: device not opened\n", __func__);
36338 else {
36339 //FIXME
36340 @@ -510,7 +508,7 @@ static void if_set_termios(struct tty_struct *tty, struct ktermios *old)
36341 goto out;
36342 }
36343
36344 - if (!cs->open_count) {
36345 + if (!local_read(&cs->open_count)) {
36346 dev_warn(cs->dev, "%s: device not opened\n", __func__);
36347 goto out;
36348 }
36349 diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
36350 index a7c0083..62a7cb6 100644
36351 --- a/drivers/isdn/hardware/avm/b1.c
36352 +++ b/drivers/isdn/hardware/avm/b1.c
36353 @@ -173,7 +173,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart * t4file)
36354 }
36355 if (left) {
36356 if (t4file->user) {
36357 - if (copy_from_user(buf, dp, left))
36358 + if (left > sizeof buf || copy_from_user(buf, dp, left))
36359 return -EFAULT;
36360 } else {
36361 memcpy(buf, dp, left);
36362 @@ -221,7 +221,7 @@ int b1_load_config(avmcard *card, capiloaddatapart * config)
36363 }
36364 if (left) {
36365 if (config->user) {
36366 - if (copy_from_user(buf, dp, left))
36367 + if (left > sizeof buf || copy_from_user(buf, dp, left))
36368 return -EFAULT;
36369 } else {
36370 memcpy(buf, dp, left);
36371 diff --git a/drivers/isdn/hardware/eicon/capidtmf.c b/drivers/isdn/hardware/eicon/capidtmf.c
36372 index f130724..c373c68 100644
36373 --- a/drivers/isdn/hardware/eicon/capidtmf.c
36374 +++ b/drivers/isdn/hardware/eicon/capidtmf.c
36375 @@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_state *p_state, byte *buffer, word leng
36376 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
36377 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
36378
36379 + pax_track_stack();
36380
36381 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
36382 {
36383 diff --git a/drivers/isdn/hardware/eicon/capifunc.c b/drivers/isdn/hardware/eicon/capifunc.c
36384 index 4d425c6..a9be6c4 100644
36385 --- a/drivers/isdn/hardware/eicon/capifunc.c
36386 +++ b/drivers/isdn/hardware/eicon/capifunc.c
36387 @@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
36388 IDI_SYNC_REQ req;
36389 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
36390
36391 + pax_track_stack();
36392 +
36393 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
36394
36395 for (x = 0; x < MAX_DESCRIPTORS; x++) {
36396 diff --git a/drivers/isdn/hardware/eicon/diddfunc.c b/drivers/isdn/hardware/eicon/diddfunc.c
36397 index 3029234..ef0d9e2 100644
36398 --- a/drivers/isdn/hardware/eicon/diddfunc.c
36399 +++ b/drivers/isdn/hardware/eicon/diddfunc.c
36400 @@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
36401 IDI_SYNC_REQ req;
36402 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
36403
36404 + pax_track_stack();
36405 +
36406 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
36407
36408 for (x = 0; x < MAX_DESCRIPTORS; x++) {
36409 diff --git a/drivers/isdn/hardware/eicon/divasfunc.c b/drivers/isdn/hardware/eicon/divasfunc.c
36410 index d36a4c0..11e7d1a 100644
36411 --- a/drivers/isdn/hardware/eicon/divasfunc.c
36412 +++ b/drivers/isdn/hardware/eicon/divasfunc.c
36413 @@ -161,6 +161,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
36414 IDI_SYNC_REQ req;
36415 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
36416
36417 + pax_track_stack();
36418 +
36419 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
36420
36421 for (x = 0; x < MAX_DESCRIPTORS; x++) {
36422 diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
36423 index 85784a7..a19ca98 100644
36424 --- a/drivers/isdn/hardware/eicon/divasync.h
36425 +++ b/drivers/isdn/hardware/eicon/divasync.h
36426 @@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
36427 } diva_didd_add_adapter_t;
36428 typedef struct _diva_didd_remove_adapter {
36429 IDI_CALL p_request;
36430 -} diva_didd_remove_adapter_t;
36431 +} __no_const diva_didd_remove_adapter_t;
36432 typedef struct _diva_didd_read_adapter_array {
36433 void * buffer;
36434 dword length;
36435 diff --git a/drivers/isdn/hardware/eicon/idifunc.c b/drivers/isdn/hardware/eicon/idifunc.c
36436 index db87d51..7d09acf 100644
36437 --- a/drivers/isdn/hardware/eicon/idifunc.c
36438 +++ b/drivers/isdn/hardware/eicon/idifunc.c
36439 @@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
36440 IDI_SYNC_REQ req;
36441 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
36442
36443 + pax_track_stack();
36444 +
36445 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
36446
36447 for (x = 0; x < MAX_DESCRIPTORS; x++) {
36448 diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c
36449 index ae89fb8..0fab299 100644
36450 --- a/drivers/isdn/hardware/eicon/message.c
36451 +++ b/drivers/isdn/hardware/eicon/message.c
36452 @@ -4889,6 +4889,8 @@ static void sig_ind(PLCI *plci)
36453 dword d;
36454 word w;
36455
36456 + pax_track_stack();
36457 +
36458 a = plci->adapter;
36459 Id = ((word)plci->Id<<8)|a->Id;
36460 PUT_WORD(&SS_Ind[4],0x0000);
36461 @@ -7484,6 +7486,8 @@ static word add_b1(PLCI *plci, API_PARSE *bp, word b_channel_info,
36462 word j, n, w;
36463 dword d;
36464
36465 + pax_track_stack();
36466 +
36467
36468 for(i=0;i<8;i++) bp_parms[i].length = 0;
36469 for(i=0;i<2;i++) global_config[i].length = 0;
36470 @@ -7958,6 +7962,8 @@ static word add_b23(PLCI *plci, API_PARSE *bp)
36471 const byte llc3[] = {4,3,2,2,6,6,0};
36472 const byte header[] = {0,2,3,3,0,0,0};
36473
36474 + pax_track_stack();
36475 +
36476 for(i=0;i<8;i++) bp_parms[i].length = 0;
36477 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
36478 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
36479 @@ -14761,6 +14767,8 @@ static void group_optimization(DIVA_CAPI_ADAPTER * a, PLCI * plci)
36480 word appl_number_group_type[MAX_APPL];
36481 PLCI *auxplci;
36482
36483 + pax_track_stack();
36484 +
36485 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
36486
36487 if(!a->group_optimization_enabled)
36488 diff --git a/drivers/isdn/hardware/eicon/mntfunc.c b/drivers/isdn/hardware/eicon/mntfunc.c
36489 index a564b75..f3cf8b5 100644
36490 --- a/drivers/isdn/hardware/eicon/mntfunc.c
36491 +++ b/drivers/isdn/hardware/eicon/mntfunc.c
36492 @@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
36493 IDI_SYNC_REQ req;
36494 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
36495
36496 + pax_track_stack();
36497 +
36498 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
36499
36500 for (x = 0; x < MAX_DESCRIPTORS; x++) {
36501 diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h
36502 index a3bd163..8956575 100644
36503 --- a/drivers/isdn/hardware/eicon/xdi_adapter.h
36504 +++ b/drivers/isdn/hardware/eicon/xdi_adapter.h
36505 @@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
36506 typedef struct _diva_os_idi_adapter_interface {
36507 diva_init_card_proc_t cleanup_adapter_proc;
36508 diva_cmd_card_proc_t cmd_proc;
36509 -} diva_os_idi_adapter_interface_t;
36510 +} __no_const diva_os_idi_adapter_interface_t;
36511
36512 typedef struct _diva_os_xdi_adapter {
36513 struct list_head link;
36514 diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
36515 index adb1e8c..21b590b 100644
36516 --- a/drivers/isdn/i4l/isdn_common.c
36517 +++ b/drivers/isdn/i4l/isdn_common.c
36518 @@ -1290,6 +1290,8 @@ isdn_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
36519 } iocpar;
36520 void __user *argp = (void __user *)arg;
36521
36522 + pax_track_stack();
36523 +
36524 #define name iocpar.name
36525 #define bname iocpar.bname
36526 #define iocts iocpar.iocts
36527 diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
36528 index 90b56ed..5ed3305 100644
36529 --- a/drivers/isdn/i4l/isdn_net.c
36530 +++ b/drivers/isdn/i4l/isdn_net.c
36531 @@ -1902,7 +1902,7 @@ static int isdn_net_header(struct sk_buff *skb, struct net_device *dev,
36532 {
36533 isdn_net_local *lp = netdev_priv(dev);
36534 unsigned char *p;
36535 - ushort len = 0;
36536 + int len = 0;
36537
36538 switch (lp->p_encap) {
36539 case ISDN_NET_ENCAP_ETHER:
36540 diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
36541 index bf7997a..cf091db 100644
36542 --- a/drivers/isdn/icn/icn.c
36543 +++ b/drivers/isdn/icn/icn.c
36544 @@ -1044,7 +1044,7 @@ icn_writecmd(const u_char * buf, int len, int user, icn_card * card)
36545 if (count > len)
36546 count = len;
36547 if (user) {
36548 - if (copy_from_user(msg, buf, count))
36549 + if (count > sizeof msg || copy_from_user(msg, buf, count))
36550 return -EFAULT;
36551 } else
36552 memcpy(msg, buf, count);
36553 diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
36554 index feb0fa4..f76f830 100644
36555 --- a/drivers/isdn/mISDN/socket.c
36556 +++ b/drivers/isdn/mISDN/socket.c
36557 @@ -391,6 +391,7 @@ data_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
36558 if (dev) {
36559 struct mISDN_devinfo di;
36560
36561 + memset(&di, 0, sizeof(di));
36562 di.id = dev->id;
36563 di.Dprotocols = dev->Dprotocols;
36564 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
36565 @@ -671,6 +672,7 @@ base_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
36566 if (dev) {
36567 struct mISDN_devinfo di;
36568
36569 + memset(&di, 0, sizeof(di));
36570 di.id = dev->id;
36571 di.Dprotocols = dev->Dprotocols;
36572 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
36573 diff --git a/drivers/isdn/sc/interrupt.c b/drivers/isdn/sc/interrupt.c
36574 index 485be8b..f0225bc 100644
36575 --- a/drivers/isdn/sc/interrupt.c
36576 +++ b/drivers/isdn/sc/interrupt.c
36577 @@ -112,11 +112,19 @@ irqreturn_t interrupt_handler(int dummy, void *card_inst)
36578 }
36579 else if(callid>=0x0000 && callid<=0x7FFF)
36580 {
36581 + int len;
36582 +
36583 pr_debug("%s: Got Incoming Call\n",
36584 sc_adapter[card]->devicename);
36585 - strcpy(setup.phone,&(rcvmsg.msg_data.byte_array[4]));
36586 - strcpy(setup.eazmsn,
36587 - sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn);
36588 + len = strlcpy(setup.phone, &(rcvmsg.msg_data.byte_array[4]),
36589 + sizeof(setup.phone));
36590 + if (len >= sizeof(setup.phone))
36591 + continue;
36592 + len = strlcpy(setup.eazmsn,
36593 + sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
36594 + sizeof(setup.eazmsn));
36595 + if (len >= sizeof(setup.eazmsn))
36596 + continue;
36597 setup.si1 = 7;
36598 setup.si2 = 0;
36599 setup.plan = 0;
36600 @@ -176,7 +184,9 @@ irqreturn_t interrupt_handler(int dummy, void *card_inst)
36601 * Handle a GetMyNumber Rsp
36602 */
36603 if (IS_CE_MESSAGE(rcvmsg,Call,0,GetMyNumber)){
36604 - strcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn,rcvmsg.msg_data.byte_array);
36605 + strlcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
36606 + rcvmsg.msg_data.byte_array,
36607 + sizeof(rcvmsg.msg_data.byte_array));
36608 continue;
36609 }
36610
36611 diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
36612 index 8744d24..d1f9a9a 100644
36613 --- a/drivers/lguest/core.c
36614 +++ b/drivers/lguest/core.c
36615 @@ -91,9 +91,17 @@ static __init int map_switcher(void)
36616 * it's worked so far. The end address needs +1 because __get_vm_area
36617 * allocates an extra guard page, so we need space for that.
36618 */
36619 +
36620 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
36621 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
36622 + VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
36623 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
36624 +#else
36625 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
36626 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
36627 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
36628 +#endif
36629 +
36630 if (!switcher_vma) {
36631 err = -ENOMEM;
36632 printk("lguest: could not map switcher pages high\n");
36633 @@ -118,7 +126,7 @@ static __init int map_switcher(void)
36634 * Now the Switcher is mapped at the right address, we can't fail!
36635 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
36636 */
36637 - memcpy(switcher_vma->addr, start_switcher_text,
36638 + memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
36639 end_switcher_text - start_switcher_text);
36640
36641 printk(KERN_INFO "lguest: mapped switcher at %p\n",
36642 diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
36643 index 6ae3888..8b38145 100644
36644 --- a/drivers/lguest/x86/core.c
36645 +++ b/drivers/lguest/x86/core.c
36646 @@ -59,7 +59,7 @@ static struct {
36647 /* Offset from where switcher.S was compiled to where we've copied it */
36648 static unsigned long switcher_offset(void)
36649 {
36650 - return SWITCHER_ADDR - (unsigned long)start_switcher_text;
36651 + return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
36652 }
36653
36654 /* This cpu's struct lguest_pages. */
36655 @@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
36656 * These copies are pretty cheap, so we do them unconditionally: */
36657 /* Save the current Host top-level page directory.
36658 */
36659 +
36660 +#ifdef CONFIG_PAX_PER_CPU_PGD
36661 + pages->state.host_cr3 = read_cr3();
36662 +#else
36663 pages->state.host_cr3 = __pa(current->mm->pgd);
36664 +#endif
36665 +
36666 /*
36667 * Set up the Guest's page tables to see this CPU's pages (and no
36668 * other CPU's pages).
36669 @@ -535,7 +541,7 @@ void __init lguest_arch_host_init(void)
36670 * compiled-in switcher code and the high-mapped copy we just made.
36671 */
36672 for (i = 0; i < IDT_ENTRIES; i++)
36673 - default_idt_entries[i] += switcher_offset();
36674 + default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
36675
36676 /*
36677 * Set up the Switcher's per-cpu areas.
36678 @@ -618,7 +624,7 @@ void __init lguest_arch_host_init(void)
36679 * it will be undisturbed when we switch. To change %cs and jump we
36680 * need this structure to feed to Intel's "lcall" instruction.
36681 */
36682 - lguest_entry.offset = (long)switch_to_guest + switcher_offset();
36683 + lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
36684 lguest_entry.segment = LGUEST_CS;
36685
36686 /*
36687 diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
36688 index 40634b0..4f5855e 100644
36689 --- a/drivers/lguest/x86/switcher_32.S
36690 +++ b/drivers/lguest/x86/switcher_32.S
36691 @@ -87,6 +87,7 @@
36692 #include <asm/page.h>
36693 #include <asm/segment.h>
36694 #include <asm/lguest.h>
36695 +#include <asm/processor-flags.h>
36696
36697 // We mark the start of the code to copy
36698 // It's placed in .text tho it's never run here
36699 @@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
36700 // Changes type when we load it: damn Intel!
36701 // For after we switch over our page tables
36702 // That entry will be read-only: we'd crash.
36703 +
36704 +#ifdef CONFIG_PAX_KERNEXEC
36705 + mov %cr0, %edx
36706 + xor $X86_CR0_WP, %edx
36707 + mov %edx, %cr0
36708 +#endif
36709 +
36710 movl $(GDT_ENTRY_TSS*8), %edx
36711 ltr %dx
36712
36713 @@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
36714 // Let's clear it again for our return.
36715 // The GDT descriptor of the Host
36716 // Points to the table after two "size" bytes
36717 - movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
36718 + movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
36719 // Clear "used" from type field (byte 5, bit 2)
36720 - andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
36721 + andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
36722 +
36723 +#ifdef CONFIG_PAX_KERNEXEC
36724 + mov %cr0, %eax
36725 + xor $X86_CR0_WP, %eax
36726 + mov %eax, %cr0
36727 +#endif
36728
36729 // Once our page table's switched, the Guest is live!
36730 // The Host fades as we run this final step.
36731 @@ -295,13 +309,12 @@ deliver_to_host:
36732 // I consulted gcc, and it gave
36733 // These instructions, which I gladly credit:
36734 leal (%edx,%ebx,8), %eax
36735 - movzwl (%eax),%edx
36736 - movl 4(%eax), %eax
36737 - xorw %ax, %ax
36738 - orl %eax, %edx
36739 + movl 4(%eax), %edx
36740 + movw (%eax), %dx
36741 // Now the address of the handler's in %edx
36742 // We call it now: its "iret" drops us home.
36743 - jmp *%edx
36744 + ljmp $__KERNEL_CS, $1f
36745 +1: jmp *%edx
36746
36747 // Every interrupt can come to us here
36748 // But we must truly tell each apart.
36749 diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
36750 index 588a5b0..b71db89 100644
36751 --- a/drivers/macintosh/macio_asic.c
36752 +++ b/drivers/macintosh/macio_asic.c
36753 @@ -701,7 +701,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev)
36754 * MacIO is matched against any Apple ID, it's probe() function
36755 * will then decide wether it applies or not
36756 */
36757 -static const struct pci_device_id __devinitdata pci_ids [] = { {
36758 +static const struct pci_device_id __devinitconst pci_ids [] = { {
36759 .vendor = PCI_VENDOR_ID_APPLE,
36760 .device = PCI_ANY_ID,
36761 .subvendor = PCI_ANY_ID,
36762 diff --git a/drivers/macintosh/via-pmu-backlight.c b/drivers/macintosh/via-pmu-backlight.c
36763 index a348bb0..ecd9b3f 100644
36764 --- a/drivers/macintosh/via-pmu-backlight.c
36765 +++ b/drivers/macintosh/via-pmu-backlight.c
36766 @@ -15,7 +15,7 @@
36767
36768 #define MAX_PMU_LEVEL 0xFF
36769
36770 -static struct backlight_ops pmu_backlight_data;
36771 +static const struct backlight_ops pmu_backlight_data;
36772 static DEFINE_SPINLOCK(pmu_backlight_lock);
36773 static int sleeping, uses_pmu_bl;
36774 static u8 bl_curve[FB_BACKLIGHT_LEVELS];
36775 @@ -115,7 +115,7 @@ static int pmu_backlight_get_brightness(struct backlight_device *bd)
36776 return bd->props.brightness;
36777 }
36778
36779 -static struct backlight_ops pmu_backlight_data = {
36780 +static const struct backlight_ops pmu_backlight_data = {
36781 .get_brightness = pmu_backlight_get_brightness,
36782 .update_status = pmu_backlight_update_status,
36783
36784 diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
36785 index 6f308a4..b5f7ff7 100644
36786 --- a/drivers/macintosh/via-pmu.c
36787 +++ b/drivers/macintosh/via-pmu.c
36788 @@ -2232,7 +2232,7 @@ static int pmu_sleep_valid(suspend_state_t state)
36789 && (pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, -1) >= 0);
36790 }
36791
36792 -static struct platform_suspend_ops pmu_pm_ops = {
36793 +static const struct platform_suspend_ops pmu_pm_ops = {
36794 .enter = powerbook_sleep,
36795 .valid = pmu_sleep_valid,
36796 };
36797 diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
36798 index 818b617..4656e38 100644
36799 --- a/drivers/md/dm-ioctl.c
36800 +++ b/drivers/md/dm-ioctl.c
36801 @@ -1437,7 +1437,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
36802 cmd == DM_LIST_VERSIONS_CMD)
36803 return 0;
36804
36805 - if ((cmd == DM_DEV_CREATE_CMD)) {
36806 + if (cmd == DM_DEV_CREATE_CMD) {
36807 if (!*param->name) {
36808 DMWARN("name not supplied when creating device");
36809 return -EINVAL;
36810 diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
36811 index 6021d0a..a878643 100644
36812 --- a/drivers/md/dm-raid1.c
36813 +++ b/drivers/md/dm-raid1.c
36814 @@ -41,7 +41,7 @@ enum dm_raid1_error {
36815
36816 struct mirror {
36817 struct mirror_set *ms;
36818 - atomic_t error_count;
36819 + atomic_unchecked_t error_count;
36820 unsigned long error_type;
36821 struct dm_dev *dev;
36822 sector_t offset;
36823 @@ -203,7 +203,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
36824 * simple way to tell if a device has encountered
36825 * errors.
36826 */
36827 - atomic_inc(&m->error_count);
36828 + atomic_inc_unchecked(&m->error_count);
36829
36830 if (test_and_set_bit(error_type, &m->error_type))
36831 return;
36832 @@ -225,7 +225,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
36833 }
36834
36835 for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++)
36836 - if (!atomic_read(&new->error_count)) {
36837 + if (!atomic_read_unchecked(&new->error_count)) {
36838 set_default_mirror(new);
36839 break;
36840 }
36841 @@ -363,7 +363,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
36842 struct mirror *m = get_default_mirror(ms);
36843
36844 do {
36845 - if (likely(!atomic_read(&m->error_count)))
36846 + if (likely(!atomic_read_unchecked(&m->error_count)))
36847 return m;
36848
36849 if (m-- == ms->mirror)
36850 @@ -377,7 +377,7 @@ static int default_ok(struct mirror *m)
36851 {
36852 struct mirror *default_mirror = get_default_mirror(m->ms);
36853
36854 - return !atomic_read(&default_mirror->error_count);
36855 + return !atomic_read_unchecked(&default_mirror->error_count);
36856 }
36857
36858 static int mirror_available(struct mirror_set *ms, struct bio *bio)
36859 @@ -484,7 +484,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
36860 */
36861 if (likely(region_in_sync(ms, region, 1)))
36862 m = choose_mirror(ms, bio->bi_sector);
36863 - else if (m && atomic_read(&m->error_count))
36864 + else if (m && atomic_read_unchecked(&m->error_count))
36865 m = NULL;
36866
36867 if (likely(m))
36868 @@ -855,7 +855,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
36869 }
36870
36871 ms->mirror[mirror].ms = ms;
36872 - atomic_set(&(ms->mirror[mirror].error_count), 0);
36873 + atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
36874 ms->mirror[mirror].error_type = 0;
36875 ms->mirror[mirror].offset = offset;
36876
36877 @@ -1241,7 +1241,7 @@ static void mirror_resume(struct dm_target *ti)
36878 */
36879 static char device_status_char(struct mirror *m)
36880 {
36881 - if (!atomic_read(&(m->error_count)))
36882 + if (!atomic_read_unchecked(&(m->error_count)))
36883 return 'A';
36884
36885 return (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
36886 diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
36887 index bd58703..9f26571 100644
36888 --- a/drivers/md/dm-stripe.c
36889 +++ b/drivers/md/dm-stripe.c
36890 @@ -20,7 +20,7 @@ struct stripe {
36891 struct dm_dev *dev;
36892 sector_t physical_start;
36893
36894 - atomic_t error_count;
36895 + atomic_unchecked_t error_count;
36896 };
36897
36898 struct stripe_c {
36899 @@ -188,7 +188,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
36900 kfree(sc);
36901 return r;
36902 }
36903 - atomic_set(&(sc->stripe[i].error_count), 0);
36904 + atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
36905 }
36906
36907 ti->private = sc;
36908 @@ -257,7 +257,7 @@ static int stripe_status(struct dm_target *ti,
36909 DMEMIT("%d ", sc->stripes);
36910 for (i = 0; i < sc->stripes; i++) {
36911 DMEMIT("%s ", sc->stripe[i].dev->name);
36912 - buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
36913 + buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
36914 'D' : 'A';
36915 }
36916 buffer[i] = '\0';
36917 @@ -304,8 +304,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
36918 */
36919 for (i = 0; i < sc->stripes; i++)
36920 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
36921 - atomic_inc(&(sc->stripe[i].error_count));
36922 - if (atomic_read(&(sc->stripe[i].error_count)) <
36923 + atomic_inc_unchecked(&(sc->stripe[i].error_count));
36924 + if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
36925 DM_IO_ERROR_THRESHOLD)
36926 queue_work(kstriped, &sc->kstriped_ws);
36927 }
36928 diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c
36929 index 4b04590..13a77b2 100644
36930 --- a/drivers/md/dm-sysfs.c
36931 +++ b/drivers/md/dm-sysfs.c
36932 @@ -75,7 +75,7 @@ static struct attribute *dm_attrs[] = {
36933 NULL,
36934 };
36935
36936 -static struct sysfs_ops dm_sysfs_ops = {
36937 +static const struct sysfs_ops dm_sysfs_ops = {
36938 .show = dm_attr_show,
36939 };
36940
36941 diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
36942 index 03345bb..332250d 100644
36943 --- a/drivers/md/dm-table.c
36944 +++ b/drivers/md/dm-table.c
36945 @@ -376,7 +376,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
36946 if (!dev_size)
36947 return 0;
36948
36949 - if ((start >= dev_size) || (start + len > dev_size)) {
36950 + if ((start >= dev_size) || (len > dev_size - start)) {
36951 DMWARN("%s: %s too small for target: "
36952 "start=%llu, len=%llu, dev_size=%llu",
36953 dm_device_name(ti->table->md), bdevname(bdev, b),
36954 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
36955 index c988ac2..c418141 100644
36956 --- a/drivers/md/dm.c
36957 +++ b/drivers/md/dm.c
36958 @@ -165,9 +165,9 @@ struct mapped_device {
36959 /*
36960 * Event handling.
36961 */
36962 - atomic_t event_nr;
36963 + atomic_unchecked_t event_nr;
36964 wait_queue_head_t eventq;
36965 - atomic_t uevent_seq;
36966 + atomic_unchecked_t uevent_seq;
36967 struct list_head uevent_list;
36968 spinlock_t uevent_lock; /* Protect access to uevent_list */
36969
36970 @@ -1776,8 +1776,8 @@ static struct mapped_device *alloc_dev(int minor)
36971 rwlock_init(&md->map_lock);
36972 atomic_set(&md->holders, 1);
36973 atomic_set(&md->open_count, 0);
36974 - atomic_set(&md->event_nr, 0);
36975 - atomic_set(&md->uevent_seq, 0);
36976 + atomic_set_unchecked(&md->event_nr, 0);
36977 + atomic_set_unchecked(&md->uevent_seq, 0);
36978 INIT_LIST_HEAD(&md->uevent_list);
36979 spin_lock_init(&md->uevent_lock);
36980
36981 @@ -1927,7 +1927,7 @@ static void event_callback(void *context)
36982
36983 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
36984
36985 - atomic_inc(&md->event_nr);
36986 + atomic_inc_unchecked(&md->event_nr);
36987 wake_up(&md->eventq);
36988 }
36989
36990 @@ -2562,18 +2562,18 @@ void dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
36991
36992 uint32_t dm_next_uevent_seq(struct mapped_device *md)
36993 {
36994 - return atomic_add_return(1, &md->uevent_seq);
36995 + return atomic_add_return_unchecked(1, &md->uevent_seq);
36996 }
36997
36998 uint32_t dm_get_event_nr(struct mapped_device *md)
36999 {
37000 - return atomic_read(&md->event_nr);
37001 + return atomic_read_unchecked(&md->event_nr);
37002 }
37003
37004 int dm_wait_event(struct mapped_device *md, int event_nr)
37005 {
37006 return wait_event_interruptible(md->eventq,
37007 - (event_nr != atomic_read(&md->event_nr)));
37008 + (event_nr != atomic_read_unchecked(&md->event_nr)));
37009 }
37010
37011 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
37012 diff --git a/drivers/md/md.c b/drivers/md/md.c
37013 index 4ce6e2f..7a9530a 100644
37014 --- a/drivers/md/md.c
37015 +++ b/drivers/md/md.c
37016 @@ -153,10 +153,10 @@ static int start_readonly;
37017 * start build, activate spare
37018 */
37019 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
37020 -static atomic_t md_event_count;
37021 +static atomic_unchecked_t md_event_count;
37022 void md_new_event(mddev_t *mddev)
37023 {
37024 - atomic_inc(&md_event_count);
37025 + atomic_inc_unchecked(&md_event_count);
37026 wake_up(&md_event_waiters);
37027 }
37028 EXPORT_SYMBOL_GPL(md_new_event);
37029 @@ -166,7 +166,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
37030 */
37031 static void md_new_event_inintr(mddev_t *mddev)
37032 {
37033 - atomic_inc(&md_event_count);
37034 + atomic_inc_unchecked(&md_event_count);
37035 wake_up(&md_event_waiters);
37036 }
37037
37038 @@ -1226,7 +1226,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
37039
37040 rdev->preferred_minor = 0xffff;
37041 rdev->data_offset = le64_to_cpu(sb->data_offset);
37042 - atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
37043 + atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
37044
37045 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
37046 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
37047 @@ -1400,7 +1400,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
37048 else
37049 sb->resync_offset = cpu_to_le64(0);
37050
37051 - sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
37052 + sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
37053
37054 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
37055 sb->size = cpu_to_le64(mddev->dev_sectors);
37056 @@ -2222,7 +2222,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
37057 static ssize_t
37058 errors_show(mdk_rdev_t *rdev, char *page)
37059 {
37060 - return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
37061 + return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
37062 }
37063
37064 static ssize_t
37065 @@ -2231,7 +2231,7 @@ errors_store(mdk_rdev_t *rdev, const char *buf, size_t len)
37066 char *e;
37067 unsigned long n = simple_strtoul(buf, &e, 10);
37068 if (*buf && (*e == 0 || *e == '\n')) {
37069 - atomic_set(&rdev->corrected_errors, n);
37070 + atomic_set_unchecked(&rdev->corrected_errors, n);
37071 return len;
37072 }
37073 return -EINVAL;
37074 @@ -2525,7 +2525,7 @@ static void rdev_free(struct kobject *ko)
37075 mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
37076 kfree(rdev);
37077 }
37078 -static struct sysfs_ops rdev_sysfs_ops = {
37079 +static const struct sysfs_ops rdev_sysfs_ops = {
37080 .show = rdev_attr_show,
37081 .store = rdev_attr_store,
37082 };
37083 @@ -2574,8 +2574,8 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
37084 rdev->data_offset = 0;
37085 rdev->sb_events = 0;
37086 atomic_set(&rdev->nr_pending, 0);
37087 - atomic_set(&rdev->read_errors, 0);
37088 - atomic_set(&rdev->corrected_errors, 0);
37089 + atomic_set_unchecked(&rdev->read_errors, 0);
37090 + atomic_set_unchecked(&rdev->corrected_errors, 0);
37091
37092 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
37093 if (!size) {
37094 @@ -3895,7 +3895,7 @@ static void md_free(struct kobject *ko)
37095 kfree(mddev);
37096 }
37097
37098 -static struct sysfs_ops md_sysfs_ops = {
37099 +static const struct sysfs_ops md_sysfs_ops = {
37100 .show = md_attr_show,
37101 .store = md_attr_store,
37102 };
37103 @@ -4482,7 +4482,8 @@ out:
37104 err = 0;
37105 blk_integrity_unregister(disk);
37106 md_new_event(mddev);
37107 - sysfs_notify_dirent(mddev->sysfs_state);
37108 + if (mddev->sysfs_state)
37109 + sysfs_notify_dirent(mddev->sysfs_state);
37110 return err;
37111 }
37112
37113 @@ -5962,7 +5963,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
37114
37115 spin_unlock(&pers_lock);
37116 seq_printf(seq, "\n");
37117 - mi->event = atomic_read(&md_event_count);
37118 + mi->event = atomic_read_unchecked(&md_event_count);
37119 return 0;
37120 }
37121 if (v == (void*)2) {
37122 @@ -6051,7 +6052,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
37123 chunk_kb ? "KB" : "B");
37124 if (bitmap->file) {
37125 seq_printf(seq, ", file: ");
37126 - seq_path(seq, &bitmap->file->f_path, " \t\n");
37127 + seq_path(seq, &bitmap->file->f_path, " \t\n\\");
37128 }
37129
37130 seq_printf(seq, "\n");
37131 @@ -6085,7 +6086,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
37132 else {
37133 struct seq_file *p = file->private_data;
37134 p->private = mi;
37135 - mi->event = atomic_read(&md_event_count);
37136 + mi->event = atomic_read_unchecked(&md_event_count);
37137 }
37138 return error;
37139 }
37140 @@ -6101,7 +6102,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
37141 /* always allow read */
37142 mask = POLLIN | POLLRDNORM;
37143
37144 - if (mi->event != atomic_read(&md_event_count))
37145 + if (mi->event != atomic_read_unchecked(&md_event_count))
37146 mask |= POLLERR | POLLPRI;
37147 return mask;
37148 }
37149 @@ -6145,7 +6146,7 @@ static int is_mddev_idle(mddev_t *mddev, int init)
37150 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
37151 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
37152 (int)part_stat_read(&disk->part0, sectors[1]) -
37153 - atomic_read(&disk->sync_io);
37154 + atomic_read_unchecked(&disk->sync_io);
37155 /* sync IO will cause sync_io to increase before the disk_stats
37156 * as sync_io is counted when a request starts, and
37157 * disk_stats is counted when it completes.
37158 diff --git a/drivers/md/md.h b/drivers/md/md.h
37159 index 87430fe..0024a4c 100644
37160 --- a/drivers/md/md.h
37161 +++ b/drivers/md/md.h
37162 @@ -94,10 +94,10 @@ struct mdk_rdev_s
37163 * only maintained for arrays that
37164 * support hot removal
37165 */
37166 - atomic_t read_errors; /* number of consecutive read errors that
37167 + atomic_unchecked_t read_errors; /* number of consecutive read errors that
37168 * we have tried to ignore.
37169 */
37170 - atomic_t corrected_errors; /* number of corrected read errors,
37171 + atomic_unchecked_t corrected_errors; /* number of corrected read errors,
37172 * for reporting to userspace and storing
37173 * in superblock.
37174 */
37175 @@ -304,7 +304,7 @@ static inline void rdev_dec_pending(mdk_rdev_t *rdev, mddev_t *mddev)
37176
37177 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
37178 {
37179 - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
37180 + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
37181 }
37182
37183 struct mdk_personality
37184 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
37185 index 968cb14..f0ad2e4 100644
37186 --- a/drivers/md/raid1.c
37187 +++ b/drivers/md/raid1.c
37188 @@ -1415,7 +1415,7 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
37189 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
37190 continue;
37191 rdev = conf->mirrors[d].rdev;
37192 - atomic_add(s, &rdev->corrected_errors);
37193 + atomic_add_unchecked(s, &rdev->corrected_errors);
37194 if (sync_page_io(rdev->bdev,
37195 sect + rdev->data_offset,
37196 s<<9,
37197 @@ -1564,7 +1564,7 @@ static void fix_read_error(conf_t *conf, int read_disk,
37198 /* Well, this device is dead */
37199 md_error(mddev, rdev);
37200 else {
37201 - atomic_add(s, &rdev->corrected_errors);
37202 + atomic_add_unchecked(s, &rdev->corrected_errors);
37203 printk(KERN_INFO
37204 "raid1:%s: read error corrected "
37205 "(%d sectors at %llu on %s)\n",
37206 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
37207 index 1b4e232..cf0f534 100644
37208 --- a/drivers/md/raid10.c
37209 +++ b/drivers/md/raid10.c
37210 @@ -1255,7 +1255,7 @@ static void end_sync_read(struct bio *bio, int error)
37211 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
37212 set_bit(R10BIO_Uptodate, &r10_bio->state);
37213 else {
37214 - atomic_add(r10_bio->sectors,
37215 + atomic_add_unchecked(r10_bio->sectors,
37216 &conf->mirrors[d].rdev->corrected_errors);
37217 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
37218 md_error(r10_bio->mddev,
37219 @@ -1520,7 +1520,7 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
37220 test_bit(In_sync, &rdev->flags)) {
37221 atomic_inc(&rdev->nr_pending);
37222 rcu_read_unlock();
37223 - atomic_add(s, &rdev->corrected_errors);
37224 + atomic_add_unchecked(s, &rdev->corrected_errors);
37225 if (sync_page_io(rdev->bdev,
37226 r10_bio->devs[sl].addr +
37227 sect + rdev->data_offset,
37228 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
37229 index 883215d..675bf47 100644
37230 --- a/drivers/md/raid5.c
37231 +++ b/drivers/md/raid5.c
37232 @@ -482,7 +482,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
37233 bi->bi_next = NULL;
37234 if ((rw & WRITE) &&
37235 test_bit(R5_ReWrite, &sh->dev[i].flags))
37236 - atomic_add(STRIPE_SECTORS,
37237 + atomic_add_unchecked(STRIPE_SECTORS,
37238 &rdev->corrected_errors);
37239 generic_make_request(bi);
37240 } else {
37241 @@ -1517,15 +1517,15 @@ static void raid5_end_read_request(struct bio * bi, int error)
37242 clear_bit(R5_ReadError, &sh->dev[i].flags);
37243 clear_bit(R5_ReWrite, &sh->dev[i].flags);
37244 }
37245 - if (atomic_read(&conf->disks[i].rdev->read_errors))
37246 - atomic_set(&conf->disks[i].rdev->read_errors, 0);
37247 + if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
37248 + atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
37249 } else {
37250 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
37251 int retry = 0;
37252 rdev = conf->disks[i].rdev;
37253
37254 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
37255 - atomic_inc(&rdev->read_errors);
37256 + atomic_inc_unchecked(&rdev->read_errors);
37257 if (conf->mddev->degraded >= conf->max_degraded)
37258 printk_rl(KERN_WARNING
37259 "raid5:%s: read error not correctable "
37260 @@ -1543,7 +1543,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
37261 (unsigned long long)(sh->sector
37262 + rdev->data_offset),
37263 bdn);
37264 - else if (atomic_read(&rdev->read_errors)
37265 + else if (atomic_read_unchecked(&rdev->read_errors)
37266 > conf->max_nr_stripes)
37267 printk(KERN_WARNING
37268 "raid5:%s: Too many read errors, failing device %s.\n",
37269 @@ -1870,6 +1870,7 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
37270 sector_t r_sector;
37271 struct stripe_head sh2;
37272
37273 + pax_track_stack();
37274
37275 chunk_offset = sector_div(new_sector, sectors_per_chunk);
37276 stripe = new_sector;
37277 diff --git a/drivers/media/common/saa7146_hlp.c b/drivers/media/common/saa7146_hlp.c
37278 index 05bde9c..2f31d40 100644
37279 --- a/drivers/media/common/saa7146_hlp.c
37280 +++ b/drivers/media/common/saa7146_hlp.c
37281 @@ -353,6 +353,8 @@ static void calculate_clipping_registers_rect(struct saa7146_dev *dev, struct sa
37282
37283 int x[32], y[32], w[32], h[32];
37284
37285 + pax_track_stack();
37286 +
37287 /* clear out memory */
37288 memset(&line_list[0], 0x00, sizeof(u32)*32);
37289 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
37290 diff --git a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
37291 index cb22da5..82b686e 100644
37292 --- a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
37293 +++ b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
37294 @@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot, u8 * eb
37295 u8 buf[HOST_LINK_BUF_SIZE];
37296 int i;
37297
37298 + pax_track_stack();
37299 +
37300 dprintk("%s\n", __func__);
37301
37302 /* check if we have space for a link buf in the rx_buffer */
37303 @@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(struct file *file,
37304 unsigned long timeout;
37305 int written;
37306
37307 + pax_track_stack();
37308 +
37309 dprintk("%s\n", __func__);
37310
37311 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
37312 diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
37313 index 2fe05d0..a3289c4 100644
37314 --- a/drivers/media/dvb/dvb-core/dvb_demux.h
37315 +++ b/drivers/media/dvb/dvb-core/dvb_demux.h
37316 @@ -71,7 +71,7 @@ struct dvb_demux_feed {
37317 union {
37318 dmx_ts_cb ts;
37319 dmx_section_cb sec;
37320 - } cb;
37321 + } __no_const cb;
37322
37323 struct dvb_demux *demux;
37324 void *priv;
37325 diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
37326 index 94159b9..376bd8e 100644
37327 --- a/drivers/media/dvb/dvb-core/dvbdev.c
37328 +++ b/drivers/media/dvb/dvb-core/dvbdev.c
37329 @@ -191,7 +191,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
37330 const struct dvb_device *template, void *priv, int type)
37331 {
37332 struct dvb_device *dvbdev;
37333 - struct file_operations *dvbdevfops;
37334 + file_operations_no_const *dvbdevfops;
37335 struct device *clsdev;
37336 int minor;
37337 int id;
37338 diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
37339 index 2a53dd0..db8c07a 100644
37340 --- a/drivers/media/dvb/dvb-usb/cxusb.c
37341 +++ b/drivers/media/dvb/dvb-usb/cxusb.c
37342 @@ -1040,7 +1040,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
37343 struct dib0700_adapter_state {
37344 int (*set_param_save) (struct dvb_frontend *,
37345 struct dvb_frontend_parameters *);
37346 -};
37347 +} __no_const;
37348
37349 static int dib7070_set_param_override(struct dvb_frontend *fe,
37350 struct dvb_frontend_parameters *fep)
37351 diff --git a/drivers/media/dvb/dvb-usb/dib0700_core.c b/drivers/media/dvb/dvb-usb/dib0700_core.c
37352 index db7f7f7..f55e96f 100644
37353 --- a/drivers/media/dvb/dvb-usb/dib0700_core.c
37354 +++ b/drivers/media/dvb/dvb-usb/dib0700_core.c
37355 @@ -332,6 +332,8 @@ int dib0700_download_firmware(struct usb_device *udev, const struct firmware *fw
37356
37357 u8 buf[260];
37358
37359 + pax_track_stack();
37360 +
37361 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
37362 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",hx.addr, hx.len, hx.chk);
37363
37364 diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c
37365 index 524acf5..5ffc403 100644
37366 --- a/drivers/media/dvb/dvb-usb/dib0700_devices.c
37367 +++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c
37368 @@ -28,7 +28,7 @@ MODULE_PARM_DESC(force_lna_activation, "force the activation of Low-Noise-Amplif
37369
37370 struct dib0700_adapter_state {
37371 int (*set_param_save) (struct dvb_frontend *, struct dvb_frontend_parameters *);
37372 -};
37373 +} __no_const;
37374
37375 /* Hauppauge Nova-T 500 (aka Bristol)
37376 * has a LNA on GPIO0 which is enabled by setting 1 */
37377 diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h
37378 index ba91735..4261d84 100644
37379 --- a/drivers/media/dvb/frontends/dib3000.h
37380 +++ b/drivers/media/dvb/frontends/dib3000.h
37381 @@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
37382 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
37383 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
37384 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
37385 -};
37386 +} __no_const;
37387
37388 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
37389 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
37390 diff --git a/drivers/media/dvb/frontends/or51211.c b/drivers/media/dvb/frontends/or51211.c
37391 index c709ce6..b3fe620 100644
37392 --- a/drivers/media/dvb/frontends/or51211.c
37393 +++ b/drivers/media/dvb/frontends/or51211.c
37394 @@ -113,6 +113,8 @@ static int or51211_load_firmware (struct dvb_frontend* fe,
37395 u8 tudata[585];
37396 int i;
37397
37398 + pax_track_stack();
37399 +
37400 dprintk("Firmware is %zd bytes\n",fw->size);
37401
37402 /* Get eprom data */
37403 diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
37404 index 482d0f3..ee1e202 100644
37405 --- a/drivers/media/radio/radio-cadet.c
37406 +++ b/drivers/media/radio/radio-cadet.c
37407 @@ -347,7 +347,7 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
37408 while (i < count && dev->rdsin != dev->rdsout)
37409 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
37410
37411 - if (copy_to_user(data, readbuf, i))
37412 + if (i > sizeof readbuf || copy_to_user(data, readbuf, i))
37413 return -EFAULT;
37414 return i;
37415 }
37416 diff --git a/drivers/media/video/cx18/cx18-driver.c b/drivers/media/video/cx18/cx18-driver.c
37417 index 6dd51e2..0359b92 100644
37418 --- a/drivers/media/video/cx18/cx18-driver.c
37419 +++ b/drivers/media/video/cx18/cx18-driver.c
37420 @@ -56,7 +56,7 @@ static struct pci_device_id cx18_pci_tbl[] __devinitdata = {
37421
37422 MODULE_DEVICE_TABLE(pci, cx18_pci_tbl);
37423
37424 -static atomic_t cx18_instance = ATOMIC_INIT(0);
37425 +static atomic_unchecked_t cx18_instance = ATOMIC_INIT(0);
37426
37427 /* Parameter declarations */
37428 static int cardtype[CX18_MAX_CARDS];
37429 @@ -288,6 +288,8 @@ void cx18_read_eeprom(struct cx18 *cx, struct tveeprom *tv)
37430 struct i2c_client c;
37431 u8 eedata[256];
37432
37433 + pax_track_stack();
37434 +
37435 memset(&c, 0, sizeof(c));
37436 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
37437 c.adapter = &cx->i2c_adap[0];
37438 @@ -800,7 +802,7 @@ static int __devinit cx18_probe(struct pci_dev *pci_dev,
37439 struct cx18 *cx;
37440
37441 /* FIXME - module parameter arrays constrain max instances */
37442 - i = atomic_inc_return(&cx18_instance) - 1;
37443 + i = atomic_inc_return_unchecked(&cx18_instance) - 1;
37444 if (i >= CX18_MAX_CARDS) {
37445 printk(KERN_ERR "cx18: cannot manage card %d, driver has a "
37446 "limit of 0 - %d\n", i, CX18_MAX_CARDS - 1);
37447 diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c
37448 index 463ec34..2f4625a 100644
37449 --- a/drivers/media/video/ivtv/ivtv-driver.c
37450 +++ b/drivers/media/video/ivtv/ivtv-driver.c
37451 @@ -79,7 +79,7 @@ static struct pci_device_id ivtv_pci_tbl[] __devinitdata = {
37452 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
37453
37454 /* ivtv instance counter */
37455 -static atomic_t ivtv_instance = ATOMIC_INIT(0);
37456 +static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
37457
37458 /* Parameter declarations */
37459 static int cardtype[IVTV_MAX_CARDS];
37460 diff --git a/drivers/media/video/omap24xxcam.c b/drivers/media/video/omap24xxcam.c
37461 index 5fc4ac0..652a54a 100644
37462 --- a/drivers/media/video/omap24xxcam.c
37463 +++ b/drivers/media/video/omap24xxcam.c
37464 @@ -401,7 +401,7 @@ static void omap24xxcam_vbq_complete(struct omap24xxcam_sgdma *sgdma,
37465 spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
37466
37467 do_gettimeofday(&vb->ts);
37468 - vb->field_count = atomic_add_return(2, &fh->field_count);
37469 + vb->field_count = atomic_add_return_unchecked(2, &fh->field_count);
37470 if (csr & csr_error) {
37471 vb->state = VIDEOBUF_ERROR;
37472 if (!atomic_read(&fh->cam->in_reset)) {
37473 diff --git a/drivers/media/video/omap24xxcam.h b/drivers/media/video/omap24xxcam.h
37474 index 2ce67f5..cf26a5b 100644
37475 --- a/drivers/media/video/omap24xxcam.h
37476 +++ b/drivers/media/video/omap24xxcam.h
37477 @@ -533,7 +533,7 @@ struct omap24xxcam_fh {
37478 spinlock_t vbq_lock; /* spinlock for the videobuf queue */
37479 struct videobuf_queue vbq;
37480 struct v4l2_pix_format pix; /* serialise pix by vbq->lock */
37481 - atomic_t field_count; /* field counter for videobuf_buffer */
37482 + atomic_unchecked_t field_count; /* field counter for videobuf_buffer */
37483 /* accessing cam here doesn't need serialisation: it's constant */
37484 struct omap24xxcam_device *cam;
37485 };
37486 diff --git a/drivers/media/video/pvrusb2/pvrusb2-eeprom.c b/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
37487 index 299afa4..eb47459 100644
37488 --- a/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
37489 +++ b/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
37490 @@ -119,6 +119,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw *hdw)
37491 u8 *eeprom;
37492 struct tveeprom tvdata;
37493
37494 + pax_track_stack();
37495 +
37496 memset(&tvdata,0,sizeof(tvdata));
37497
37498 eeprom = pvr2_eeprom_fetch(hdw);
37499 diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
37500 index 5b152ff..3320638 100644
37501 --- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
37502 +++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
37503 @@ -195,7 +195,7 @@ struct pvr2_hdw {
37504
37505 /* I2C stuff */
37506 struct i2c_adapter i2c_adap;
37507 - struct i2c_algorithm i2c_algo;
37508 + i2c_algorithm_no_const i2c_algo;
37509 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
37510 int i2c_cx25840_hack_state;
37511 int i2c_linked;
37512 diff --git a/drivers/media/video/saa7134/saa6752hs.c b/drivers/media/video/saa7134/saa6752hs.c
37513 index 1eabff6..8e2313a 100644
37514 --- a/drivers/media/video/saa7134/saa6752hs.c
37515 +++ b/drivers/media/video/saa7134/saa6752hs.c
37516 @@ -683,6 +683,8 @@ static int saa6752hs_init(struct v4l2_subdev *sd, u32 leading_null_bytes)
37517 unsigned char localPAT[256];
37518 unsigned char localPMT[256];
37519
37520 + pax_track_stack();
37521 +
37522 /* Set video format - must be done first as it resets other settings */
37523 set_reg8(client, 0x41, h->video_format);
37524
37525 diff --git a/drivers/media/video/saa7164/saa7164-cmd.c b/drivers/media/video/saa7164/saa7164-cmd.c
37526 index 9c1d3ac..b1b49e9 100644
37527 --- a/drivers/media/video/saa7164/saa7164-cmd.c
37528 +++ b/drivers/media/video/saa7164/saa7164-cmd.c
37529 @@ -87,6 +87,8 @@ int saa7164_irq_dequeue(struct saa7164_dev *dev)
37530 wait_queue_head_t *q = 0;
37531 dprintk(DBGLVL_CMD, "%s()\n", __func__);
37532
37533 + pax_track_stack();
37534 +
37535 /* While any outstand message on the bus exists... */
37536 do {
37537
37538 @@ -126,6 +128,8 @@ int saa7164_cmd_dequeue(struct saa7164_dev *dev)
37539 u8 tmp[512];
37540 dprintk(DBGLVL_CMD, "%s()\n", __func__);
37541
37542 + pax_track_stack();
37543 +
37544 while (loop) {
37545
37546 tmComResInfo_t tRsp = { 0, 0, 0, 0, 0, 0 };
37547 diff --git a/drivers/media/video/usbvideo/ibmcam.c b/drivers/media/video/usbvideo/ibmcam.c
37548 index b085496..cde0270 100644
37549 --- a/drivers/media/video/usbvideo/ibmcam.c
37550 +++ b/drivers/media/video/usbvideo/ibmcam.c
37551 @@ -3947,15 +3947,15 @@ static struct usb_device_id id_table[] = {
37552 static int __init ibmcam_init(void)
37553 {
37554 struct usbvideo_cb cbTbl;
37555 - memset(&cbTbl, 0, sizeof(cbTbl));
37556 - cbTbl.probe = ibmcam_probe;
37557 - cbTbl.setupOnOpen = ibmcam_setup_on_open;
37558 - cbTbl.videoStart = ibmcam_video_start;
37559 - cbTbl.videoStop = ibmcam_video_stop;
37560 - cbTbl.processData = ibmcam_ProcessIsocData;
37561 - cbTbl.postProcess = usbvideo_DeinterlaceFrame;
37562 - cbTbl.adjustPicture = ibmcam_adjust_picture;
37563 - cbTbl.getFPS = ibmcam_calculate_fps;
37564 + memset((void *)&cbTbl, 0, sizeof(cbTbl));
37565 + *(void **)&cbTbl.probe = ibmcam_probe;
37566 + *(void **)&cbTbl.setupOnOpen = ibmcam_setup_on_open;
37567 + *(void **)&cbTbl.videoStart = ibmcam_video_start;
37568 + *(void **)&cbTbl.videoStop = ibmcam_video_stop;
37569 + *(void **)&cbTbl.processData = ibmcam_ProcessIsocData;
37570 + *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame;
37571 + *(void **)&cbTbl.adjustPicture = ibmcam_adjust_picture;
37572 + *(void **)&cbTbl.getFPS = ibmcam_calculate_fps;
37573 return usbvideo_register(
37574 &cams,
37575 MAX_IBMCAM,
37576 diff --git a/drivers/media/video/usbvideo/konicawc.c b/drivers/media/video/usbvideo/konicawc.c
37577 index 31d57f2..600b735 100644
37578 --- a/drivers/media/video/usbvideo/konicawc.c
37579 +++ b/drivers/media/video/usbvideo/konicawc.c
37580 @@ -225,7 +225,7 @@ static void konicawc_register_input(struct konicawc *cam, struct usb_device *dev
37581 int error;
37582
37583 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
37584 - strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
37585 + strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
37586
37587 cam->input = input_dev = input_allocate_device();
37588 if (!input_dev) {
37589 @@ -935,16 +935,16 @@ static int __init konicawc_init(void)
37590 struct usbvideo_cb cbTbl;
37591 printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
37592 DRIVER_DESC "\n");
37593 - memset(&cbTbl, 0, sizeof(cbTbl));
37594 - cbTbl.probe = konicawc_probe;
37595 - cbTbl.setupOnOpen = konicawc_setup_on_open;
37596 - cbTbl.processData = konicawc_process_isoc;
37597 - cbTbl.getFPS = konicawc_calculate_fps;
37598 - cbTbl.setVideoMode = konicawc_set_video_mode;
37599 - cbTbl.startDataPump = konicawc_start_data;
37600 - cbTbl.stopDataPump = konicawc_stop_data;
37601 - cbTbl.adjustPicture = konicawc_adjust_picture;
37602 - cbTbl.userFree = konicawc_free_uvd;
37603 + memset((void * )&cbTbl, 0, sizeof(cbTbl));
37604 + *(void **)&cbTbl.probe = konicawc_probe;
37605 + *(void **)&cbTbl.setupOnOpen = konicawc_setup_on_open;
37606 + *(void **)&cbTbl.processData = konicawc_process_isoc;
37607 + *(void **)&cbTbl.getFPS = konicawc_calculate_fps;
37608 + *(void **)&cbTbl.setVideoMode = konicawc_set_video_mode;
37609 + *(void **)&cbTbl.startDataPump = konicawc_start_data;
37610 + *(void **)&cbTbl.stopDataPump = konicawc_stop_data;
37611 + *(void **)&cbTbl.adjustPicture = konicawc_adjust_picture;
37612 + *(void **)&cbTbl.userFree = konicawc_free_uvd;
37613 return usbvideo_register(
37614 &cams,
37615 MAX_CAMERAS,
37616 diff --git a/drivers/media/video/usbvideo/quickcam_messenger.c b/drivers/media/video/usbvideo/quickcam_messenger.c
37617 index 803d3e4..c4d1b96 100644
37618 --- a/drivers/media/video/usbvideo/quickcam_messenger.c
37619 +++ b/drivers/media/video/usbvideo/quickcam_messenger.c
37620 @@ -89,7 +89,7 @@ static void qcm_register_input(struct qcm *cam, struct usb_device *dev)
37621 int error;
37622
37623 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
37624 - strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
37625 + strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
37626
37627 cam->input = input_dev = input_allocate_device();
37628 if (!input_dev) {
37629 diff --git a/drivers/media/video/usbvideo/ultracam.c b/drivers/media/video/usbvideo/ultracam.c
37630 index fbd1b63..292f9f0 100644
37631 --- a/drivers/media/video/usbvideo/ultracam.c
37632 +++ b/drivers/media/video/usbvideo/ultracam.c
37633 @@ -655,14 +655,14 @@ static int __init ultracam_init(void)
37634 {
37635 struct usbvideo_cb cbTbl;
37636 memset(&cbTbl, 0, sizeof(cbTbl));
37637 - cbTbl.probe = ultracam_probe;
37638 - cbTbl.setupOnOpen = ultracam_setup_on_open;
37639 - cbTbl.videoStart = ultracam_video_start;
37640 - cbTbl.videoStop = ultracam_video_stop;
37641 - cbTbl.processData = ultracam_ProcessIsocData;
37642 - cbTbl.postProcess = usbvideo_DeinterlaceFrame;
37643 - cbTbl.adjustPicture = ultracam_adjust_picture;
37644 - cbTbl.getFPS = ultracam_calculate_fps;
37645 + *(void **)&cbTbl.probe = ultracam_probe;
37646 + *(void **)&cbTbl.setupOnOpen = ultracam_setup_on_open;
37647 + *(void **)&cbTbl.videoStart = ultracam_video_start;
37648 + *(void **)&cbTbl.videoStop = ultracam_video_stop;
37649 + *(void **)&cbTbl.processData = ultracam_ProcessIsocData;
37650 + *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame;
37651 + *(void **)&cbTbl.adjustPicture = ultracam_adjust_picture;
37652 + *(void **)&cbTbl.getFPS = ultracam_calculate_fps;
37653 return usbvideo_register(
37654 &cams,
37655 MAX_CAMERAS,
37656 diff --git a/drivers/media/video/usbvideo/usbvideo.c b/drivers/media/video/usbvideo/usbvideo.c
37657 index dea8b32..34f6878 100644
37658 --- a/drivers/media/video/usbvideo/usbvideo.c
37659 +++ b/drivers/media/video/usbvideo/usbvideo.c
37660 @@ -697,15 +697,15 @@ int usbvideo_register(
37661 __func__, cams, base_size, num_cams);
37662
37663 /* Copy callbacks, apply defaults for those that are not set */
37664 - memmove(&cams->cb, cbTbl, sizeof(cams->cb));
37665 + memmove((void *)&cams->cb, cbTbl, sizeof(cams->cb));
37666 if (cams->cb.getFrame == NULL)
37667 - cams->cb.getFrame = usbvideo_GetFrame;
37668 + *(void **)&cams->cb.getFrame = usbvideo_GetFrame;
37669 if (cams->cb.disconnect == NULL)
37670 - cams->cb.disconnect = usbvideo_Disconnect;
37671 + *(void **)&cams->cb.disconnect = usbvideo_Disconnect;
37672 if (cams->cb.startDataPump == NULL)
37673 - cams->cb.startDataPump = usbvideo_StartDataPump;
37674 + *(void **)&cams->cb.startDataPump = usbvideo_StartDataPump;
37675 if (cams->cb.stopDataPump == NULL)
37676 - cams->cb.stopDataPump = usbvideo_StopDataPump;
37677 + *(void **)&cams->cb.stopDataPump = usbvideo_StopDataPump;
37678
37679 cams->num_cameras = num_cams;
37680 cams->cam = (struct uvd *) &cams[1];
37681 diff --git a/drivers/media/video/usbvideo/usbvideo.h b/drivers/media/video/usbvideo/usbvideo.h
37682 index c66985b..7fa143a 100644
37683 --- a/drivers/media/video/usbvideo/usbvideo.h
37684 +++ b/drivers/media/video/usbvideo/usbvideo.h
37685 @@ -268,7 +268,7 @@ struct usbvideo_cb {
37686 int (*startDataPump)(struct uvd *uvd);
37687 void (*stopDataPump)(struct uvd *uvd);
37688 int (*setVideoMode)(struct uvd *uvd, struct video_window *vw);
37689 -};
37690 +} __no_const;
37691
37692 struct usbvideo {
37693 int num_cameras; /* As allocated */
37694 diff --git a/drivers/media/video/usbvision/usbvision-core.c b/drivers/media/video/usbvision/usbvision-core.c
37695 index e0f91e4..37554ea 100644
37696 --- a/drivers/media/video/usbvision/usbvision-core.c
37697 +++ b/drivers/media/video/usbvision/usbvision-core.c
37698 @@ -820,6 +820,8 @@ static enum ParseState usbvision_parse_compress(struct usb_usbvision *usbvision,
37699 unsigned char rv, gv, bv;
37700 static unsigned char *Y, *U, *V;
37701
37702 + pax_track_stack();
37703 +
37704 frame = usbvision->curFrame;
37705 imageSize = frame->frmwidth * frame->frmheight;
37706 if ( (frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
37707 diff --git a/drivers/media/video/v4l2-device.c b/drivers/media/video/v4l2-device.c
37708 index 0d06e7c..3d17d24 100644
37709 --- a/drivers/media/video/v4l2-device.c
37710 +++ b/drivers/media/video/v4l2-device.c
37711 @@ -50,9 +50,9 @@ int v4l2_device_register(struct device *dev, struct v4l2_device *v4l2_dev)
37712 EXPORT_SYMBOL_GPL(v4l2_device_register);
37713
37714 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
37715 - atomic_t *instance)
37716 + atomic_unchecked_t *instance)
37717 {
37718 - int num = atomic_inc_return(instance) - 1;
37719 + int num = atomic_inc_return_unchecked(instance) - 1;
37720 int len = strlen(basename);
37721
37722 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
37723 diff --git a/drivers/media/video/videobuf-dma-sg.c b/drivers/media/video/videobuf-dma-sg.c
37724 index 032ebae..6a3532c 100644
37725 --- a/drivers/media/video/videobuf-dma-sg.c
37726 +++ b/drivers/media/video/videobuf-dma-sg.c
37727 @@ -693,6 +693,8 @@ void *videobuf_sg_alloc(size_t size)
37728 {
37729 struct videobuf_queue q;
37730
37731 + pax_track_stack();
37732 +
37733 /* Required to make generic handler to call __videobuf_alloc */
37734 q.int_ops = &sg_ops;
37735
37736 diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
37737 index b6992b7..9fa7547 100644
37738 --- a/drivers/message/fusion/mptbase.c
37739 +++ b/drivers/message/fusion/mptbase.c
37740 @@ -6709,8 +6709,14 @@ procmpt_iocinfo_read(char *buf, char **start, off_t offset, int request, int *eo
37741 len += sprintf(buf+len, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
37742 len += sprintf(buf+len, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
37743
37744 +#ifdef CONFIG_GRKERNSEC_HIDESYM
37745 + len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
37746 + NULL, NULL);
37747 +#else
37748 len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
37749 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
37750 +#endif
37751 +
37752 /*
37753 * Rounding UP to nearest 4-kB boundary here...
37754 */
37755 diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
37756 index 83873e3..e360e9a 100644
37757 --- a/drivers/message/fusion/mptsas.c
37758 +++ b/drivers/message/fusion/mptsas.c
37759 @@ -436,6 +436,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
37760 return 0;
37761 }
37762
37763 +static inline void
37764 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
37765 +{
37766 + if (phy_info->port_details) {
37767 + phy_info->port_details->rphy = rphy;
37768 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
37769 + ioc->name, rphy));
37770 + }
37771 +
37772 + if (rphy) {
37773 + dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
37774 + &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
37775 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
37776 + ioc->name, rphy, rphy->dev.release));
37777 + }
37778 +}
37779 +
37780 /* no mutex */
37781 static void
37782 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
37783 @@ -474,23 +491,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
37784 return NULL;
37785 }
37786
37787 -static inline void
37788 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
37789 -{
37790 - if (phy_info->port_details) {
37791 - phy_info->port_details->rphy = rphy;
37792 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
37793 - ioc->name, rphy));
37794 - }
37795 -
37796 - if (rphy) {
37797 - dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
37798 - &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
37799 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
37800 - ioc->name, rphy, rphy->dev.release));
37801 - }
37802 -}
37803 -
37804 static inline struct sas_port *
37805 mptsas_get_port(struct mptsas_phyinfo *phy_info)
37806 {
37807 diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
37808 index bd096ca..332cf76 100644
37809 --- a/drivers/message/fusion/mptscsih.c
37810 +++ b/drivers/message/fusion/mptscsih.c
37811 @@ -1248,15 +1248,16 @@ mptscsih_info(struct Scsi_Host *SChost)
37812
37813 h = shost_priv(SChost);
37814
37815 - if (h) {
37816 - if (h->info_kbuf == NULL)
37817 - if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
37818 - return h->info_kbuf;
37819 - h->info_kbuf[0] = '\0';
37820 + if (!h)
37821 + return NULL;
37822
37823 - mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
37824 - h->info_kbuf[size-1] = '\0';
37825 - }
37826 + if (h->info_kbuf == NULL)
37827 + if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
37828 + return h->info_kbuf;
37829 + h->info_kbuf[0] = '\0';
37830 +
37831 + mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
37832 + h->info_kbuf[size-1] = '\0';
37833
37834 return h->info_kbuf;
37835 }
37836 diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
37837 index efba702..59b2c0f 100644
37838 --- a/drivers/message/i2o/i2o_config.c
37839 +++ b/drivers/message/i2o/i2o_config.c
37840 @@ -787,6 +787,8 @@ static int i2o_cfg_passthru(unsigned long arg)
37841 struct i2o_message *msg;
37842 unsigned int iop;
37843
37844 + pax_track_stack();
37845 +
37846 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
37847 return -EFAULT;
37848
37849 diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
37850 index 7045c45..c07b170 100644
37851 --- a/drivers/message/i2o/i2o_proc.c
37852 +++ b/drivers/message/i2o/i2o_proc.c
37853 @@ -259,13 +259,6 @@ static char *scsi_devices[] = {
37854 "Array Controller Device"
37855 };
37856
37857 -static char *chtostr(u8 * chars, int n)
37858 -{
37859 - char tmp[256];
37860 - tmp[0] = 0;
37861 - return strncat(tmp, (char *)chars, n);
37862 -}
37863 -
37864 static int i2o_report_query_status(struct seq_file *seq, int block_status,
37865 char *group)
37866 {
37867 @@ -842,8 +835,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
37868
37869 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
37870 seq_printf(seq, "%-#8x", ddm_table.module_id);
37871 - seq_printf(seq, "%-29s",
37872 - chtostr(ddm_table.module_name_version, 28));
37873 + seq_printf(seq, "%-.28s", ddm_table.module_name_version);
37874 seq_printf(seq, "%9d ", ddm_table.data_size);
37875 seq_printf(seq, "%8d", ddm_table.code_size);
37876
37877 @@ -944,8 +936,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
37878
37879 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
37880 seq_printf(seq, "%-#8x", dst->module_id);
37881 - seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
37882 - seq_printf(seq, "%-9s", chtostr(dst->date, 8));
37883 + seq_printf(seq, "%-.28s", dst->module_name_version);
37884 + seq_printf(seq, "%-.8s", dst->date);
37885 seq_printf(seq, "%8d ", dst->module_size);
37886 seq_printf(seq, "%8d ", dst->mpb_size);
37887 seq_printf(seq, "0x%04x", dst->module_flags);
37888 @@ -1276,14 +1268,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
37889 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
37890 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
37891 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
37892 - seq_printf(seq, "Vendor info : %s\n",
37893 - chtostr((u8 *) (work32 + 2), 16));
37894 - seq_printf(seq, "Product info : %s\n",
37895 - chtostr((u8 *) (work32 + 6), 16));
37896 - seq_printf(seq, "Description : %s\n",
37897 - chtostr((u8 *) (work32 + 10), 16));
37898 - seq_printf(seq, "Product rev. : %s\n",
37899 - chtostr((u8 *) (work32 + 14), 8));
37900 + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
37901 + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
37902 + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
37903 + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
37904
37905 seq_printf(seq, "Serial number : ");
37906 print_serial_number(seq, (u8 *) (work32 + 16),
37907 @@ -1328,10 +1316,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
37908 }
37909
37910 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
37911 - seq_printf(seq, "Module name : %s\n",
37912 - chtostr(result.module_name, 24));
37913 - seq_printf(seq, "Module revision : %s\n",
37914 - chtostr(result.module_rev, 8));
37915 + seq_printf(seq, "Module name : %.24s\n", result.module_name);
37916 + seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
37917
37918 seq_printf(seq, "Serial number : ");
37919 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
37920 @@ -1362,14 +1348,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
37921 return 0;
37922 }
37923
37924 - seq_printf(seq, "Device name : %s\n",
37925 - chtostr(result.device_name, 64));
37926 - seq_printf(seq, "Service name : %s\n",
37927 - chtostr(result.service_name, 64));
37928 - seq_printf(seq, "Physical name : %s\n",
37929 - chtostr(result.physical_location, 64));
37930 - seq_printf(seq, "Instance number : %s\n",
37931 - chtostr(result.instance_number, 4));
37932 + seq_printf(seq, "Device name : %.64s\n", result.device_name);
37933 + seq_printf(seq, "Service name : %.64s\n", result.service_name);
37934 + seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
37935 + seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
37936
37937 return 0;
37938 }
37939 diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
37940 index 27cf4af..b1205b8 100644
37941 --- a/drivers/message/i2o/iop.c
37942 +++ b/drivers/message/i2o/iop.c
37943 @@ -110,10 +110,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
37944
37945 spin_lock_irqsave(&c->context_list_lock, flags);
37946
37947 - if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
37948 - atomic_inc(&c->context_list_counter);
37949 + if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
37950 + atomic_inc_unchecked(&c->context_list_counter);
37951
37952 - entry->context = atomic_read(&c->context_list_counter);
37953 + entry->context = atomic_read_unchecked(&c->context_list_counter);
37954
37955 list_add(&entry->list, &c->context_list);
37956
37957 @@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(void)
37958
37959 #if BITS_PER_LONG == 64
37960 spin_lock_init(&c->context_list_lock);
37961 - atomic_set(&c->context_list_counter, 0);
37962 + atomic_set_unchecked(&c->context_list_counter, 0);
37963 INIT_LIST_HEAD(&c->context_list);
37964 #endif
37965
37966 diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
37967 index 78e3e85..66c9a0d 100644
37968 --- a/drivers/mfd/ab3100-core.c
37969 +++ b/drivers/mfd/ab3100-core.c
37970 @@ -777,7 +777,7 @@ struct ab_family_id {
37971 char *name;
37972 };
37973
37974 -static const struct ab_family_id ids[] __initdata = {
37975 +static const struct ab_family_id ids[] __initconst = {
37976 /* AB3100 */
37977 {
37978 .id = 0xc0,
37979 diff --git a/drivers/mfd/wm8350-i2c.c b/drivers/mfd/wm8350-i2c.c
37980 index 8d8c932..8104515 100644
37981 --- a/drivers/mfd/wm8350-i2c.c
37982 +++ b/drivers/mfd/wm8350-i2c.c
37983 @@ -43,6 +43,8 @@ static int wm8350_i2c_write_device(struct wm8350 *wm8350, char reg,
37984 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
37985 int ret;
37986
37987 + pax_track_stack();
37988 +
37989 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
37990 return -EINVAL;
37991
37992 diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
37993 index e4ff50b..4cc3f04 100644
37994 --- a/drivers/misc/kgdbts.c
37995 +++ b/drivers/misc/kgdbts.c
37996 @@ -118,7 +118,7 @@
37997 } while (0)
37998 #define MAX_CONFIG_LEN 40
37999
38000 -static struct kgdb_io kgdbts_io_ops;
38001 +static const struct kgdb_io kgdbts_io_ops;
38002 static char get_buf[BUFMAX];
38003 static int get_buf_cnt;
38004 static char put_buf[BUFMAX];
38005 @@ -1102,7 +1102,7 @@ static void kgdbts_post_exp_handler(void)
38006 module_put(THIS_MODULE);
38007 }
38008
38009 -static struct kgdb_io kgdbts_io_ops = {
38010 +static const struct kgdb_io kgdbts_io_ops = {
38011 .name = "kgdbts",
38012 .read_char = kgdbts_get_char,
38013 .write_char = kgdbts_put_char,
38014 diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
38015 index 37e7cfc..67cfb76 100644
38016 --- a/drivers/misc/sgi-gru/gruhandles.c
38017 +++ b/drivers/misc/sgi-gru/gruhandles.c
38018 @@ -39,8 +39,8 @@ struct mcs_op_statistic mcs_op_statistics[mcsop_last];
38019
38020 static void update_mcs_stats(enum mcs_op op, unsigned long clks)
38021 {
38022 - atomic_long_inc(&mcs_op_statistics[op].count);
38023 - atomic_long_add(clks, &mcs_op_statistics[op].total);
38024 + atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
38025 + atomic_long_add_unchecked(clks, &mcs_op_statistics[op].total);
38026 if (mcs_op_statistics[op].max < clks)
38027 mcs_op_statistics[op].max = clks;
38028 }
38029 diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
38030 index 3f2375c..467c6e6 100644
38031 --- a/drivers/misc/sgi-gru/gruprocfs.c
38032 +++ b/drivers/misc/sgi-gru/gruprocfs.c
38033 @@ -32,9 +32,9 @@
38034
38035 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
38036
38037 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
38038 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
38039 {
38040 - unsigned long val = atomic_long_read(v);
38041 + unsigned long val = atomic_long_read_unchecked(v);
38042
38043 if (val)
38044 seq_printf(s, "%16lu %s\n", val, id);
38045 @@ -136,8 +136,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
38046 "cch_interrupt_sync", "cch_deallocate", "tgh_invalidate"};
38047
38048 for (op = 0; op < mcsop_last; op++) {
38049 - count = atomic_long_read(&mcs_op_statistics[op].count);
38050 - total = atomic_long_read(&mcs_op_statistics[op].total);
38051 + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
38052 + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
38053 max = mcs_op_statistics[op].max;
38054 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
38055 count ? total / count : 0, max);
38056 diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
38057 index 46990bc..4a251b5 100644
38058 --- a/drivers/misc/sgi-gru/grutables.h
38059 +++ b/drivers/misc/sgi-gru/grutables.h
38060 @@ -167,84 +167,84 @@ extern unsigned int gru_max_gids;
38061 * GRU statistics.
38062 */
38063 struct gru_stats_s {
38064 - atomic_long_t vdata_alloc;
38065 - atomic_long_t vdata_free;
38066 - atomic_long_t gts_alloc;
38067 - atomic_long_t gts_free;
38068 - atomic_long_t vdata_double_alloc;
38069 - atomic_long_t gts_double_allocate;
38070 - atomic_long_t assign_context;
38071 - atomic_long_t assign_context_failed;
38072 - atomic_long_t free_context;
38073 - atomic_long_t load_user_context;
38074 - atomic_long_t load_kernel_context;
38075 - atomic_long_t lock_kernel_context;
38076 - atomic_long_t unlock_kernel_context;
38077 - atomic_long_t steal_user_context;
38078 - atomic_long_t steal_kernel_context;
38079 - atomic_long_t steal_context_failed;
38080 - atomic_long_t nopfn;
38081 - atomic_long_t break_cow;
38082 - atomic_long_t asid_new;
38083 - atomic_long_t asid_next;
38084 - atomic_long_t asid_wrap;
38085 - atomic_long_t asid_reuse;
38086 - atomic_long_t intr;
38087 - atomic_long_t intr_mm_lock_failed;
38088 - atomic_long_t call_os;
38089 - atomic_long_t call_os_offnode_reference;
38090 - atomic_long_t call_os_check_for_bug;
38091 - atomic_long_t call_os_wait_queue;
38092 - atomic_long_t user_flush_tlb;
38093 - atomic_long_t user_unload_context;
38094 - atomic_long_t user_exception;
38095 - atomic_long_t set_context_option;
38096 - atomic_long_t migrate_check;
38097 - atomic_long_t migrated_retarget;
38098 - atomic_long_t migrated_unload;
38099 - atomic_long_t migrated_unload_delay;
38100 - atomic_long_t migrated_nopfn_retarget;
38101 - atomic_long_t migrated_nopfn_unload;
38102 - atomic_long_t tlb_dropin;
38103 - atomic_long_t tlb_dropin_fail_no_asid;
38104 - atomic_long_t tlb_dropin_fail_upm;
38105 - atomic_long_t tlb_dropin_fail_invalid;
38106 - atomic_long_t tlb_dropin_fail_range_active;
38107 - atomic_long_t tlb_dropin_fail_idle;
38108 - atomic_long_t tlb_dropin_fail_fmm;
38109 - atomic_long_t tlb_dropin_fail_no_exception;
38110 - atomic_long_t tlb_dropin_fail_no_exception_war;
38111 - atomic_long_t tfh_stale_on_fault;
38112 - atomic_long_t mmu_invalidate_range;
38113 - atomic_long_t mmu_invalidate_page;
38114 - atomic_long_t mmu_clear_flush_young;
38115 - atomic_long_t flush_tlb;
38116 - atomic_long_t flush_tlb_gru;
38117 - atomic_long_t flush_tlb_gru_tgh;
38118 - atomic_long_t flush_tlb_gru_zero_asid;
38119 + atomic_long_unchecked_t vdata_alloc;
38120 + atomic_long_unchecked_t vdata_free;
38121 + atomic_long_unchecked_t gts_alloc;
38122 + atomic_long_unchecked_t gts_free;
38123 + atomic_long_unchecked_t vdata_double_alloc;
38124 + atomic_long_unchecked_t gts_double_allocate;
38125 + atomic_long_unchecked_t assign_context;
38126 + atomic_long_unchecked_t assign_context_failed;
38127 + atomic_long_unchecked_t free_context;
38128 + atomic_long_unchecked_t load_user_context;
38129 + atomic_long_unchecked_t load_kernel_context;
38130 + atomic_long_unchecked_t lock_kernel_context;
38131 + atomic_long_unchecked_t unlock_kernel_context;
38132 + atomic_long_unchecked_t steal_user_context;
38133 + atomic_long_unchecked_t steal_kernel_context;
38134 + atomic_long_unchecked_t steal_context_failed;
38135 + atomic_long_unchecked_t nopfn;
38136 + atomic_long_unchecked_t break_cow;
38137 + atomic_long_unchecked_t asid_new;
38138 + atomic_long_unchecked_t asid_next;
38139 + atomic_long_unchecked_t asid_wrap;
38140 + atomic_long_unchecked_t asid_reuse;
38141 + atomic_long_unchecked_t intr;
38142 + atomic_long_unchecked_t intr_mm_lock_failed;
38143 + atomic_long_unchecked_t call_os;
38144 + atomic_long_unchecked_t call_os_offnode_reference;
38145 + atomic_long_unchecked_t call_os_check_for_bug;
38146 + atomic_long_unchecked_t call_os_wait_queue;
38147 + atomic_long_unchecked_t user_flush_tlb;
38148 + atomic_long_unchecked_t user_unload_context;
38149 + atomic_long_unchecked_t user_exception;
38150 + atomic_long_unchecked_t set_context_option;
38151 + atomic_long_unchecked_t migrate_check;
38152 + atomic_long_unchecked_t migrated_retarget;
38153 + atomic_long_unchecked_t migrated_unload;
38154 + atomic_long_unchecked_t migrated_unload_delay;
38155 + atomic_long_unchecked_t migrated_nopfn_retarget;
38156 + atomic_long_unchecked_t migrated_nopfn_unload;
38157 + atomic_long_unchecked_t tlb_dropin;
38158 + atomic_long_unchecked_t tlb_dropin_fail_no_asid;
38159 + atomic_long_unchecked_t tlb_dropin_fail_upm;
38160 + atomic_long_unchecked_t tlb_dropin_fail_invalid;
38161 + atomic_long_unchecked_t tlb_dropin_fail_range_active;
38162 + atomic_long_unchecked_t tlb_dropin_fail_idle;
38163 + atomic_long_unchecked_t tlb_dropin_fail_fmm;
38164 + atomic_long_unchecked_t tlb_dropin_fail_no_exception;
38165 + atomic_long_unchecked_t tlb_dropin_fail_no_exception_war;
38166 + atomic_long_unchecked_t tfh_stale_on_fault;
38167 + atomic_long_unchecked_t mmu_invalidate_range;
38168 + atomic_long_unchecked_t mmu_invalidate_page;
38169 + atomic_long_unchecked_t mmu_clear_flush_young;
38170 + atomic_long_unchecked_t flush_tlb;
38171 + atomic_long_unchecked_t flush_tlb_gru;
38172 + atomic_long_unchecked_t flush_tlb_gru_tgh;
38173 + atomic_long_unchecked_t flush_tlb_gru_zero_asid;
38174
38175 - atomic_long_t copy_gpa;
38176 + atomic_long_unchecked_t copy_gpa;
38177
38178 - atomic_long_t mesq_receive;
38179 - atomic_long_t mesq_receive_none;
38180 - atomic_long_t mesq_send;
38181 - atomic_long_t mesq_send_failed;
38182 - atomic_long_t mesq_noop;
38183 - atomic_long_t mesq_send_unexpected_error;
38184 - atomic_long_t mesq_send_lb_overflow;
38185 - atomic_long_t mesq_send_qlimit_reached;
38186 - atomic_long_t mesq_send_amo_nacked;
38187 - atomic_long_t mesq_send_put_nacked;
38188 - atomic_long_t mesq_qf_not_full;
38189 - atomic_long_t mesq_qf_locked;
38190 - atomic_long_t mesq_qf_noop_not_full;
38191 - atomic_long_t mesq_qf_switch_head_failed;
38192 - atomic_long_t mesq_qf_unexpected_error;
38193 - atomic_long_t mesq_noop_unexpected_error;
38194 - atomic_long_t mesq_noop_lb_overflow;
38195 - atomic_long_t mesq_noop_qlimit_reached;
38196 - atomic_long_t mesq_noop_amo_nacked;
38197 - atomic_long_t mesq_noop_put_nacked;
38198 + atomic_long_unchecked_t mesq_receive;
38199 + atomic_long_unchecked_t mesq_receive_none;
38200 + atomic_long_unchecked_t mesq_send;
38201 + atomic_long_unchecked_t mesq_send_failed;
38202 + atomic_long_unchecked_t mesq_noop;
38203 + atomic_long_unchecked_t mesq_send_unexpected_error;
38204 + atomic_long_unchecked_t mesq_send_lb_overflow;
38205 + atomic_long_unchecked_t mesq_send_qlimit_reached;
38206 + atomic_long_unchecked_t mesq_send_amo_nacked;
38207 + atomic_long_unchecked_t mesq_send_put_nacked;
38208 + atomic_long_unchecked_t mesq_qf_not_full;
38209 + atomic_long_unchecked_t mesq_qf_locked;
38210 + atomic_long_unchecked_t mesq_qf_noop_not_full;
38211 + atomic_long_unchecked_t mesq_qf_switch_head_failed;
38212 + atomic_long_unchecked_t mesq_qf_unexpected_error;
38213 + atomic_long_unchecked_t mesq_noop_unexpected_error;
38214 + atomic_long_unchecked_t mesq_noop_lb_overflow;
38215 + atomic_long_unchecked_t mesq_noop_qlimit_reached;
38216 + atomic_long_unchecked_t mesq_noop_amo_nacked;
38217 + atomic_long_unchecked_t mesq_noop_put_nacked;
38218
38219 };
38220
38221 @@ -252,8 +252,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
38222 cchop_deallocate, tghop_invalidate, mcsop_last};
38223
38224 struct mcs_op_statistic {
38225 - atomic_long_t count;
38226 - atomic_long_t total;
38227 + atomic_long_unchecked_t count;
38228 + atomic_long_unchecked_t total;
38229 unsigned long max;
38230 };
38231
38232 @@ -276,7 +276,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
38233
38234 #define STAT(id) do { \
38235 if (gru_options & OPT_STATS) \
38236 - atomic_long_inc(&gru_stats.id); \
38237 + atomic_long_inc_unchecked(&gru_stats.id); \
38238 } while (0)
38239
38240 #ifdef CONFIG_SGI_GRU_DEBUG
38241 diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
38242 index 2275126..12a9dbfb 100644
38243 --- a/drivers/misc/sgi-xp/xp.h
38244 +++ b/drivers/misc/sgi-xp/xp.h
38245 @@ -289,7 +289,7 @@ struct xpc_interface {
38246 xpc_notify_func, void *);
38247 void (*received) (short, int, void *);
38248 enum xp_retval (*partid_to_nasids) (short, void *);
38249 -};
38250 +} __no_const;
38251
38252 extern struct xpc_interface xpc_interface;
38253
38254 diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
38255 index b94d5f7..7f494c5 100644
38256 --- a/drivers/misc/sgi-xp/xpc.h
38257 +++ b/drivers/misc/sgi-xp/xpc.h
38258 @@ -835,6 +835,7 @@ struct xpc_arch_operations {
38259 void (*received_payload) (struct xpc_channel *, void *);
38260 void (*notify_senders_of_disconnect) (struct xpc_channel *);
38261 };
38262 +typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
38263
38264 /* struct xpc_partition act_state values (for XPC HB) */
38265
38266 @@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
38267 /* found in xpc_main.c */
38268 extern struct device *xpc_part;
38269 extern struct device *xpc_chan;
38270 -extern struct xpc_arch_operations xpc_arch_ops;
38271 +extern xpc_arch_operations_no_const xpc_arch_ops;
38272 extern int xpc_disengage_timelimit;
38273 extern int xpc_disengage_timedout;
38274 extern int xpc_activate_IRQ_rcvd;
38275 diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
38276 index fd3688a..7e211a4 100644
38277 --- a/drivers/misc/sgi-xp/xpc_main.c
38278 +++ b/drivers/misc/sgi-xp/xpc_main.c
38279 @@ -169,7 +169,7 @@ static struct notifier_block xpc_die_notifier = {
38280 .notifier_call = xpc_system_die,
38281 };
38282
38283 -struct xpc_arch_operations xpc_arch_ops;
38284 +xpc_arch_operations_no_const xpc_arch_ops;
38285
38286 /*
38287 * Timer function to enforce the timelimit on the partition disengage.
38288 diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c
38289 index 8b70e03..700bda6 100644
38290 --- a/drivers/misc/sgi-xp/xpc_sn2.c
38291 +++ b/drivers/misc/sgi-xp/xpc_sn2.c
38292 @@ -2350,7 +2350,7 @@ xpc_received_payload_sn2(struct xpc_channel *ch, void *payload)
38293 xpc_acknowledge_msgs_sn2(ch, get, msg->flags);
38294 }
38295
38296 -static struct xpc_arch_operations xpc_arch_ops_sn2 = {
38297 +static const struct xpc_arch_operations xpc_arch_ops_sn2 = {
38298 .setup_partitions = xpc_setup_partitions_sn2,
38299 .teardown_partitions = xpc_teardown_partitions_sn2,
38300 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_sn2,
38301 @@ -2413,7 +2413,9 @@ xpc_init_sn2(void)
38302 int ret;
38303 size_t buf_size;
38304
38305 - xpc_arch_ops = xpc_arch_ops_sn2;
38306 + pax_open_kernel();
38307 + memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_sn2, sizeof(xpc_arch_ops_sn2));
38308 + pax_close_kernel();
38309
38310 if (offsetof(struct xpc_msg_sn2, payload) > XPC_MSG_HDR_MAX_SIZE) {
38311 dev_err(xpc_part, "header portion of struct xpc_msg_sn2 is "
38312 diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
38313 index 8e08d71..7cb8c9b 100644
38314 --- a/drivers/misc/sgi-xp/xpc_uv.c
38315 +++ b/drivers/misc/sgi-xp/xpc_uv.c
38316 @@ -1669,7 +1669,7 @@ xpc_received_payload_uv(struct xpc_channel *ch, void *payload)
38317 XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
38318 }
38319
38320 -static struct xpc_arch_operations xpc_arch_ops_uv = {
38321 +static const struct xpc_arch_operations xpc_arch_ops_uv = {
38322 .setup_partitions = xpc_setup_partitions_uv,
38323 .teardown_partitions = xpc_teardown_partitions_uv,
38324 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv,
38325 @@ -1729,7 +1729,9 @@ static struct xpc_arch_operations xpc_arch_ops_uv = {
38326 int
38327 xpc_init_uv(void)
38328 {
38329 - xpc_arch_ops = xpc_arch_ops_uv;
38330 + pax_open_kernel();
38331 + memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_uv, sizeof(xpc_arch_ops_uv));
38332 + pax_close_kernel();
38333
38334 if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
38335 dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n",
38336 diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
38337 index 6fd20b42..650efe3 100644
38338 --- a/drivers/mmc/host/sdhci-pci.c
38339 +++ b/drivers/mmc/host/sdhci-pci.c
38340 @@ -297,7 +297,7 @@ static const struct sdhci_pci_fixes sdhci_via = {
38341 .probe = via_probe,
38342 };
38343
38344 -static const struct pci_device_id pci_ids[] __devinitdata = {
38345 +static const struct pci_device_id pci_ids[] __devinitconst = {
38346 {
38347 .vendor = PCI_VENDOR_ID_RICOH,
38348 .device = PCI_DEVICE_ID_RICOH_R5C822,
38349 diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
38350 index e7563a9..5f90ce5 100644
38351 --- a/drivers/mtd/chips/cfi_cmdset_0001.c
38352 +++ b/drivers/mtd/chips/cfi_cmdset_0001.c
38353 @@ -743,6 +743,8 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
38354 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
38355 unsigned long timeo = jiffies + HZ;
38356
38357 + pax_track_stack();
38358 +
38359 /* Prevent setting state FL_SYNCING for chip in suspended state. */
38360 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
38361 goto sleep;
38362 @@ -1642,6 +1644,8 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
38363 unsigned long initial_adr;
38364 int initial_len = len;
38365
38366 + pax_track_stack();
38367 +
38368 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
38369 adr += chip->start;
38370 initial_adr = adr;
38371 @@ -1860,6 +1864,8 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
38372 int retries = 3;
38373 int ret;
38374
38375 + pax_track_stack();
38376 +
38377 adr += chip->start;
38378
38379 retry:
38380 diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
38381 index 0667a67..3ab97ed 100644
38382 --- a/drivers/mtd/chips/cfi_cmdset_0020.c
38383 +++ b/drivers/mtd/chips/cfi_cmdset_0020.c
38384 @@ -255,6 +255,8 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
38385 unsigned long cmd_addr;
38386 struct cfi_private *cfi = map->fldrv_priv;
38387
38388 + pax_track_stack();
38389 +
38390 adr += chip->start;
38391
38392 /* Ensure cmd read/writes are aligned. */
38393 @@ -428,6 +430,8 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
38394 DECLARE_WAITQUEUE(wait, current);
38395 int wbufsize, z;
38396
38397 + pax_track_stack();
38398 +
38399 /* M58LW064A requires bus alignment for buffer wriets -- saw */
38400 if (adr & (map_bankwidth(map)-1))
38401 return -EINVAL;
38402 @@ -742,6 +746,8 @@ static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, u
38403 DECLARE_WAITQUEUE(wait, current);
38404 int ret = 0;
38405
38406 + pax_track_stack();
38407 +
38408 adr += chip->start;
38409
38410 /* Let's determine this according to the interleave only once */
38411 @@ -1047,6 +1053,8 @@ static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, un
38412 unsigned long timeo = jiffies + HZ;
38413 DECLARE_WAITQUEUE(wait, current);
38414
38415 + pax_track_stack();
38416 +
38417 adr += chip->start;
38418
38419 /* Let's determine this according to the interleave only once */
38420 @@ -1196,6 +1204,8 @@ static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip,
38421 unsigned long timeo = jiffies + HZ;
38422 DECLARE_WAITQUEUE(wait, current);
38423
38424 + pax_track_stack();
38425 +
38426 adr += chip->start;
38427
38428 /* Let's determine this according to the interleave only once */
38429 diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
38430 index 5bf5f46..c5de373 100644
38431 --- a/drivers/mtd/devices/doc2000.c
38432 +++ b/drivers/mtd/devices/doc2000.c
38433 @@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
38434
38435 /* The ECC will not be calculated correctly if less than 512 is written */
38436 /* DBB-
38437 - if (len != 0x200 && eccbuf)
38438 + if (len != 0x200)
38439 printk(KERN_WARNING
38440 "ECC needs a full sector write (adr: %lx size %lx)\n",
38441 (long) to, (long) len);
38442 diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
38443 index 0990f78..bb4e8a4 100644
38444 --- a/drivers/mtd/devices/doc2001.c
38445 +++ b/drivers/mtd/devices/doc2001.c
38446 @@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mtd, loff_t from, size_t len,
38447 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
38448
38449 /* Don't allow read past end of device */
38450 - if (from >= this->totlen)
38451 + if (from >= this->totlen || !len)
38452 return -EINVAL;
38453
38454 /* Don't allow a single read to cross a 512-byte block boundary */
38455 diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
38456 index e56d6b4..f07e6cf 100644
38457 --- a/drivers/mtd/ftl.c
38458 +++ b/drivers/mtd/ftl.c
38459 @@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *part, uint16_t srcunit,
38460 loff_t offset;
38461 uint16_t srcunitswap = cpu_to_le16(srcunit);
38462
38463 + pax_track_stack();
38464 +
38465 eun = &part->EUNInfo[srcunit];
38466 xfer = &part->XferInfo[xferunit];
38467 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
38468 diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
38469 index 8aca552..146446e 100755
38470 --- a/drivers/mtd/inftlcore.c
38471 +++ b/drivers/mtd/inftlcore.c
38472 @@ -260,6 +260,8 @@ static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned
38473 struct inftl_oob oob;
38474 size_t retlen;
38475
38476 + pax_track_stack();
38477 +
38478 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
38479 "pending=%d)\n", inftl, thisVUC, pendingblock);
38480
38481 diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c
38482 index 32e82ae..ed50953 100644
38483 --- a/drivers/mtd/inftlmount.c
38484 +++ b/drivers/mtd/inftlmount.c
38485 @@ -54,6 +54,8 @@ static int find_boot_record(struct INFTLrecord *inftl)
38486 struct INFTLPartition *ip;
38487 size_t retlen;
38488
38489 + pax_track_stack();
38490 +
38491 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
38492
38493 /*
38494 diff --git a/drivers/mtd/lpddr/qinfo_probe.c b/drivers/mtd/lpddr/qinfo_probe.c
38495 index 79bf40f..fe5f8fd 100644
38496 --- a/drivers/mtd/lpddr/qinfo_probe.c
38497 +++ b/drivers/mtd/lpddr/qinfo_probe.c
38498 @@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map_info *map, struct lpddr_private *lpddr)
38499 {
38500 map_word pfow_val[4];
38501
38502 + pax_track_stack();
38503 +
38504 /* Check identification string */
38505 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
38506 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
38507 diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
38508 index 726a1b8..f46b460 100644
38509 --- a/drivers/mtd/mtdchar.c
38510 +++ b/drivers/mtd/mtdchar.c
38511 @@ -461,6 +461,8 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
38512 u_long size;
38513 struct mtd_info_user info;
38514
38515 + pax_track_stack();
38516 +
38517 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
38518
38519 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
38520 diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
38521 index 1002e18..26d82d5 100644
38522 --- a/drivers/mtd/nftlcore.c
38523 +++ b/drivers/mtd/nftlcore.c
38524 @@ -254,6 +254,8 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
38525 int inplace = 1;
38526 size_t retlen;
38527
38528 + pax_track_stack();
38529 +
38530 memset(BlockMap, 0xff, sizeof(BlockMap));
38531 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
38532
38533 diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
38534 index 8b22b18..6fada85 100644
38535 --- a/drivers/mtd/nftlmount.c
38536 +++ b/drivers/mtd/nftlmount.c
38537 @@ -23,6 +23,7 @@
38538 #include <asm/errno.h>
38539 #include <linux/delay.h>
38540 #include <linux/slab.h>
38541 +#include <linux/sched.h>
38542 #include <linux/mtd/mtd.h>
38543 #include <linux/mtd/nand.h>
38544 #include <linux/mtd/nftl.h>
38545 @@ -44,6 +45,8 @@ static int find_boot_record(struct NFTLrecord *nftl)
38546 struct mtd_info *mtd = nftl->mbd.mtd;
38547 unsigned int i;
38548
38549 + pax_track_stack();
38550 +
38551 /* Assume logical EraseSize == physical erasesize for starting the scan.
38552 We'll sort it out later if we find a MediaHeader which says otherwise */
38553 /* Actually, we won't. The new DiskOnChip driver has already scanned
38554 diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
38555 index 14cec04..d775b87 100644
38556 --- a/drivers/mtd/ubi/build.c
38557 +++ b/drivers/mtd/ubi/build.c
38558 @@ -1255,7 +1255,7 @@ module_exit(ubi_exit);
38559 static int __init bytes_str_to_int(const char *str)
38560 {
38561 char *endp;
38562 - unsigned long result;
38563 + unsigned long result, scale = 1;
38564
38565 result = simple_strtoul(str, &endp, 0);
38566 if (str == endp || result >= INT_MAX) {
38567 @@ -1266,11 +1266,11 @@ static int __init bytes_str_to_int(const char *str)
38568
38569 switch (*endp) {
38570 case 'G':
38571 - result *= 1024;
38572 + scale *= 1024;
38573 case 'M':
38574 - result *= 1024;
38575 + scale *= 1024;
38576 case 'K':
38577 - result *= 1024;
38578 + scale *= 1024;
38579 if (endp[1] == 'i' && endp[2] == 'B')
38580 endp += 2;
38581 case '\0':
38582 @@ -1281,7 +1281,13 @@ static int __init bytes_str_to_int(const char *str)
38583 return -EINVAL;
38584 }
38585
38586 - return result;
38587 + if ((intoverflow_t)result*scale >= INT_MAX) {
38588 + printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
38589 + str);
38590 + return -EINVAL;
38591 + }
38592 +
38593 + return result*scale;
38594 }
38595
38596 /**
38597 diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
38598 index ab68886..ca405e8 100644
38599 --- a/drivers/net/atlx/atl2.c
38600 +++ b/drivers/net/atlx/atl2.c
38601 @@ -2845,7 +2845,7 @@ static void atl2_force_ps(struct atl2_hw *hw)
38602 */
38603
38604 #define ATL2_PARAM(X, desc) \
38605 - static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
38606 + static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
38607 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
38608 MODULE_PARM_DESC(X, desc);
38609 #else
38610 diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
38611 index 4874b2b..67f8526 100644
38612 --- a/drivers/net/bnx2.c
38613 +++ b/drivers/net/bnx2.c
38614 @@ -5809,6 +5809,8 @@ bnx2_test_nvram(struct bnx2 *bp)
38615 int rc = 0;
38616 u32 magic, csum;
38617
38618 + pax_track_stack();
38619 +
38620 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
38621 goto test_nvram_done;
38622
38623 diff --git a/drivers/net/cxgb3/l2t.h b/drivers/net/cxgb3/l2t.h
38624 index fd3eb07..8a6978d 100644
38625 --- a/drivers/net/cxgb3/l2t.h
38626 +++ b/drivers/net/cxgb3/l2t.h
38627 @@ -86,7 +86,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
38628 */
38629 struct l2t_skb_cb {
38630 arp_failure_handler_func arp_failure_handler;
38631 -};
38632 +} __no_const;
38633
38634 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
38635
38636 diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
38637 index 032cfe0..411af379 100644
38638 --- a/drivers/net/cxgb3/t3_hw.c
38639 +++ b/drivers/net/cxgb3/t3_hw.c
38640 @@ -699,6 +699,8 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
38641 int i, addr, ret;
38642 struct t3_vpd vpd;
38643
38644 + pax_track_stack();
38645 +
38646 /*
38647 * Card information is normally at VPD_BASE but some early cards had
38648 * it at 0.
38649 diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
38650 index d1e0563..b9e129c 100644
38651 --- a/drivers/net/e1000e/82571.c
38652 +++ b/drivers/net/e1000e/82571.c
38653 @@ -212,7 +212,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
38654 {
38655 struct e1000_hw *hw = &adapter->hw;
38656 struct e1000_mac_info *mac = &hw->mac;
38657 - struct e1000_mac_operations *func = &mac->ops;
38658 + e1000_mac_operations_no_const *func = &mac->ops;
38659 u32 swsm = 0;
38660 u32 swsm2 = 0;
38661 bool force_clear_smbi = false;
38662 @@ -1656,7 +1656,7 @@ static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw)
38663 temp = er32(ICRXDMTC);
38664 }
38665
38666 -static struct e1000_mac_operations e82571_mac_ops = {
38667 +static const struct e1000_mac_operations e82571_mac_ops = {
38668 /* .check_mng_mode: mac type dependent */
38669 /* .check_for_link: media type dependent */
38670 .id_led_init = e1000e_id_led_init,
38671 @@ -1674,7 +1674,7 @@ static struct e1000_mac_operations e82571_mac_ops = {
38672 .setup_led = e1000e_setup_led_generic,
38673 };
38674
38675 -static struct e1000_phy_operations e82_phy_ops_igp = {
38676 +static const struct e1000_phy_operations e82_phy_ops_igp = {
38677 .acquire_phy = e1000_get_hw_semaphore_82571,
38678 .check_reset_block = e1000e_check_reset_block_generic,
38679 .commit_phy = NULL,
38680 @@ -1691,7 +1691,7 @@ static struct e1000_phy_operations e82_phy_ops_igp = {
38681 .cfg_on_link_up = NULL,
38682 };
38683
38684 -static struct e1000_phy_operations e82_phy_ops_m88 = {
38685 +static const struct e1000_phy_operations e82_phy_ops_m88 = {
38686 .acquire_phy = e1000_get_hw_semaphore_82571,
38687 .check_reset_block = e1000e_check_reset_block_generic,
38688 .commit_phy = e1000e_phy_sw_reset,
38689 @@ -1708,7 +1708,7 @@ static struct e1000_phy_operations e82_phy_ops_m88 = {
38690 .cfg_on_link_up = NULL,
38691 };
38692
38693 -static struct e1000_phy_operations e82_phy_ops_bm = {
38694 +static const struct e1000_phy_operations e82_phy_ops_bm = {
38695 .acquire_phy = e1000_get_hw_semaphore_82571,
38696 .check_reset_block = e1000e_check_reset_block_generic,
38697 .commit_phy = e1000e_phy_sw_reset,
38698 @@ -1725,7 +1725,7 @@ static struct e1000_phy_operations e82_phy_ops_bm = {
38699 .cfg_on_link_up = NULL,
38700 };
38701
38702 -static struct e1000_nvm_operations e82571_nvm_ops = {
38703 +static const struct e1000_nvm_operations e82571_nvm_ops = {
38704 .acquire_nvm = e1000_acquire_nvm_82571,
38705 .read_nvm = e1000e_read_nvm_eerd,
38706 .release_nvm = e1000_release_nvm_82571,
38707 diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
38708 index 47db9bd..fa58ccd 100644
38709 --- a/drivers/net/e1000e/e1000.h
38710 +++ b/drivers/net/e1000e/e1000.h
38711 @@ -375,9 +375,9 @@ struct e1000_info {
38712 u32 pba;
38713 u32 max_hw_frame_size;
38714 s32 (*get_variants)(struct e1000_adapter *);
38715 - struct e1000_mac_operations *mac_ops;
38716 - struct e1000_phy_operations *phy_ops;
38717 - struct e1000_nvm_operations *nvm_ops;
38718 + const struct e1000_mac_operations *mac_ops;
38719 + const struct e1000_phy_operations *phy_ops;
38720 + const struct e1000_nvm_operations *nvm_ops;
38721 };
38722
38723 /* hardware capability, feature, and workaround flags */
38724 diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
38725 index ae5d736..e9a93a1 100644
38726 --- a/drivers/net/e1000e/es2lan.c
38727 +++ b/drivers/net/e1000e/es2lan.c
38728 @@ -207,7 +207,7 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
38729 {
38730 struct e1000_hw *hw = &adapter->hw;
38731 struct e1000_mac_info *mac = &hw->mac;
38732 - struct e1000_mac_operations *func = &mac->ops;
38733 + e1000_mac_operations_no_const *func = &mac->ops;
38734
38735 /* Set media type */
38736 switch (adapter->pdev->device) {
38737 @@ -1365,7 +1365,7 @@ static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw)
38738 temp = er32(ICRXDMTC);
38739 }
38740
38741 -static struct e1000_mac_operations es2_mac_ops = {
38742 +static const struct e1000_mac_operations es2_mac_ops = {
38743 .id_led_init = e1000e_id_led_init,
38744 .check_mng_mode = e1000e_check_mng_mode_generic,
38745 /* check_for_link dependent on media type */
38746 @@ -1383,7 +1383,7 @@ static struct e1000_mac_operations es2_mac_ops = {
38747 .setup_led = e1000e_setup_led_generic,
38748 };
38749
38750 -static struct e1000_phy_operations es2_phy_ops = {
38751 +static const struct e1000_phy_operations es2_phy_ops = {
38752 .acquire_phy = e1000_acquire_phy_80003es2lan,
38753 .check_reset_block = e1000e_check_reset_block_generic,
38754 .commit_phy = e1000e_phy_sw_reset,
38755 @@ -1400,7 +1400,7 @@ static struct e1000_phy_operations es2_phy_ops = {
38756 .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan,
38757 };
38758
38759 -static struct e1000_nvm_operations es2_nvm_ops = {
38760 +static const struct e1000_nvm_operations es2_nvm_ops = {
38761 .acquire_nvm = e1000_acquire_nvm_80003es2lan,
38762 .read_nvm = e1000e_read_nvm_eerd,
38763 .release_nvm = e1000_release_nvm_80003es2lan,
38764 diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
38765 index 11f3b7c..6381887 100644
38766 --- a/drivers/net/e1000e/hw.h
38767 +++ b/drivers/net/e1000e/hw.h
38768 @@ -753,6 +753,7 @@ struct e1000_mac_operations {
38769 s32 (*setup_physical_interface)(struct e1000_hw *);
38770 s32 (*setup_led)(struct e1000_hw *);
38771 };
38772 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
38773
38774 /* Function pointers for the PHY. */
38775 struct e1000_phy_operations {
38776 @@ -774,6 +775,7 @@ struct e1000_phy_operations {
38777 s32 (*write_phy_reg_locked)(struct e1000_hw *, u32, u16);
38778 s32 (*cfg_on_link_up)(struct e1000_hw *);
38779 };
38780 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
38781
38782 /* Function pointers for the NVM. */
38783 struct e1000_nvm_operations {
38784 @@ -785,9 +787,10 @@ struct e1000_nvm_operations {
38785 s32 (*validate_nvm)(struct e1000_hw *);
38786 s32 (*write_nvm)(struct e1000_hw *, u16, u16, u16 *);
38787 };
38788 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
38789
38790 struct e1000_mac_info {
38791 - struct e1000_mac_operations ops;
38792 + e1000_mac_operations_no_const ops;
38793
38794 u8 addr[6];
38795 u8 perm_addr[6];
38796 @@ -823,7 +826,7 @@ struct e1000_mac_info {
38797 };
38798
38799 struct e1000_phy_info {
38800 - struct e1000_phy_operations ops;
38801 + e1000_phy_operations_no_const ops;
38802
38803 enum e1000_phy_type type;
38804
38805 @@ -857,7 +860,7 @@ struct e1000_phy_info {
38806 };
38807
38808 struct e1000_nvm_info {
38809 - struct e1000_nvm_operations ops;
38810 + e1000_nvm_operations_no_const ops;
38811
38812 enum e1000_nvm_type type;
38813 enum e1000_nvm_override override;
38814 diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
38815 index de39f9a..e28d3e0 100644
38816 --- a/drivers/net/e1000e/ich8lan.c
38817 +++ b/drivers/net/e1000e/ich8lan.c
38818 @@ -3463,7 +3463,7 @@ static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
38819 }
38820 }
38821
38822 -static struct e1000_mac_operations ich8_mac_ops = {
38823 +static const struct e1000_mac_operations ich8_mac_ops = {
38824 .id_led_init = e1000e_id_led_init,
38825 .check_mng_mode = e1000_check_mng_mode_ich8lan,
38826 .check_for_link = e1000_check_for_copper_link_ich8lan,
38827 @@ -3481,7 +3481,7 @@ static struct e1000_mac_operations ich8_mac_ops = {
38828 /* id_led_init dependent on mac type */
38829 };
38830
38831 -static struct e1000_phy_operations ich8_phy_ops = {
38832 +static const struct e1000_phy_operations ich8_phy_ops = {
38833 .acquire_phy = e1000_acquire_swflag_ich8lan,
38834 .check_reset_block = e1000_check_reset_block_ich8lan,
38835 .commit_phy = NULL,
38836 @@ -3497,7 +3497,7 @@ static struct e1000_phy_operations ich8_phy_ops = {
38837 .write_phy_reg = e1000e_write_phy_reg_igp,
38838 };
38839
38840 -static struct e1000_nvm_operations ich8_nvm_ops = {
38841 +static const struct e1000_nvm_operations ich8_nvm_ops = {
38842 .acquire_nvm = e1000_acquire_nvm_ich8lan,
38843 .read_nvm = e1000_read_nvm_ich8lan,
38844 .release_nvm = e1000_release_nvm_ich8lan,
38845 diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
38846 index 18d5fbb..542d96d 100644
38847 --- a/drivers/net/fealnx.c
38848 +++ b/drivers/net/fealnx.c
38849 @@ -151,7 +151,7 @@ struct chip_info {
38850 int flags;
38851 };
38852
38853 -static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
38854 +static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
38855 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
38856 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
38857 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
38858 diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
38859 index 0e5b54b..b503f82 100644
38860 --- a/drivers/net/hamradio/6pack.c
38861 +++ b/drivers/net/hamradio/6pack.c
38862 @@ -461,6 +461,8 @@ static void sixpack_receive_buf(struct tty_struct *tty,
38863 unsigned char buf[512];
38864 int count1;
38865
38866 + pax_track_stack();
38867 +
38868 if (!count)
38869 return;
38870
38871 diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
38872 index 5862282..7cce8cb 100644
38873 --- a/drivers/net/ibmveth.c
38874 +++ b/drivers/net/ibmveth.c
38875 @@ -1577,7 +1577,7 @@ static struct attribute * veth_pool_attrs[] = {
38876 NULL,
38877 };
38878
38879 -static struct sysfs_ops veth_pool_ops = {
38880 +static const struct sysfs_ops veth_pool_ops = {
38881 .show = veth_pool_show,
38882 .store = veth_pool_store,
38883 };
38884 diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
38885 index d617f2d..57b5309 100644
38886 --- a/drivers/net/igb/e1000_82575.c
38887 +++ b/drivers/net/igb/e1000_82575.c
38888 @@ -1411,7 +1411,7 @@ void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
38889 wr32(E1000_VT_CTL, vt_ctl);
38890 }
38891
38892 -static struct e1000_mac_operations e1000_mac_ops_82575 = {
38893 +static const struct e1000_mac_operations e1000_mac_ops_82575 = {
38894 .reset_hw = igb_reset_hw_82575,
38895 .init_hw = igb_init_hw_82575,
38896 .check_for_link = igb_check_for_link_82575,
38897 @@ -1420,13 +1420,13 @@ static struct e1000_mac_operations e1000_mac_ops_82575 = {
38898 .get_speed_and_duplex = igb_get_speed_and_duplex_copper,
38899 };
38900
38901 -static struct e1000_phy_operations e1000_phy_ops_82575 = {
38902 +static const struct e1000_phy_operations e1000_phy_ops_82575 = {
38903 .acquire = igb_acquire_phy_82575,
38904 .get_cfg_done = igb_get_cfg_done_82575,
38905 .release = igb_release_phy_82575,
38906 };
38907
38908 -static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
38909 +static const struct e1000_nvm_operations e1000_nvm_ops_82575 = {
38910 .acquire = igb_acquire_nvm_82575,
38911 .read = igb_read_nvm_eerd,
38912 .release = igb_release_nvm_82575,
38913 diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
38914 index 72081df..d855cf5 100644
38915 --- a/drivers/net/igb/e1000_hw.h
38916 +++ b/drivers/net/igb/e1000_hw.h
38917 @@ -288,6 +288,7 @@ struct e1000_mac_operations {
38918 s32 (*read_mac_addr)(struct e1000_hw *);
38919 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
38920 };
38921 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
38922
38923 struct e1000_phy_operations {
38924 s32 (*acquire)(struct e1000_hw *);
38925 @@ -303,6 +304,7 @@ struct e1000_phy_operations {
38926 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
38927 s32 (*write_reg)(struct e1000_hw *, u32, u16);
38928 };
38929 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
38930
38931 struct e1000_nvm_operations {
38932 s32 (*acquire)(struct e1000_hw *);
38933 @@ -310,6 +312,7 @@ struct e1000_nvm_operations {
38934 void (*release)(struct e1000_hw *);
38935 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
38936 };
38937 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
38938
38939 struct e1000_info {
38940 s32 (*get_invariants)(struct e1000_hw *);
38941 @@ -321,7 +324,7 @@ struct e1000_info {
38942 extern const struct e1000_info e1000_82575_info;
38943
38944 struct e1000_mac_info {
38945 - struct e1000_mac_operations ops;
38946 + e1000_mac_operations_no_const ops;
38947
38948 u8 addr[6];
38949 u8 perm_addr[6];
38950 @@ -365,7 +368,7 @@ struct e1000_mac_info {
38951 };
38952
38953 struct e1000_phy_info {
38954 - struct e1000_phy_operations ops;
38955 + e1000_phy_operations_no_const ops;
38956
38957 enum e1000_phy_type type;
38958
38959 @@ -400,7 +403,7 @@ struct e1000_phy_info {
38960 };
38961
38962 struct e1000_nvm_info {
38963 - struct e1000_nvm_operations ops;
38964 + e1000_nvm_operations_no_const ops;
38965
38966 enum e1000_nvm_type type;
38967 enum e1000_nvm_override override;
38968 @@ -446,6 +449,7 @@ struct e1000_mbx_operations {
38969 s32 (*check_for_ack)(struct e1000_hw *, u16);
38970 s32 (*check_for_rst)(struct e1000_hw *, u16);
38971 };
38972 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
38973
38974 struct e1000_mbx_stats {
38975 u32 msgs_tx;
38976 @@ -457,7 +461,7 @@ struct e1000_mbx_stats {
38977 };
38978
38979 struct e1000_mbx_info {
38980 - struct e1000_mbx_operations ops;
38981 + e1000_mbx_operations_no_const ops;
38982 struct e1000_mbx_stats stats;
38983 u32 timeout;
38984 u32 usec_delay;
38985 diff --git a/drivers/net/igbvf/vf.h b/drivers/net/igbvf/vf.h
38986 index 1e8ce37..549c453 100644
38987 --- a/drivers/net/igbvf/vf.h
38988 +++ b/drivers/net/igbvf/vf.h
38989 @@ -187,9 +187,10 @@ struct e1000_mac_operations {
38990 s32 (*read_mac_addr)(struct e1000_hw *);
38991 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
38992 };
38993 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
38994
38995 struct e1000_mac_info {
38996 - struct e1000_mac_operations ops;
38997 + e1000_mac_operations_no_const ops;
38998 u8 addr[6];
38999 u8 perm_addr[6];
39000
39001 @@ -211,6 +212,7 @@ struct e1000_mbx_operations {
39002 s32 (*check_for_ack)(struct e1000_hw *);
39003 s32 (*check_for_rst)(struct e1000_hw *);
39004 };
39005 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
39006
39007 struct e1000_mbx_stats {
39008 u32 msgs_tx;
39009 @@ -222,7 +224,7 @@ struct e1000_mbx_stats {
39010 };
39011
39012 struct e1000_mbx_info {
39013 - struct e1000_mbx_operations ops;
39014 + e1000_mbx_operations_no_const ops;
39015 struct e1000_mbx_stats stats;
39016 u32 timeout;
39017 u32 usec_delay;
39018 diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
39019 index aa7286b..a61394f 100644
39020 --- a/drivers/net/iseries_veth.c
39021 +++ b/drivers/net/iseries_veth.c
39022 @@ -384,7 +384,7 @@ static struct attribute *veth_cnx_default_attrs[] = {
39023 NULL
39024 };
39025
39026 -static struct sysfs_ops veth_cnx_sysfs_ops = {
39027 +static const struct sysfs_ops veth_cnx_sysfs_ops = {
39028 .show = veth_cnx_attribute_show
39029 };
39030
39031 @@ -441,7 +441,7 @@ static struct attribute *veth_port_default_attrs[] = {
39032 NULL
39033 };
39034
39035 -static struct sysfs_ops veth_port_sysfs_ops = {
39036 +static const struct sysfs_ops veth_port_sysfs_ops = {
39037 .show = veth_port_attribute_show
39038 };
39039
39040 diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
39041 index 8aa44dc..fa1e797 100644
39042 --- a/drivers/net/ixgb/ixgb_main.c
39043 +++ b/drivers/net/ixgb/ixgb_main.c
39044 @@ -1052,6 +1052,8 @@ ixgb_set_multi(struct net_device *netdev)
39045 u32 rctl;
39046 int i;
39047
39048 + pax_track_stack();
39049 +
39050 /* Check for Promiscuous and All Multicast modes */
39051
39052 rctl = IXGB_READ_REG(hw, RCTL);
39053 diff --git a/drivers/net/ixgb/ixgb_param.c b/drivers/net/ixgb/ixgb_param.c
39054 index af35e1d..8781785 100644
39055 --- a/drivers/net/ixgb/ixgb_param.c
39056 +++ b/drivers/net/ixgb/ixgb_param.c
39057 @@ -260,6 +260,9 @@ void __devinit
39058 ixgb_check_options(struct ixgb_adapter *adapter)
39059 {
39060 int bd = adapter->bd_number;
39061 +
39062 + pax_track_stack();
39063 +
39064 if (bd >= IXGB_MAX_NIC) {
39065 printk(KERN_NOTICE
39066 "Warning: no configuration for board #%i\n", bd);
39067 diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
39068 index b17aa73..ed74540 100644
39069 --- a/drivers/net/ixgbe/ixgbe_type.h
39070 +++ b/drivers/net/ixgbe/ixgbe_type.h
39071 @@ -2327,6 +2327,7 @@ struct ixgbe_eeprom_operations {
39072 s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
39073 s32 (*update_checksum)(struct ixgbe_hw *);
39074 };
39075 +typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
39076
39077 struct ixgbe_mac_operations {
39078 s32 (*init_hw)(struct ixgbe_hw *);
39079 @@ -2376,6 +2377,7 @@ struct ixgbe_mac_operations {
39080 /* Flow Control */
39081 s32 (*fc_enable)(struct ixgbe_hw *, s32);
39082 };
39083 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
39084
39085 struct ixgbe_phy_operations {
39086 s32 (*identify)(struct ixgbe_hw *);
39087 @@ -2394,9 +2396,10 @@ struct ixgbe_phy_operations {
39088 s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
39089 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
39090 };
39091 +typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
39092
39093 struct ixgbe_eeprom_info {
39094 - struct ixgbe_eeprom_operations ops;
39095 + ixgbe_eeprom_operations_no_const ops;
39096 enum ixgbe_eeprom_type type;
39097 u32 semaphore_delay;
39098 u16 word_size;
39099 @@ -2404,7 +2407,7 @@ struct ixgbe_eeprom_info {
39100 };
39101
39102 struct ixgbe_mac_info {
39103 - struct ixgbe_mac_operations ops;
39104 + ixgbe_mac_operations_no_const ops;
39105 enum ixgbe_mac_type type;
39106 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
39107 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
39108 @@ -2423,7 +2426,7 @@ struct ixgbe_mac_info {
39109 };
39110
39111 struct ixgbe_phy_info {
39112 - struct ixgbe_phy_operations ops;
39113 + ixgbe_phy_operations_no_const ops;
39114 struct mdio_if_info mdio;
39115 enum ixgbe_phy_type type;
39116 u32 id;
39117 diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
39118 index 291a505..2543756 100644
39119 --- a/drivers/net/mlx4/main.c
39120 +++ b/drivers/net/mlx4/main.c
39121 @@ -38,6 +38,7 @@
39122 #include <linux/errno.h>
39123 #include <linux/pci.h>
39124 #include <linux/dma-mapping.h>
39125 +#include <linux/sched.h>
39126
39127 #include <linux/mlx4/device.h>
39128 #include <linux/mlx4/doorbell.h>
39129 @@ -730,6 +731,8 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
39130 u64 icm_size;
39131 int err;
39132
39133 + pax_track_stack();
39134 +
39135 err = mlx4_QUERY_FW(dev);
39136 if (err) {
39137 if (err == -EACCES)
39138 diff --git a/drivers/net/niu.c b/drivers/net/niu.c
39139 index 2dce134..fa5ce75 100644
39140 --- a/drivers/net/niu.c
39141 +++ b/drivers/net/niu.c
39142 @@ -9128,6 +9128,8 @@ static void __devinit niu_try_msix(struct niu *np, u8 *ldg_num_map)
39143 int i, num_irqs, err;
39144 u8 first_ldg;
39145
39146 + pax_track_stack();
39147 +
39148 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
39149 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
39150 ldg_num_map[i] = first_ldg + i;
39151 diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
39152 index c1b3f09..97cd8c4 100644
39153 --- a/drivers/net/pcnet32.c
39154 +++ b/drivers/net/pcnet32.c
39155 @@ -79,7 +79,7 @@ static int cards_found;
39156 /*
39157 * VLB I/O addresses
39158 */
39159 -static unsigned int pcnet32_portlist[] __initdata =
39160 +static unsigned int pcnet32_portlist[] __devinitdata =
39161 { 0x300, 0x320, 0x340, 0x360, 0 };
39162
39163 static int pcnet32_debug = 0;
39164 @@ -267,7 +267,7 @@ struct pcnet32_private {
39165 struct sk_buff **rx_skbuff;
39166 dma_addr_t *tx_dma_addr;
39167 dma_addr_t *rx_dma_addr;
39168 - struct pcnet32_access a;
39169 + struct pcnet32_access *a;
39170 spinlock_t lock; /* Guard lock */
39171 unsigned int cur_rx, cur_tx; /* The next free ring entry */
39172 unsigned int rx_ring_size; /* current rx ring size */
39173 @@ -457,9 +457,9 @@ static void pcnet32_netif_start(struct net_device *dev)
39174 u16 val;
39175
39176 netif_wake_queue(dev);
39177 - val = lp->a.read_csr(ioaddr, CSR3);
39178 + val = lp->a->read_csr(ioaddr, CSR3);
39179 val &= 0x00ff;
39180 - lp->a.write_csr(ioaddr, CSR3, val);
39181 + lp->a->write_csr(ioaddr, CSR3, val);
39182 napi_enable(&lp->napi);
39183 }
39184
39185 @@ -744,7 +744,7 @@ static u32 pcnet32_get_link(struct net_device *dev)
39186 r = mii_link_ok(&lp->mii_if);
39187 } else if (lp->chip_version >= PCNET32_79C970A) {
39188 ulong ioaddr = dev->base_addr; /* card base I/O address */
39189 - r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
39190 + r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
39191 } else { /* can not detect link on really old chips */
39192 r = 1;
39193 }
39194 @@ -806,7 +806,7 @@ static int pcnet32_set_ringparam(struct net_device *dev,
39195 pcnet32_netif_stop(dev);
39196
39197 spin_lock_irqsave(&lp->lock, flags);
39198 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
39199 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
39200
39201 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
39202
39203 @@ -886,7 +886,7 @@ static void pcnet32_ethtool_test(struct net_device *dev,
39204 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
39205 {
39206 struct pcnet32_private *lp = netdev_priv(dev);
39207 - struct pcnet32_access *a = &lp->a; /* access to registers */
39208 + struct pcnet32_access *a = lp->a; /* access to registers */
39209 ulong ioaddr = dev->base_addr; /* card base I/O address */
39210 struct sk_buff *skb; /* sk buff */
39211 int x, i; /* counters */
39212 @@ -906,21 +906,21 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
39213 pcnet32_netif_stop(dev);
39214
39215 spin_lock_irqsave(&lp->lock, flags);
39216 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
39217 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
39218
39219 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
39220
39221 /* Reset the PCNET32 */
39222 - lp->a.reset(ioaddr);
39223 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
39224 + lp->a->reset(ioaddr);
39225 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
39226
39227 /* switch pcnet32 to 32bit mode */
39228 - lp->a.write_bcr(ioaddr, 20, 2);
39229 + lp->a->write_bcr(ioaddr, 20, 2);
39230
39231 /* purge & init rings but don't actually restart */
39232 pcnet32_restart(dev, 0x0000);
39233
39234 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
39235 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
39236
39237 /* Initialize Transmit buffers. */
39238 size = data_len + 15;
39239 @@ -966,10 +966,10 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
39240
39241 /* set int loopback in CSR15 */
39242 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
39243 - lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
39244 + lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
39245
39246 teststatus = cpu_to_le16(0x8000);
39247 - lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
39248 + lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
39249
39250 /* Check status of descriptors */
39251 for (x = 0; x < numbuffs; x++) {
39252 @@ -990,7 +990,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
39253 }
39254 }
39255
39256 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
39257 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
39258 wmb();
39259 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
39260 printk(KERN_DEBUG "%s: RX loopback packets:\n", dev->name);
39261 @@ -1039,7 +1039,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
39262 pcnet32_restart(dev, CSR0_NORMAL);
39263 } else {
39264 pcnet32_purge_rx_ring(dev);
39265 - lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
39266 + lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
39267 }
39268 spin_unlock_irqrestore(&lp->lock, flags);
39269
39270 @@ -1049,7 +1049,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
39271 static void pcnet32_led_blink_callback(struct net_device *dev)
39272 {
39273 struct pcnet32_private *lp = netdev_priv(dev);
39274 - struct pcnet32_access *a = &lp->a;
39275 + struct pcnet32_access *a = lp->a;
39276 ulong ioaddr = dev->base_addr;
39277 unsigned long flags;
39278 int i;
39279 @@ -1066,7 +1066,7 @@ static void pcnet32_led_blink_callback(struct net_device *dev)
39280 static int pcnet32_phys_id(struct net_device *dev, u32 data)
39281 {
39282 struct pcnet32_private *lp = netdev_priv(dev);
39283 - struct pcnet32_access *a = &lp->a;
39284 + struct pcnet32_access *a = lp->a;
39285 ulong ioaddr = dev->base_addr;
39286 unsigned long flags;
39287 int i, regs[4];
39288 @@ -1112,7 +1112,7 @@ static int pcnet32_suspend(struct net_device *dev, unsigned long *flags,
39289 {
39290 int csr5;
39291 struct pcnet32_private *lp = netdev_priv(dev);
39292 - struct pcnet32_access *a = &lp->a;
39293 + struct pcnet32_access *a = lp->a;
39294 ulong ioaddr = dev->base_addr;
39295 int ticks;
39296
39297 @@ -1388,8 +1388,8 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
39298 spin_lock_irqsave(&lp->lock, flags);
39299 if (pcnet32_tx(dev)) {
39300 /* reset the chip to clear the error condition, then restart */
39301 - lp->a.reset(ioaddr);
39302 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
39303 + lp->a->reset(ioaddr);
39304 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
39305 pcnet32_restart(dev, CSR0_START);
39306 netif_wake_queue(dev);
39307 }
39308 @@ -1401,12 +1401,12 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
39309 __napi_complete(napi);
39310
39311 /* clear interrupt masks */
39312 - val = lp->a.read_csr(ioaddr, CSR3);
39313 + val = lp->a->read_csr(ioaddr, CSR3);
39314 val &= 0x00ff;
39315 - lp->a.write_csr(ioaddr, CSR3, val);
39316 + lp->a->write_csr(ioaddr, CSR3, val);
39317
39318 /* Set interrupt enable. */
39319 - lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
39320 + lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
39321
39322 spin_unlock_irqrestore(&lp->lock, flags);
39323 }
39324 @@ -1429,7 +1429,7 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
39325 int i, csr0;
39326 u16 *buff = ptr;
39327 struct pcnet32_private *lp = netdev_priv(dev);
39328 - struct pcnet32_access *a = &lp->a;
39329 + struct pcnet32_access *a = lp->a;
39330 ulong ioaddr = dev->base_addr;
39331 unsigned long flags;
39332
39333 @@ -1466,9 +1466,9 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
39334 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
39335 if (lp->phymask & (1 << j)) {
39336 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
39337 - lp->a.write_bcr(ioaddr, 33,
39338 + lp->a->write_bcr(ioaddr, 33,
39339 (j << 5) | i);
39340 - *buff++ = lp->a.read_bcr(ioaddr, 34);
39341 + *buff++ = lp->a->read_bcr(ioaddr, 34);
39342 }
39343 }
39344 }
39345 @@ -1858,7 +1858,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
39346 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
39347 lp->options |= PCNET32_PORT_FD;
39348
39349 - lp->a = *a;
39350 + lp->a = a;
39351
39352 /* prior to register_netdev, dev->name is not yet correct */
39353 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
39354 @@ -1917,7 +1917,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
39355 if (lp->mii) {
39356 /* lp->phycount and lp->phymask are set to 0 by memset above */
39357
39358 - lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
39359 + lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
39360 /* scan for PHYs */
39361 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
39362 unsigned short id1, id2;
39363 @@ -1938,7 +1938,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
39364 "Found PHY %04x:%04x at address %d.\n",
39365 id1, id2, i);
39366 }
39367 - lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
39368 + lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
39369 if (lp->phycount > 1) {
39370 lp->options |= PCNET32_PORT_MII;
39371 }
39372 @@ -2109,10 +2109,10 @@ static int pcnet32_open(struct net_device *dev)
39373 }
39374
39375 /* Reset the PCNET32 */
39376 - lp->a.reset(ioaddr);
39377 + lp->a->reset(ioaddr);
39378
39379 /* switch pcnet32 to 32bit mode */
39380 - lp->a.write_bcr(ioaddr, 20, 2);
39381 + lp->a->write_bcr(ioaddr, 20, 2);
39382
39383 if (netif_msg_ifup(lp))
39384 printk(KERN_DEBUG
39385 @@ -2122,14 +2122,14 @@ static int pcnet32_open(struct net_device *dev)
39386 (u32) (lp->init_dma_addr));
39387
39388 /* set/reset autoselect bit */
39389 - val = lp->a.read_bcr(ioaddr, 2) & ~2;
39390 + val = lp->a->read_bcr(ioaddr, 2) & ~2;
39391 if (lp->options & PCNET32_PORT_ASEL)
39392 val |= 2;
39393 - lp->a.write_bcr(ioaddr, 2, val);
39394 + lp->a->write_bcr(ioaddr, 2, val);
39395
39396 /* handle full duplex setting */
39397 if (lp->mii_if.full_duplex) {
39398 - val = lp->a.read_bcr(ioaddr, 9) & ~3;
39399 + val = lp->a->read_bcr(ioaddr, 9) & ~3;
39400 if (lp->options & PCNET32_PORT_FD) {
39401 val |= 1;
39402 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
39403 @@ -2139,14 +2139,14 @@ static int pcnet32_open(struct net_device *dev)
39404 if (lp->chip_version == 0x2627)
39405 val |= 3;
39406 }
39407 - lp->a.write_bcr(ioaddr, 9, val);
39408 + lp->a->write_bcr(ioaddr, 9, val);
39409 }
39410
39411 /* set/reset GPSI bit in test register */
39412 - val = lp->a.read_csr(ioaddr, 124) & ~0x10;
39413 + val = lp->a->read_csr(ioaddr, 124) & ~0x10;
39414 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
39415 val |= 0x10;
39416 - lp->a.write_csr(ioaddr, 124, val);
39417 + lp->a->write_csr(ioaddr, 124, val);
39418
39419 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
39420 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
39421 @@ -2167,24 +2167,24 @@ static int pcnet32_open(struct net_device *dev)
39422 * duplex, and/or enable auto negotiation, and clear DANAS
39423 */
39424 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
39425 - lp->a.write_bcr(ioaddr, 32,
39426 - lp->a.read_bcr(ioaddr, 32) | 0x0080);
39427 + lp->a->write_bcr(ioaddr, 32,
39428 + lp->a->read_bcr(ioaddr, 32) | 0x0080);
39429 /* disable Auto Negotiation, set 10Mpbs, HD */
39430 - val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
39431 + val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
39432 if (lp->options & PCNET32_PORT_FD)
39433 val |= 0x10;
39434 if (lp->options & PCNET32_PORT_100)
39435 val |= 0x08;
39436 - lp->a.write_bcr(ioaddr, 32, val);
39437 + lp->a->write_bcr(ioaddr, 32, val);
39438 } else {
39439 if (lp->options & PCNET32_PORT_ASEL) {
39440 - lp->a.write_bcr(ioaddr, 32,
39441 - lp->a.read_bcr(ioaddr,
39442 + lp->a->write_bcr(ioaddr, 32,
39443 + lp->a->read_bcr(ioaddr,
39444 32) | 0x0080);
39445 /* enable auto negotiate, setup, disable fd */
39446 - val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
39447 + val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
39448 val |= 0x20;
39449 - lp->a.write_bcr(ioaddr, 32, val);
39450 + lp->a->write_bcr(ioaddr, 32, val);
39451 }
39452 }
39453 } else {
39454 @@ -2197,10 +2197,10 @@ static int pcnet32_open(struct net_device *dev)
39455 * There is really no good other way to handle multiple PHYs
39456 * other than turning off all automatics
39457 */
39458 - val = lp->a.read_bcr(ioaddr, 2);
39459 - lp->a.write_bcr(ioaddr, 2, val & ~2);
39460 - val = lp->a.read_bcr(ioaddr, 32);
39461 - lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
39462 + val = lp->a->read_bcr(ioaddr, 2);
39463 + lp->a->write_bcr(ioaddr, 2, val & ~2);
39464 + val = lp->a->read_bcr(ioaddr, 32);
39465 + lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
39466
39467 if (!(lp->options & PCNET32_PORT_ASEL)) {
39468 /* setup ecmd */
39469 @@ -2210,7 +2210,7 @@ static int pcnet32_open(struct net_device *dev)
39470 ecmd.speed =
39471 lp->
39472 options & PCNET32_PORT_100 ? SPEED_100 : SPEED_10;
39473 - bcr9 = lp->a.read_bcr(ioaddr, 9);
39474 + bcr9 = lp->a->read_bcr(ioaddr, 9);
39475
39476 if (lp->options & PCNET32_PORT_FD) {
39477 ecmd.duplex = DUPLEX_FULL;
39478 @@ -2219,7 +2219,7 @@ static int pcnet32_open(struct net_device *dev)
39479 ecmd.duplex = DUPLEX_HALF;
39480 bcr9 |= ~(1 << 0);
39481 }
39482 - lp->a.write_bcr(ioaddr, 9, bcr9);
39483 + lp->a->write_bcr(ioaddr, 9, bcr9);
39484 }
39485
39486 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
39487 @@ -2252,9 +2252,9 @@ static int pcnet32_open(struct net_device *dev)
39488
39489 #ifdef DO_DXSUFLO
39490 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
39491 - val = lp->a.read_csr(ioaddr, CSR3);
39492 + val = lp->a->read_csr(ioaddr, CSR3);
39493 val |= 0x40;
39494 - lp->a.write_csr(ioaddr, CSR3, val);
39495 + lp->a->write_csr(ioaddr, CSR3, val);
39496 }
39497 #endif
39498
39499 @@ -2270,11 +2270,11 @@ static int pcnet32_open(struct net_device *dev)
39500 napi_enable(&lp->napi);
39501
39502 /* Re-initialize the PCNET32, and start it when done. */
39503 - lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
39504 - lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
39505 + lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
39506 + lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
39507
39508 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
39509 - lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
39510 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
39511 + lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
39512
39513 netif_start_queue(dev);
39514
39515 @@ -2286,20 +2286,20 @@ static int pcnet32_open(struct net_device *dev)
39516
39517 i = 0;
39518 while (i++ < 100)
39519 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
39520 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
39521 break;
39522 /*
39523 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
39524 * reports that doing so triggers a bug in the '974.
39525 */
39526 - lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
39527 + lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
39528
39529 if (netif_msg_ifup(lp))
39530 printk(KERN_DEBUG
39531 "%s: pcnet32 open after %d ticks, init block %#x csr0 %4.4x.\n",
39532 dev->name, i,
39533 (u32) (lp->init_dma_addr),
39534 - lp->a.read_csr(ioaddr, CSR0));
39535 + lp->a->read_csr(ioaddr, CSR0));
39536
39537 spin_unlock_irqrestore(&lp->lock, flags);
39538
39539 @@ -2313,7 +2313,7 @@ static int pcnet32_open(struct net_device *dev)
39540 * Switch back to 16bit mode to avoid problems with dumb
39541 * DOS packet driver after a warm reboot
39542 */
39543 - lp->a.write_bcr(ioaddr, 20, 4);
39544 + lp->a->write_bcr(ioaddr, 20, 4);
39545
39546 err_free_irq:
39547 spin_unlock_irqrestore(&lp->lock, flags);
39548 @@ -2420,7 +2420,7 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
39549
39550 /* wait for stop */
39551 for (i = 0; i < 100; i++)
39552 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
39553 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
39554 break;
39555
39556 if (i >= 100 && netif_msg_drv(lp))
39557 @@ -2433,13 +2433,13 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
39558 return;
39559
39560 /* ReInit Ring */
39561 - lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
39562 + lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
39563 i = 0;
39564 while (i++ < 1000)
39565 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
39566 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
39567 break;
39568
39569 - lp->a.write_csr(ioaddr, CSR0, csr0_bits);
39570 + lp->a->write_csr(ioaddr, CSR0, csr0_bits);
39571 }
39572
39573 static void pcnet32_tx_timeout(struct net_device *dev)
39574 @@ -2452,8 +2452,8 @@ static void pcnet32_tx_timeout(struct net_device *dev)
39575 if (pcnet32_debug & NETIF_MSG_DRV)
39576 printk(KERN_ERR
39577 "%s: transmit timed out, status %4.4x, resetting.\n",
39578 - dev->name, lp->a.read_csr(ioaddr, CSR0));
39579 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
39580 + dev->name, lp->a->read_csr(ioaddr, CSR0));
39581 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
39582 dev->stats.tx_errors++;
39583 if (netif_msg_tx_err(lp)) {
39584 int i;
39585 @@ -2497,7 +2497,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
39586 if (netif_msg_tx_queued(lp)) {
39587 printk(KERN_DEBUG
39588 "%s: pcnet32_start_xmit() called, csr0 %4.4x.\n",
39589 - dev->name, lp->a.read_csr(ioaddr, CSR0));
39590 + dev->name, lp->a->read_csr(ioaddr, CSR0));
39591 }
39592
39593 /* Default status -- will not enable Successful-TxDone
39594 @@ -2528,7 +2528,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
39595 dev->stats.tx_bytes += skb->len;
39596
39597 /* Trigger an immediate send poll. */
39598 - lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
39599 + lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
39600
39601 dev->trans_start = jiffies;
39602
39603 @@ -2555,18 +2555,18 @@ pcnet32_interrupt(int irq, void *dev_id)
39604
39605 spin_lock(&lp->lock);
39606
39607 - csr0 = lp->a.read_csr(ioaddr, CSR0);
39608 + csr0 = lp->a->read_csr(ioaddr, CSR0);
39609 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
39610 if (csr0 == 0xffff) {
39611 break; /* PCMCIA remove happened */
39612 }
39613 /* Acknowledge all of the current interrupt sources ASAP. */
39614 - lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
39615 + lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
39616
39617 if (netif_msg_intr(lp))
39618 printk(KERN_DEBUG
39619 "%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
39620 - dev->name, csr0, lp->a.read_csr(ioaddr, CSR0));
39621 + dev->name, csr0, lp->a->read_csr(ioaddr, CSR0));
39622
39623 /* Log misc errors. */
39624 if (csr0 & 0x4000)
39625 @@ -2595,19 +2595,19 @@ pcnet32_interrupt(int irq, void *dev_id)
39626 if (napi_schedule_prep(&lp->napi)) {
39627 u16 val;
39628 /* set interrupt masks */
39629 - val = lp->a.read_csr(ioaddr, CSR3);
39630 + val = lp->a->read_csr(ioaddr, CSR3);
39631 val |= 0x5f00;
39632 - lp->a.write_csr(ioaddr, CSR3, val);
39633 + lp->a->write_csr(ioaddr, CSR3, val);
39634
39635 __napi_schedule(&lp->napi);
39636 break;
39637 }
39638 - csr0 = lp->a.read_csr(ioaddr, CSR0);
39639 + csr0 = lp->a->read_csr(ioaddr, CSR0);
39640 }
39641
39642 if (netif_msg_intr(lp))
39643 printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n",
39644 - dev->name, lp->a.read_csr(ioaddr, CSR0));
39645 + dev->name, lp->a->read_csr(ioaddr, CSR0));
39646
39647 spin_unlock(&lp->lock);
39648
39649 @@ -2627,21 +2627,21 @@ static int pcnet32_close(struct net_device *dev)
39650
39651 spin_lock_irqsave(&lp->lock, flags);
39652
39653 - dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
39654 + dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
39655
39656 if (netif_msg_ifdown(lp))
39657 printk(KERN_DEBUG
39658 "%s: Shutting down ethercard, status was %2.2x.\n",
39659 - dev->name, lp->a.read_csr(ioaddr, CSR0));
39660 + dev->name, lp->a->read_csr(ioaddr, CSR0));
39661
39662 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
39663 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
39664 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
39665
39666 /*
39667 * Switch back to 16bit mode to avoid problems with dumb
39668 * DOS packet driver after a warm reboot
39669 */
39670 - lp->a.write_bcr(ioaddr, 20, 4);
39671 + lp->a->write_bcr(ioaddr, 20, 4);
39672
39673 spin_unlock_irqrestore(&lp->lock, flags);
39674
39675 @@ -2664,7 +2664,7 @@ static struct net_device_stats *pcnet32_get_stats(struct net_device *dev)
39676 unsigned long flags;
39677
39678 spin_lock_irqsave(&lp->lock, flags);
39679 - dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
39680 + dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
39681 spin_unlock_irqrestore(&lp->lock, flags);
39682
39683 return &dev->stats;
39684 @@ -2686,10 +2686,10 @@ static void pcnet32_load_multicast(struct net_device *dev)
39685 if (dev->flags & IFF_ALLMULTI) {
39686 ib->filter[0] = cpu_to_le32(~0U);
39687 ib->filter[1] = cpu_to_le32(~0U);
39688 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
39689 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
39690 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
39691 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
39692 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
39693 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
39694 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
39695 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
39696 return;
39697 }
39698 /* clear the multicast filter */
39699 @@ -2710,7 +2710,7 @@ static void pcnet32_load_multicast(struct net_device *dev)
39700 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
39701 }
39702 for (i = 0; i < 4; i++)
39703 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
39704 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
39705 le16_to_cpu(mcast_table[i]));
39706 return;
39707 }
39708 @@ -2726,7 +2726,7 @@ static void pcnet32_set_multicast_list(struct net_device *dev)
39709
39710 spin_lock_irqsave(&lp->lock, flags);
39711 suspended = pcnet32_suspend(dev, &flags, 0);
39712 - csr15 = lp->a.read_csr(ioaddr, CSR15);
39713 + csr15 = lp->a->read_csr(ioaddr, CSR15);
39714 if (dev->flags & IFF_PROMISC) {
39715 /* Log any net taps. */
39716 if (netif_msg_hw(lp))
39717 @@ -2735,21 +2735,21 @@ static void pcnet32_set_multicast_list(struct net_device *dev)
39718 lp->init_block->mode =
39719 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
39720 7);
39721 - lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
39722 + lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
39723 } else {
39724 lp->init_block->mode =
39725 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
39726 - lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
39727 + lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
39728 pcnet32_load_multicast(dev);
39729 }
39730
39731 if (suspended) {
39732 int csr5;
39733 /* clear SUSPEND (SPND) - CSR5 bit 0 */
39734 - csr5 = lp->a.read_csr(ioaddr, CSR5);
39735 - lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
39736 + csr5 = lp->a->read_csr(ioaddr, CSR5);
39737 + lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
39738 } else {
39739 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
39740 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
39741 pcnet32_restart(dev, CSR0_NORMAL);
39742 netif_wake_queue(dev);
39743 }
39744 @@ -2767,8 +2767,8 @@ static int mdio_read(struct net_device *dev, int phy_id, int reg_num)
39745 if (!lp->mii)
39746 return 0;
39747
39748 - lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
39749 - val_out = lp->a.read_bcr(ioaddr, 34);
39750 + lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
39751 + val_out = lp->a->read_bcr(ioaddr, 34);
39752
39753 return val_out;
39754 }
39755 @@ -2782,8 +2782,8 @@ static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val)
39756 if (!lp->mii)
39757 return;
39758
39759 - lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
39760 - lp->a.write_bcr(ioaddr, 34, val);
39761 + lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
39762 + lp->a->write_bcr(ioaddr, 34, val);
39763 }
39764
39765 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
39766 @@ -2862,7 +2862,7 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
39767 curr_link = mii_link_ok(&lp->mii_if);
39768 } else {
39769 ulong ioaddr = dev->base_addr; /* card base I/O address */
39770 - curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
39771 + curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
39772 }
39773 if (!curr_link) {
39774 if (prev_link || verbose) {
39775 @@ -2887,13 +2887,13 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
39776 (ecmd.duplex ==
39777 DUPLEX_FULL) ? "full" : "half");
39778 }
39779 - bcr9 = lp->a.read_bcr(dev->base_addr, 9);
39780 + bcr9 = lp->a->read_bcr(dev->base_addr, 9);
39781 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
39782 if (lp->mii_if.full_duplex)
39783 bcr9 |= (1 << 0);
39784 else
39785 bcr9 &= ~(1 << 0);
39786 - lp->a.write_bcr(dev->base_addr, 9, bcr9);
39787 + lp->a->write_bcr(dev->base_addr, 9, bcr9);
39788 }
39789 } else {
39790 if (netif_msg_link(lp))
39791 diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
39792 index 7cc9898..6eb50d3 100644
39793 --- a/drivers/net/sis190.c
39794 +++ b/drivers/net/sis190.c
39795 @@ -1598,7 +1598,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
39796 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
39797 struct net_device *dev)
39798 {
39799 - static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
39800 + static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
39801 struct sis190_private *tp = netdev_priv(dev);
39802 struct pci_dev *isa_bridge;
39803 u8 reg, tmp8;
39804 diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
39805 index e13685a..60c948c 100644
39806 --- a/drivers/net/sundance.c
39807 +++ b/drivers/net/sundance.c
39808 @@ -225,7 +225,7 @@ enum {
39809 struct pci_id_info {
39810 const char *name;
39811 };
39812 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
39813 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
39814 {"D-Link DFE-550TX FAST Ethernet Adapter"},
39815 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
39816 {"D-Link DFE-580TX 4 port Server Adapter"},
39817 diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
39818 index 529f55a..cccaa18 100644
39819 --- a/drivers/net/tg3.h
39820 +++ b/drivers/net/tg3.h
39821 @@ -95,6 +95,7 @@
39822 #define CHIPREV_ID_5750_A0 0x4000
39823 #define CHIPREV_ID_5750_A1 0x4001
39824 #define CHIPREV_ID_5750_A3 0x4003
39825 +#define CHIPREV_ID_5750_C1 0x4201
39826 #define CHIPREV_ID_5750_C2 0x4202
39827 #define CHIPREV_ID_5752_A0_HW 0x5000
39828 #define CHIPREV_ID_5752_A0 0x6000
39829 diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
39830 index b9db1b5..720f9ce 100644
39831 --- a/drivers/net/tokenring/abyss.c
39832 +++ b/drivers/net/tokenring/abyss.c
39833 @@ -451,10 +451,12 @@ static struct pci_driver abyss_driver = {
39834
39835 static int __init abyss_init (void)
39836 {
39837 - abyss_netdev_ops = tms380tr_netdev_ops;
39838 + pax_open_kernel();
39839 + memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
39840
39841 - abyss_netdev_ops.ndo_open = abyss_open;
39842 - abyss_netdev_ops.ndo_stop = abyss_close;
39843 + *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
39844 + *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
39845 + pax_close_kernel();
39846
39847 return pci_register_driver(&abyss_driver);
39848 }
39849 diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
39850 index 456f8bf..373e56d 100644
39851 --- a/drivers/net/tokenring/madgemc.c
39852 +++ b/drivers/net/tokenring/madgemc.c
39853 @@ -755,9 +755,11 @@ static struct mca_driver madgemc_driver = {
39854
39855 static int __init madgemc_init (void)
39856 {
39857 - madgemc_netdev_ops = tms380tr_netdev_ops;
39858 - madgemc_netdev_ops.ndo_open = madgemc_open;
39859 - madgemc_netdev_ops.ndo_stop = madgemc_close;
39860 + pax_open_kernel();
39861 + memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
39862 + *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
39863 + *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
39864 + pax_close_kernel();
39865
39866 return mca_register_driver (&madgemc_driver);
39867 }
39868 diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
39869 index 16e8783..925bd49 100644
39870 --- a/drivers/net/tokenring/proteon.c
39871 +++ b/drivers/net/tokenring/proteon.c
39872 @@ -353,9 +353,11 @@ static int __init proteon_init(void)
39873 struct platform_device *pdev;
39874 int i, num = 0, err = 0;
39875
39876 - proteon_netdev_ops = tms380tr_netdev_ops;
39877 - proteon_netdev_ops.ndo_open = proteon_open;
39878 - proteon_netdev_ops.ndo_stop = tms380tr_close;
39879 + pax_open_kernel();
39880 + memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
39881 + *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
39882 + *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
39883 + pax_close_kernel();
39884
39885 err = platform_driver_register(&proteon_driver);
39886 if (err)
39887 diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
39888 index 46db5c5..37c1536 100644
39889 --- a/drivers/net/tokenring/skisa.c
39890 +++ b/drivers/net/tokenring/skisa.c
39891 @@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
39892 struct platform_device *pdev;
39893 int i, num = 0, err = 0;
39894
39895 - sk_isa_netdev_ops = tms380tr_netdev_ops;
39896 - sk_isa_netdev_ops.ndo_open = sk_isa_open;
39897 - sk_isa_netdev_ops.ndo_stop = tms380tr_close;
39898 + pax_open_kernel();
39899 + memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
39900 + *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
39901 + *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
39902 + pax_close_kernel();
39903
39904 err = platform_driver_register(&sk_isa_driver);
39905 if (err)
39906 diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
39907 index 74e5ba4..5cf6bc9 100644
39908 --- a/drivers/net/tulip/de2104x.c
39909 +++ b/drivers/net/tulip/de2104x.c
39910 @@ -1785,6 +1785,8 @@ static void __devinit de21041_get_srom_info (struct de_private *de)
39911 struct de_srom_info_leaf *il;
39912 void *bufp;
39913
39914 + pax_track_stack();
39915 +
39916 /* download entire eeprom */
39917 for (i = 0; i < DE_EEPROM_WORDS; i++)
39918 ((__le16 *)ee_data)[i] =
39919 diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
39920 index a8349b7..90f9dfe 100644
39921 --- a/drivers/net/tulip/de4x5.c
39922 +++ b/drivers/net/tulip/de4x5.c
39923 @@ -5472,7 +5472,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
39924 for (i=0; i<ETH_ALEN; i++) {
39925 tmp.addr[i] = dev->dev_addr[i];
39926 }
39927 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
39928 + if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
39929 break;
39930
39931 case DE4X5_SET_HWADDR: /* Set the hardware address */
39932 @@ -5512,7 +5512,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
39933 spin_lock_irqsave(&lp->lock, flags);
39934 memcpy(&statbuf, &lp->pktStats, ioc->len);
39935 spin_unlock_irqrestore(&lp->lock, flags);
39936 - if (copy_to_user(ioc->data, &statbuf, ioc->len))
39937 + if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
39938 return -EFAULT;
39939 break;
39940 }
39941 diff --git a/drivers/net/tulip/eeprom.c b/drivers/net/tulip/eeprom.c
39942 index 391acd3..56d11cd 100644
39943 --- a/drivers/net/tulip/eeprom.c
39944 +++ b/drivers/net/tulip/eeprom.c
39945 @@ -80,7 +80,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
39946 {NULL}};
39947
39948
39949 -static const char *block_name[] __devinitdata = {
39950 +static const char *block_name[] __devinitconst = {
39951 "21140 non-MII",
39952 "21140 MII PHY",
39953 "21142 Serial PHY",
39954 diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
39955 index b38d3b7..b1cff23 100644
39956 --- a/drivers/net/tulip/winbond-840.c
39957 +++ b/drivers/net/tulip/winbond-840.c
39958 @@ -235,7 +235,7 @@ struct pci_id_info {
39959 int drv_flags; /* Driver use, intended as capability flags. */
39960 };
39961
39962 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
39963 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
39964 { /* Sometime a Level-One switch card. */
39965 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
39966 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
39967 diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
39968 index f450bc9..2b747c8 100644
39969 --- a/drivers/net/usb/hso.c
39970 +++ b/drivers/net/usb/hso.c
39971 @@ -71,7 +71,7 @@
39972 #include <asm/byteorder.h>
39973 #include <linux/serial_core.h>
39974 #include <linux/serial.h>
39975 -
39976 +#include <asm/local.h>
39977
39978 #define DRIVER_VERSION "1.2"
39979 #define MOD_AUTHOR "Option Wireless"
39980 @@ -258,7 +258,7 @@ struct hso_serial {
39981
39982 /* from usb_serial_port */
39983 struct tty_struct *tty;
39984 - int open_count;
39985 + local_t open_count;
39986 spinlock_t serial_lock;
39987
39988 int (*write_data) (struct hso_serial *serial);
39989 @@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
39990 struct urb *urb;
39991
39992 urb = serial->rx_urb[0];
39993 - if (serial->open_count > 0) {
39994 + if (local_read(&serial->open_count) > 0) {
39995 count = put_rxbuf_data(urb, serial);
39996 if (count == -1)
39997 return;
39998 @@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
39999 DUMP1(urb->transfer_buffer, urb->actual_length);
40000
40001 /* Anyone listening? */
40002 - if (serial->open_count == 0)
40003 + if (local_read(&serial->open_count) == 0)
40004 return;
40005
40006 if (status == 0) {
40007 @@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
40008 spin_unlock_irq(&serial->serial_lock);
40009
40010 /* check for port already opened, if not set the termios */
40011 - serial->open_count++;
40012 - if (serial->open_count == 1) {
40013 + if (local_inc_return(&serial->open_count) == 1) {
40014 tty->low_latency = 1;
40015 serial->rx_state = RX_IDLE;
40016 /* Force default termio settings */
40017 @@ -1325,7 +1324,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
40018 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
40019 if (result) {
40020 hso_stop_serial_device(serial->parent);
40021 - serial->open_count--;
40022 + local_dec(&serial->open_count);
40023 kref_put(&serial->parent->ref, hso_serial_ref_free);
40024 }
40025 } else {
40026 @@ -1362,10 +1361,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
40027
40028 /* reset the rts and dtr */
40029 /* do the actual close */
40030 - serial->open_count--;
40031 + local_dec(&serial->open_count);
40032
40033 - if (serial->open_count <= 0) {
40034 - serial->open_count = 0;
40035 + if (local_read(&serial->open_count) <= 0) {
40036 + local_set(&serial->open_count, 0);
40037 spin_lock_irq(&serial->serial_lock);
40038 if (serial->tty == tty) {
40039 serial->tty->driver_data = NULL;
40040 @@ -1447,7 +1446,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
40041
40042 /* the actual setup */
40043 spin_lock_irqsave(&serial->serial_lock, flags);
40044 - if (serial->open_count)
40045 + if (local_read(&serial->open_count))
40046 _hso_serial_set_termios(tty, old);
40047 else
40048 tty->termios = old;
40049 @@ -3097,7 +3096,7 @@ static int hso_resume(struct usb_interface *iface)
40050 /* Start all serial ports */
40051 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
40052 if (serial_table[i] && (serial_table[i]->interface == iface)) {
40053 - if (dev2ser(serial_table[i])->open_count) {
40054 + if (local_read(&dev2ser(serial_table[i])->open_count)) {
40055 result =
40056 hso_start_serial_device(serial_table[i], GFP_NOIO);
40057 hso_kick_transmit(dev2ser(serial_table[i]));
40058 diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
40059 index 3e94f0c..ffdd926 100644
40060 --- a/drivers/net/vxge/vxge-config.h
40061 +++ b/drivers/net/vxge/vxge-config.h
40062 @@ -474,7 +474,7 @@ struct vxge_hw_uld_cbs {
40063 void (*link_down)(struct __vxge_hw_device *devh);
40064 void (*crit_err)(struct __vxge_hw_device *devh,
40065 enum vxge_hw_event type, u64 ext_data);
40066 -};
40067 +} __no_const;
40068
40069 /*
40070 * struct __vxge_hw_blockpool_entry - Block private data structure
40071 diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
40072 index 068d7a9..35293de 100644
40073 --- a/drivers/net/vxge/vxge-main.c
40074 +++ b/drivers/net/vxge/vxge-main.c
40075 @@ -93,6 +93,8 @@ static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
40076 struct sk_buff *completed[NR_SKB_COMPLETED];
40077 int more;
40078
40079 + pax_track_stack();
40080 +
40081 do {
40082 more = 0;
40083 skb_ptr = completed;
40084 @@ -1779,6 +1781,8 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
40085 u8 mtable[256] = {0}; /* CPU to vpath mapping */
40086 int index;
40087
40088 + pax_track_stack();
40089 +
40090 /*
40091 * Filling
40092 * - itable with bucket numbers
40093 diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
40094 index 461742b..81be42e 100644
40095 --- a/drivers/net/vxge/vxge-traffic.h
40096 +++ b/drivers/net/vxge/vxge-traffic.h
40097 @@ -2123,7 +2123,7 @@ struct vxge_hw_mempool_cbs {
40098 struct vxge_hw_mempool_dma *dma_object,
40099 u32 index,
40100 u32 is_last);
40101 -};
40102 +} __no_const;
40103
40104 void
40105 __vxge_hw_mempool_destroy(
40106 diff --git a/drivers/net/wan/cycx_x25.c b/drivers/net/wan/cycx_x25.c
40107 index cd8cb95..4153b79 100644
40108 --- a/drivers/net/wan/cycx_x25.c
40109 +++ b/drivers/net/wan/cycx_x25.c
40110 @@ -1017,6 +1017,8 @@ static void hex_dump(char *msg, unsigned char *p, int len)
40111 unsigned char hex[1024],
40112 * phex = hex;
40113
40114 + pax_track_stack();
40115 +
40116 if (len >= (sizeof(hex) / 2))
40117 len = (sizeof(hex) / 2) - 1;
40118
40119 diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
40120 index aa9248f..a4e3c3b 100644
40121 --- a/drivers/net/wan/hdlc_x25.c
40122 +++ b/drivers/net/wan/hdlc_x25.c
40123 @@ -136,16 +136,16 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
40124
40125 static int x25_open(struct net_device *dev)
40126 {
40127 - struct lapb_register_struct cb;
40128 + static struct lapb_register_struct cb = {
40129 + .connect_confirmation = x25_connected,
40130 + .connect_indication = x25_connected,
40131 + .disconnect_confirmation = x25_disconnected,
40132 + .disconnect_indication = x25_disconnected,
40133 + .data_indication = x25_data_indication,
40134 + .data_transmit = x25_data_transmit
40135 + };
40136 int result;
40137
40138 - cb.connect_confirmation = x25_connected;
40139 - cb.connect_indication = x25_connected;
40140 - cb.disconnect_confirmation = x25_disconnected;
40141 - cb.disconnect_indication = x25_disconnected;
40142 - cb.data_indication = x25_data_indication;
40143 - cb.data_transmit = x25_data_transmit;
40144 -
40145 result = lapb_register(dev, &cb);
40146 if (result != LAPB_OK)
40147 return result;
40148 diff --git a/drivers/net/wimax/i2400m/usb-fw.c b/drivers/net/wimax/i2400m/usb-fw.c
40149 index 5ad287c..783b020 100644
40150 --- a/drivers/net/wimax/i2400m/usb-fw.c
40151 +++ b/drivers/net/wimax/i2400m/usb-fw.c
40152 @@ -263,6 +263,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(struct i2400m *i2400m,
40153 int do_autopm = 1;
40154 DECLARE_COMPLETION_ONSTACK(notif_completion);
40155
40156 + pax_track_stack();
40157 +
40158 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
40159 i2400m, ack, ack_size);
40160 BUG_ON(_ack == i2400m->bm_ack_buf);
40161 diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
40162 index 6c26840..62c97c3 100644
40163 --- a/drivers/net/wireless/airo.c
40164 +++ b/drivers/net/wireless/airo.c
40165 @@ -3003,6 +3003,8 @@ static void airo_process_scan_results (struct airo_info *ai) {
40166 BSSListElement * loop_net;
40167 BSSListElement * tmp_net;
40168
40169 + pax_track_stack();
40170 +
40171 /* Blow away current list of scan results */
40172 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
40173 list_move_tail (&loop_net->list, &ai->network_free_list);
40174 @@ -3783,6 +3785,8 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
40175 WepKeyRid wkr;
40176 int rc;
40177
40178 + pax_track_stack();
40179 +
40180 memset( &mySsid, 0, sizeof( mySsid ) );
40181 kfree (ai->flash);
40182 ai->flash = NULL;
40183 @@ -4758,6 +4762,8 @@ static int proc_stats_rid_open( struct inode *inode,
40184 __le32 *vals = stats.vals;
40185 int len;
40186
40187 + pax_track_stack();
40188 +
40189 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
40190 return -ENOMEM;
40191 data = (struct proc_data *)file->private_data;
40192 @@ -5487,6 +5493,8 @@ static int proc_BSSList_open( struct inode *inode, struct file *file ) {
40193 /* If doLoseSync is not 1, we won't do a Lose Sync */
40194 int doLoseSync = -1;
40195
40196 + pax_track_stack();
40197 +
40198 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
40199 return -ENOMEM;
40200 data = (struct proc_data *)file->private_data;
40201 @@ -7193,6 +7201,8 @@ static int airo_get_aplist(struct net_device *dev,
40202 int i;
40203 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
40204
40205 + pax_track_stack();
40206 +
40207 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
40208 if (!qual)
40209 return -ENOMEM;
40210 @@ -7753,6 +7763,8 @@ static void airo_read_wireless_stats(struct airo_info *local)
40211 CapabilityRid cap_rid;
40212 __le32 *vals = stats_rid.vals;
40213
40214 + pax_track_stack();
40215 +
40216 /* Get stats out of the card */
40217 clear_bit(JOB_WSTATS, &local->jobs);
40218 if (local->power.event) {
40219 diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
40220 index 747508c..82e965d 100644
40221 --- a/drivers/net/wireless/ath/ath5k/debug.c
40222 +++ b/drivers/net/wireless/ath/ath5k/debug.c
40223 @@ -205,6 +205,8 @@ static ssize_t read_file_beacon(struct file *file, char __user *user_buf,
40224 unsigned int v;
40225 u64 tsf;
40226
40227 + pax_track_stack();
40228 +
40229 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
40230 len += snprintf(buf+len, sizeof(buf)-len,
40231 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
40232 @@ -318,6 +320,8 @@ static ssize_t read_file_debug(struct file *file, char __user *user_buf,
40233 unsigned int len = 0;
40234 unsigned int i;
40235
40236 + pax_track_stack();
40237 +
40238 len += snprintf(buf+len, sizeof(buf)-len,
40239 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
40240
40241 diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
40242 index 2be4c22..593b1eb 100644
40243 --- a/drivers/net/wireless/ath/ath9k/debug.c
40244 +++ b/drivers/net/wireless/ath/ath9k/debug.c
40245 @@ -220,6 +220,8 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
40246 char buf[512];
40247 unsigned int len = 0;
40248
40249 + pax_track_stack();
40250 +
40251 len += snprintf(buf + len, sizeof(buf) - len,
40252 "%8s: %10u\n", "RX", sc->debug.stats.istats.rxok);
40253 len += snprintf(buf + len, sizeof(buf) - len,
40254 @@ -360,6 +362,8 @@ static ssize_t read_file_wiphy(struct file *file, char __user *user_buf,
40255 int i;
40256 u8 addr[ETH_ALEN];
40257
40258 + pax_track_stack();
40259 +
40260 len += snprintf(buf + len, sizeof(buf) - len,
40261 "primary: %s (%s chan=%d ht=%d)\n",
40262 wiphy_name(sc->pri_wiphy->hw->wiphy),
40263 diff --git a/drivers/net/wireless/b43/debugfs.c b/drivers/net/wireless/b43/debugfs.c
40264 index 80b19a4..dab3a45 100644
40265 --- a/drivers/net/wireless/b43/debugfs.c
40266 +++ b/drivers/net/wireless/b43/debugfs.c
40267 @@ -43,7 +43,7 @@ static struct dentry *rootdir;
40268 struct b43_debugfs_fops {
40269 ssize_t (*read)(struct b43_wldev *dev, char *buf, size_t bufsize);
40270 int (*write)(struct b43_wldev *dev, const char *buf, size_t count);
40271 - struct file_operations fops;
40272 + const struct file_operations fops;
40273 /* Offset of struct b43_dfs_file in struct b43_dfsentry */
40274 size_t file_struct_offset;
40275 };
40276 diff --git a/drivers/net/wireless/b43legacy/debugfs.c b/drivers/net/wireless/b43legacy/debugfs.c
40277 index 1f85ac5..c99b4b4 100644
40278 --- a/drivers/net/wireless/b43legacy/debugfs.c
40279 +++ b/drivers/net/wireless/b43legacy/debugfs.c
40280 @@ -44,7 +44,7 @@ static struct dentry *rootdir;
40281 struct b43legacy_debugfs_fops {
40282 ssize_t (*read)(struct b43legacy_wldev *dev, char *buf, size_t bufsize);
40283 int (*write)(struct b43legacy_wldev *dev, const char *buf, size_t count);
40284 - struct file_operations fops;
40285 + const struct file_operations fops;
40286 /* Offset of struct b43legacy_dfs_file in struct b43legacy_dfsentry */
40287 size_t file_struct_offset;
40288 /* Take wl->irq_lock before calling read/write? */
40289 diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
40290 index 43102bf..3b569c3 100644
40291 --- a/drivers/net/wireless/ipw2x00/ipw2100.c
40292 +++ b/drivers/net/wireless/ipw2x00/ipw2100.c
40293 @@ -2014,6 +2014,8 @@ static int ipw2100_set_essid(struct ipw2100_priv *priv, char *essid,
40294 int err;
40295 DECLARE_SSID_BUF(ssid);
40296
40297 + pax_track_stack();
40298 +
40299 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
40300
40301 if (ssid_len)
40302 @@ -5380,6 +5382,8 @@ static int ipw2100_set_key(struct ipw2100_priv *priv,
40303 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
40304 int err;
40305
40306 + pax_track_stack();
40307 +
40308 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
40309 idx, keylen, len);
40310
40311 diff --git a/drivers/net/wireless/ipw2x00/libipw_rx.c b/drivers/net/wireless/ipw2x00/libipw_rx.c
40312 index 282b1f7..169f0cf 100644
40313 --- a/drivers/net/wireless/ipw2x00/libipw_rx.c
40314 +++ b/drivers/net/wireless/ipw2x00/libipw_rx.c
40315 @@ -1566,6 +1566,8 @@ static void libipw_process_probe_response(struct libipw_device
40316 unsigned long flags;
40317 DECLARE_SSID_BUF(ssid);
40318
40319 + pax_track_stack();
40320 +
40321 LIBIPW_DEBUG_SCAN("'%s' (%pM"
40322 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
40323 print_ssid(ssid, info_element->data, info_element->len),
40324 diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
40325 index 950267a..80d5fd2 100644
40326 --- a/drivers/net/wireless/iwlwifi/iwl-1000.c
40327 +++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
40328 @@ -137,7 +137,7 @@ static struct iwl_lib_ops iwl1000_lib = {
40329 },
40330 };
40331
40332 -static struct iwl_ops iwl1000_ops = {
40333 +static const struct iwl_ops iwl1000_ops = {
40334 .ucode = &iwl5000_ucode,
40335 .lib = &iwl1000_lib,
40336 .hcmd = &iwl5000_hcmd,
40337 diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
40338 index 56bfcc3..b348020 100644
40339 --- a/drivers/net/wireless/iwlwifi/iwl-3945.c
40340 +++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
40341 @@ -2874,7 +2874,7 @@ static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
40342 .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
40343 };
40344
40345 -static struct iwl_ops iwl3945_ops = {
40346 +static const struct iwl_ops iwl3945_ops = {
40347 .ucode = &iwl3945_ucode,
40348 .lib = &iwl3945_lib,
40349 .hcmd = &iwl3945_hcmd,
40350 diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
40351 index 585b8d4..e142963 100644
40352 --- a/drivers/net/wireless/iwlwifi/iwl-4965.c
40353 +++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
40354 @@ -2345,7 +2345,7 @@ static struct iwl_lib_ops iwl4965_lib = {
40355 },
40356 };
40357
40358 -static struct iwl_ops iwl4965_ops = {
40359 +static const struct iwl_ops iwl4965_ops = {
40360 .ucode = &iwl4965_ucode,
40361 .lib = &iwl4965_lib,
40362 .hcmd = &iwl4965_hcmd,
40363 diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
40364 index 1f423f2..e37c192 100644
40365 --- a/drivers/net/wireless/iwlwifi/iwl-5000.c
40366 +++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
40367 @@ -1633,14 +1633,14 @@ static struct iwl_lib_ops iwl5150_lib = {
40368 },
40369 };
40370
40371 -struct iwl_ops iwl5000_ops = {
40372 +const struct iwl_ops iwl5000_ops = {
40373 .ucode = &iwl5000_ucode,
40374 .lib = &iwl5000_lib,
40375 .hcmd = &iwl5000_hcmd,
40376 .utils = &iwl5000_hcmd_utils,
40377 };
40378
40379 -static struct iwl_ops iwl5150_ops = {
40380 +static const struct iwl_ops iwl5150_ops = {
40381 .ucode = &iwl5000_ucode,
40382 .lib = &iwl5150_lib,
40383 .hcmd = &iwl5000_hcmd,
40384 diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
40385 index 1473452..f07d5e1 100644
40386 --- a/drivers/net/wireless/iwlwifi/iwl-6000.c
40387 +++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
40388 @@ -146,7 +146,7 @@ static struct iwl_hcmd_utils_ops iwl6000_hcmd_utils = {
40389 .calc_rssi = iwl5000_calc_rssi,
40390 };
40391
40392 -static struct iwl_ops iwl6000_ops = {
40393 +static const struct iwl_ops iwl6000_ops = {
40394 .ucode = &iwl5000_ucode,
40395 .lib = &iwl6000_lib,
40396 .hcmd = &iwl5000_hcmd,
40397 diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
40398 index 1a3dfa2..b3e0a61 100644
40399 --- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
40400 +++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
40401 @@ -857,6 +857,8 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
40402 u8 active_index = 0;
40403 s32 tpt = 0;
40404
40405 + pax_track_stack();
40406 +
40407 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
40408
40409 if (!ieee80211_is_data(hdr->frame_control) ||
40410 @@ -2722,6 +2724,8 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
40411 u8 valid_tx_ant = 0;
40412 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
40413
40414 + pax_track_stack();
40415 +
40416 /* Override starting rate (index 0) if needed for debug purposes */
40417 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
40418
40419 diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
40420 index 0e56d78..6a3c107 100644
40421 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c
40422 +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
40423 @@ -2911,7 +2911,9 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
40424 if (iwl_debug_level & IWL_DL_INFO)
40425 dev_printk(KERN_DEBUG, &(pdev->dev),
40426 "Disabling hw_scan\n");
40427 - iwl_hw_ops.hw_scan = NULL;
40428 + pax_open_kernel();
40429 + *(void **)&iwl_hw_ops.hw_scan = NULL;
40430 + pax_close_kernel();
40431 }
40432
40433 hw = iwl_alloc_all(cfg, &iwl_hw_ops);
40434 diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
40435 index cbc6290..eb323d7 100644
40436 --- a/drivers/net/wireless/iwlwifi/iwl-debug.h
40437 +++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
40438 @@ -118,8 +118,8 @@ void iwl_dbgfs_unregister(struct iwl_priv *priv);
40439 #endif
40440
40441 #else
40442 -#define IWL_DEBUG(__priv, level, fmt, args...)
40443 -#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
40444 +#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
40445 +#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
40446 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
40447 void *p, u32 len)
40448 {}
40449 diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
40450 index a198bcf..8e68233 100644
40451 --- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
40452 +++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
40453 @@ -524,6 +524,8 @@ static ssize_t iwl_dbgfs_status_read(struct file *file,
40454 int pos = 0;
40455 const size_t bufsz = sizeof(buf);
40456
40457 + pax_track_stack();
40458 +
40459 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
40460 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
40461 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_SYNC_ACTIVE: %d\n",
40462 @@ -658,6 +660,8 @@ static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
40463 const size_t bufsz = sizeof(buf);
40464 ssize_t ret;
40465
40466 + pax_track_stack();
40467 +
40468 for (i = 0; i < AC_NUM; i++) {
40469 pos += scnprintf(buf + pos, bufsz - pos,
40470 "\tcw_min\tcw_max\taifsn\ttxop\n");
40471 diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
40472 index 3539ea4..b174bfa 100644
40473 --- a/drivers/net/wireless/iwlwifi/iwl-dev.h
40474 +++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
40475 @@ -68,7 +68,7 @@ struct iwl_tx_queue;
40476
40477 /* shared structures from iwl-5000.c */
40478 extern struct iwl_mod_params iwl50_mod_params;
40479 -extern struct iwl_ops iwl5000_ops;
40480 +extern const struct iwl_ops iwl5000_ops;
40481 extern struct iwl_ucode_ops iwl5000_ucode;
40482 extern struct iwl_lib_ops iwl5000_lib;
40483 extern struct iwl_hcmd_ops iwl5000_hcmd;
40484 diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
40485 index 619590d..69235ee 100644
40486 --- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
40487 +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
40488 @@ -3927,7 +3927,9 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
40489 */
40490 if (iwl3945_mod_params.disable_hw_scan) {
40491 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
40492 - iwl3945_hw_ops.hw_scan = NULL;
40493 + pax_open_kernel();
40494 + *(void **)&iwl3945_hw_ops.hw_scan = NULL;
40495 + pax_close_kernel();
40496 }
40497
40498
40499 diff --git a/drivers/net/wireless/iwmc3200wifi/debugfs.c b/drivers/net/wireless/iwmc3200wifi/debugfs.c
40500 index 1465379..fe4d78b 100644
40501 --- a/drivers/net/wireless/iwmc3200wifi/debugfs.c
40502 +++ b/drivers/net/wireless/iwmc3200wifi/debugfs.c
40503 @@ -299,6 +299,8 @@ static ssize_t iwm_debugfs_fw_err_read(struct file *filp,
40504 int buf_len = 512;
40505 size_t len = 0;
40506
40507 + pax_track_stack();
40508 +
40509 if (*ppos != 0)
40510 return 0;
40511 if (count < sizeof(buf))
40512 diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c
40513 index 893a55c..7f66a50 100644
40514 --- a/drivers/net/wireless/libertas/debugfs.c
40515 +++ b/drivers/net/wireless/libertas/debugfs.c
40516 @@ -708,7 +708,7 @@ out_unlock:
40517 struct lbs_debugfs_files {
40518 const char *name;
40519 int perm;
40520 - struct file_operations fops;
40521 + const struct file_operations fops;
40522 };
40523
40524 static const struct lbs_debugfs_files debugfs_files[] = {
40525 diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
40526 index 2ecbedb..42704f0 100644
40527 --- a/drivers/net/wireless/rndis_wlan.c
40528 +++ b/drivers/net/wireless/rndis_wlan.c
40529 @@ -1176,7 +1176,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
40530
40531 devdbg(usbdev, "set_rts_threshold %i", rts_threshold);
40532
40533 - if (rts_threshold < 0 || rts_threshold > 2347)
40534 + if (rts_threshold > 2347)
40535 rts_threshold = 2347;
40536
40537 tmp = cpu_to_le32(rts_threshold);
40538 diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
40539 index 334ccd6..47f8944 100644
40540 --- a/drivers/oprofile/buffer_sync.c
40541 +++ b/drivers/oprofile/buffer_sync.c
40542 @@ -342,7 +342,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
40543 if (cookie == NO_COOKIE)
40544 offset = pc;
40545 if (cookie == INVALID_COOKIE) {
40546 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
40547 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
40548 offset = pc;
40549 }
40550 if (cookie != last_cookie) {
40551 @@ -386,14 +386,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
40552 /* add userspace sample */
40553
40554 if (!mm) {
40555 - atomic_inc(&oprofile_stats.sample_lost_no_mm);
40556 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
40557 return 0;
40558 }
40559
40560 cookie = lookup_dcookie(mm, s->eip, &offset);
40561
40562 if (cookie == INVALID_COOKIE) {
40563 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
40564 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
40565 return 0;
40566 }
40567
40568 @@ -562,7 +562,7 @@ void sync_buffer(int cpu)
40569 /* ignore backtraces if failed to add a sample */
40570 if (state == sb_bt_start) {
40571 state = sb_bt_ignore;
40572 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
40573 + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
40574 }
40575 }
40576 release_mm(mm);
40577 diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
40578 index 5df60a6..72f5c1c 100644
40579 --- a/drivers/oprofile/event_buffer.c
40580 +++ b/drivers/oprofile/event_buffer.c
40581 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
40582 }
40583
40584 if (buffer_pos == buffer_size) {
40585 - atomic_inc(&oprofile_stats.event_lost_overflow);
40586 + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
40587 return;
40588 }
40589
40590 diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
40591 index dc8a042..fe5f315 100644
40592 --- a/drivers/oprofile/oprof.c
40593 +++ b/drivers/oprofile/oprof.c
40594 @@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
40595 if (oprofile_ops.switch_events())
40596 return;
40597
40598 - atomic_inc(&oprofile_stats.multiplex_counter);
40599 + atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
40600 start_switch_worker();
40601 }
40602
40603 diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
40604 index 61689e8..387f7f8 100644
40605 --- a/drivers/oprofile/oprofile_stats.c
40606 +++ b/drivers/oprofile/oprofile_stats.c
40607 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
40608 cpu_buf->sample_invalid_eip = 0;
40609 }
40610
40611 - atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
40612 - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
40613 - atomic_set(&oprofile_stats.event_lost_overflow, 0);
40614 - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
40615 - atomic_set(&oprofile_stats.multiplex_counter, 0);
40616 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
40617 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
40618 + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
40619 + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
40620 + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
40621 }
40622
40623
40624 diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
40625 index 0b54e46..a37c527 100644
40626 --- a/drivers/oprofile/oprofile_stats.h
40627 +++ b/drivers/oprofile/oprofile_stats.h
40628 @@ -13,11 +13,11 @@
40629 #include <asm/atomic.h>
40630
40631 struct oprofile_stat_struct {
40632 - atomic_t sample_lost_no_mm;
40633 - atomic_t sample_lost_no_mapping;
40634 - atomic_t bt_lost_no_mapping;
40635 - atomic_t event_lost_overflow;
40636 - atomic_t multiplex_counter;
40637 + atomic_unchecked_t sample_lost_no_mm;
40638 + atomic_unchecked_t sample_lost_no_mapping;
40639 + atomic_unchecked_t bt_lost_no_mapping;
40640 + atomic_unchecked_t event_lost_overflow;
40641 + atomic_unchecked_t multiplex_counter;
40642 };
40643
40644 extern struct oprofile_stat_struct oprofile_stats;
40645 diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
40646 index 2766a6d..80c77e2 100644
40647 --- a/drivers/oprofile/oprofilefs.c
40648 +++ b/drivers/oprofile/oprofilefs.c
40649 @@ -187,7 +187,7 @@ static const struct file_operations atomic_ro_fops = {
40650
40651
40652 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
40653 - char const *name, atomic_t *val)
40654 + char const *name, atomic_unchecked_t *val)
40655 {
40656 struct dentry *d = __oprofilefs_create_file(sb, root, name,
40657 &atomic_ro_fops, 0444);
40658 diff --git a/drivers/parisc/pdc_stable.c b/drivers/parisc/pdc_stable.c
40659 index 13a64bc..ad62835 100644
40660 --- a/drivers/parisc/pdc_stable.c
40661 +++ b/drivers/parisc/pdc_stable.c
40662 @@ -481,7 +481,7 @@ pdcspath_attr_store(struct kobject *kobj, struct attribute *attr,
40663 return ret;
40664 }
40665
40666 -static struct sysfs_ops pdcspath_attr_ops = {
40667 +static const struct sysfs_ops pdcspath_attr_ops = {
40668 .show = pdcspath_attr_show,
40669 .store = pdcspath_attr_store,
40670 };
40671 diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
40672 index 8eefe56..40751a7 100644
40673 --- a/drivers/parport/procfs.c
40674 +++ b/drivers/parport/procfs.c
40675 @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
40676
40677 *ppos += len;
40678
40679 - return copy_to_user(result, buffer, len) ? -EFAULT : 0;
40680 + return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
40681 }
40682
40683 #ifdef CONFIG_PARPORT_1284
40684 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
40685
40686 *ppos += len;
40687
40688 - return copy_to_user (result, buffer, len) ? -EFAULT : 0;
40689 + return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
40690 }
40691 #endif /* IEEE1284.3 support. */
40692
40693 diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
40694 index 73e7d8e..c80f3d2 100644
40695 --- a/drivers/pci/hotplug/acpiphp_glue.c
40696 +++ b/drivers/pci/hotplug/acpiphp_glue.c
40697 @@ -111,7 +111,7 @@ static int post_dock_fixups(struct notifier_block *nb, unsigned long val,
40698 }
40699
40700
40701 -static struct acpi_dock_ops acpiphp_dock_ops = {
40702 +static const struct acpi_dock_ops acpiphp_dock_ops = {
40703 .handler = handle_hotplug_event_func,
40704 };
40705
40706 diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
40707 index 9fff878..ad0ad53 100644
40708 --- a/drivers/pci/hotplug/cpci_hotplug.h
40709 +++ b/drivers/pci/hotplug/cpci_hotplug.h
40710 @@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
40711 int (*hardware_test) (struct slot* slot, u32 value);
40712 u8 (*get_power) (struct slot* slot);
40713 int (*set_power) (struct slot* slot, int value);
40714 -};
40715 +} __no_const;
40716
40717 struct cpci_hp_controller {
40718 unsigned int irq;
40719 diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
40720 index 76ba8a1..20ca857 100644
40721 --- a/drivers/pci/hotplug/cpqphp_nvram.c
40722 +++ b/drivers/pci/hotplug/cpqphp_nvram.c
40723 @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
40724
40725 void compaq_nvram_init (void __iomem *rom_start)
40726 {
40727 +
40728 +#ifndef CONFIG_PAX_KERNEXEC
40729 if (rom_start) {
40730 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
40731 }
40732 +#endif
40733 +
40734 dbg("int15 entry = %p\n", compaq_int15_entry_point);
40735
40736 /* initialize our int15 lock */
40737 diff --git a/drivers/pci/hotplug/fakephp.c b/drivers/pci/hotplug/fakephp.c
40738 index 6151389..0a894ef 100644
40739 --- a/drivers/pci/hotplug/fakephp.c
40740 +++ b/drivers/pci/hotplug/fakephp.c
40741 @@ -73,7 +73,7 @@ static void legacy_release(struct kobject *kobj)
40742 }
40743
40744 static struct kobj_type legacy_ktype = {
40745 - .sysfs_ops = &(struct sysfs_ops){
40746 + .sysfs_ops = &(const struct sysfs_ops){
40747 .store = legacy_store, .show = legacy_show
40748 },
40749 .release = &legacy_release,
40750 diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
40751 index 5b680df..fe05b7e 100644
40752 --- a/drivers/pci/intel-iommu.c
40753 +++ b/drivers/pci/intel-iommu.c
40754 @@ -2643,7 +2643,7 @@ error:
40755 return 0;
40756 }
40757
40758 -static dma_addr_t intel_map_page(struct device *dev, struct page *page,
40759 +dma_addr_t intel_map_page(struct device *dev, struct page *page,
40760 unsigned long offset, size_t size,
40761 enum dma_data_direction dir,
40762 struct dma_attrs *attrs)
40763 @@ -2719,7 +2719,7 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova)
40764 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
40765 }
40766
40767 -static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
40768 +void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
40769 size_t size, enum dma_data_direction dir,
40770 struct dma_attrs *attrs)
40771 {
40772 @@ -2768,7 +2768,7 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
40773 }
40774 }
40775
40776 -static void *intel_alloc_coherent(struct device *hwdev, size_t size,
40777 +void *intel_alloc_coherent(struct device *hwdev, size_t size,
40778 dma_addr_t *dma_handle, gfp_t flags)
40779 {
40780 void *vaddr;
40781 @@ -2800,7 +2800,7 @@ static void *intel_alloc_coherent(struct device *hwdev, size_t size,
40782 return NULL;
40783 }
40784
40785 -static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
40786 +void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
40787 dma_addr_t dma_handle)
40788 {
40789 int order;
40790 @@ -2812,7 +2812,7 @@ static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
40791 free_pages((unsigned long)vaddr, order);
40792 }
40793
40794 -static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
40795 +void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
40796 int nelems, enum dma_data_direction dir,
40797 struct dma_attrs *attrs)
40798 {
40799 @@ -2872,7 +2872,7 @@ static int intel_nontranslate_map_sg(struct device *hddev,
40800 return nelems;
40801 }
40802
40803 -static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
40804 +int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
40805 enum dma_data_direction dir, struct dma_attrs *attrs)
40806 {
40807 int i;
40808 @@ -2941,12 +2941,12 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
40809 return nelems;
40810 }
40811
40812 -static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
40813 +int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
40814 {
40815 return !dma_addr;
40816 }
40817
40818 -struct dma_map_ops intel_dma_ops = {
40819 +const struct dma_map_ops intel_dma_ops = {
40820 .alloc_coherent = intel_alloc_coherent,
40821 .free_coherent = intel_free_coherent,
40822 .map_sg = intel_map_sg,
40823 diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
40824 index 5b7056c..607bc94 100644
40825 --- a/drivers/pci/pcie/aspm.c
40826 +++ b/drivers/pci/pcie/aspm.c
40827 @@ -27,9 +27,9 @@
40828 #define MODULE_PARAM_PREFIX "pcie_aspm."
40829
40830 /* Note: those are not register definitions */
40831 -#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
40832 -#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
40833 -#define ASPM_STATE_L1 (4) /* L1 state */
40834 +#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
40835 +#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
40836 +#define ASPM_STATE_L1 (4U) /* L1 state */
40837 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
40838 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
40839
40840 diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
40841 index 8105e32..ca10419 100644
40842 --- a/drivers/pci/probe.c
40843 +++ b/drivers/pci/probe.c
40844 @@ -62,14 +62,14 @@ static ssize_t pci_bus_show_cpuaffinity(struct device *dev,
40845 return ret;
40846 }
40847
40848 -static ssize_t inline pci_bus_show_cpumaskaffinity(struct device *dev,
40849 +static inline ssize_t pci_bus_show_cpumaskaffinity(struct device *dev,
40850 struct device_attribute *attr,
40851 char *buf)
40852 {
40853 return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
40854 }
40855
40856 -static ssize_t inline pci_bus_show_cpulistaffinity(struct device *dev,
40857 +static inline ssize_t pci_bus_show_cpulistaffinity(struct device *dev,
40858 struct device_attribute *attr,
40859 char *buf)
40860 {
40861 diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
40862 index a03ad8c..024b0da 100644
40863 --- a/drivers/pci/proc.c
40864 +++ b/drivers/pci/proc.c
40865 @@ -480,7 +480,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
40866 static int __init pci_proc_init(void)
40867 {
40868 struct pci_dev *dev = NULL;
40869 +
40870 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
40871 +#ifdef CONFIG_GRKERNSEC_PROC_USER
40872 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
40873 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40874 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
40875 +#endif
40876 +#else
40877 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
40878 +#endif
40879 proc_create("devices", 0, proc_bus_pci_dir,
40880 &proc_bus_pci_dev_operations);
40881 proc_initialized = 1;
40882 diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
40883 index 8c02b6c..5584d8e 100644
40884 --- a/drivers/pci/slot.c
40885 +++ b/drivers/pci/slot.c
40886 @@ -29,7 +29,7 @@ static ssize_t pci_slot_attr_store(struct kobject *kobj,
40887 return attribute->store ? attribute->store(slot, buf, len) : -EIO;
40888 }
40889
40890 -static struct sysfs_ops pci_slot_sysfs_ops = {
40891 +static const struct sysfs_ops pci_slot_sysfs_ops = {
40892 .show = pci_slot_attr_show,
40893 .store = pci_slot_attr_store,
40894 };
40895 diff --git a/drivers/pcmcia/pcmcia_ioctl.c b/drivers/pcmcia/pcmcia_ioctl.c
40896 index 30cf71d2..50938f1 100644
40897 --- a/drivers/pcmcia/pcmcia_ioctl.c
40898 +++ b/drivers/pcmcia/pcmcia_ioctl.c
40899 @@ -819,7 +819,7 @@ static int ds_ioctl(struct inode * inode, struct file * file,
40900 return -EFAULT;
40901 }
40902 }
40903 - buf = kmalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
40904 + buf = kzalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
40905 if (!buf)
40906 return -ENOMEM;
40907
40908 diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
40909 index 52183c4..b224c69 100644
40910 --- a/drivers/platform/x86/acer-wmi.c
40911 +++ b/drivers/platform/x86/acer-wmi.c
40912 @@ -918,7 +918,7 @@ static int update_bl_status(struct backlight_device *bd)
40913 return 0;
40914 }
40915
40916 -static struct backlight_ops acer_bl_ops = {
40917 +static const struct backlight_ops acer_bl_ops = {
40918 .get_brightness = read_brightness,
40919 .update_status = update_bl_status,
40920 };
40921 diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
40922 index 767cb61..a87380b 100644
40923 --- a/drivers/platform/x86/asus-laptop.c
40924 +++ b/drivers/platform/x86/asus-laptop.c
40925 @@ -250,7 +250,7 @@ static struct backlight_device *asus_backlight_device;
40926 */
40927 static int read_brightness(struct backlight_device *bd);
40928 static int update_bl_status(struct backlight_device *bd);
40929 -static struct backlight_ops asusbl_ops = {
40930 +static const struct backlight_ops asusbl_ops = {
40931 .get_brightness = read_brightness,
40932 .update_status = update_bl_status,
40933 };
40934 diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c
40935 index d66c07a..a4abaac 100644
40936 --- a/drivers/platform/x86/asus_acpi.c
40937 +++ b/drivers/platform/x86/asus_acpi.c
40938 @@ -1396,7 +1396,7 @@ static int asus_hotk_remove(struct acpi_device *device, int type)
40939 return 0;
40940 }
40941
40942 -static struct backlight_ops asus_backlight_data = {
40943 +static const struct backlight_ops asus_backlight_data = {
40944 .get_brightness = read_brightness,
40945 .update_status = set_brightness_status,
40946 };
40947 diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
40948 index 11003bb..550ff1b 100644
40949 --- a/drivers/platform/x86/compal-laptop.c
40950 +++ b/drivers/platform/x86/compal-laptop.c
40951 @@ -163,7 +163,7 @@ static int bl_update_status(struct backlight_device *b)
40952 return set_lcd_level(b->props.brightness);
40953 }
40954
40955 -static struct backlight_ops compalbl_ops = {
40956 +static const struct backlight_ops compalbl_ops = {
40957 .get_brightness = bl_get_brightness,
40958 .update_status = bl_update_status,
40959 };
40960 diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
40961 index 07a74da..9dc99fa 100644
40962 --- a/drivers/platform/x86/dell-laptop.c
40963 +++ b/drivers/platform/x86/dell-laptop.c
40964 @@ -318,7 +318,7 @@ static int dell_get_intensity(struct backlight_device *bd)
40965 return buffer.output[1];
40966 }
40967
40968 -static struct backlight_ops dell_ops = {
40969 +static const struct backlight_ops dell_ops = {
40970 .get_brightness = dell_get_intensity,
40971 .update_status = dell_send_intensity,
40972 };
40973 diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
40974 index c533b1c..5c81f22 100644
40975 --- a/drivers/platform/x86/eeepc-laptop.c
40976 +++ b/drivers/platform/x86/eeepc-laptop.c
40977 @@ -245,7 +245,7 @@ static struct device *eeepc_hwmon_device;
40978 */
40979 static int read_brightness(struct backlight_device *bd);
40980 static int update_bl_status(struct backlight_device *bd);
40981 -static struct backlight_ops eeepcbl_ops = {
40982 +static const struct backlight_ops eeepcbl_ops = {
40983 .get_brightness = read_brightness,
40984 .update_status = update_bl_status,
40985 };
40986 diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
40987 index bcd4ba8..a249b35 100644
40988 --- a/drivers/platform/x86/fujitsu-laptop.c
40989 +++ b/drivers/platform/x86/fujitsu-laptop.c
40990 @@ -436,7 +436,7 @@ static int bl_update_status(struct backlight_device *b)
40991 return ret;
40992 }
40993
40994 -static struct backlight_ops fujitsubl_ops = {
40995 +static const struct backlight_ops fujitsubl_ops = {
40996 .get_brightness = bl_get_brightness,
40997 .update_status = bl_update_status,
40998 };
40999 diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
41000 index 759763d..1093ba2 100644
41001 --- a/drivers/platform/x86/msi-laptop.c
41002 +++ b/drivers/platform/x86/msi-laptop.c
41003 @@ -161,7 +161,7 @@ static int bl_update_status(struct backlight_device *b)
41004 return set_lcd_level(b->props.brightness);
41005 }
41006
41007 -static struct backlight_ops msibl_ops = {
41008 +static const struct backlight_ops msibl_ops = {
41009 .get_brightness = bl_get_brightness,
41010 .update_status = bl_update_status,
41011 };
41012 diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
41013 index fe7cf01..9012d8d 100644
41014 --- a/drivers/platform/x86/panasonic-laptop.c
41015 +++ b/drivers/platform/x86/panasonic-laptop.c
41016 @@ -352,7 +352,7 @@ static int bl_set_status(struct backlight_device *bd)
41017 return acpi_pcc_write_sset(pcc, SINF_DC_CUR_BRIGHT, bright);
41018 }
41019
41020 -static struct backlight_ops pcc_backlight_ops = {
41021 +static const struct backlight_ops pcc_backlight_ops = {
41022 .get_brightness = bl_get,
41023 .update_status = bl_set_status,
41024 };
41025 diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
41026 index a2a742c..b37e25e 100644
41027 --- a/drivers/platform/x86/sony-laptop.c
41028 +++ b/drivers/platform/x86/sony-laptop.c
41029 @@ -850,7 +850,7 @@ static int sony_backlight_get_brightness(struct backlight_device *bd)
41030 }
41031
41032 static struct backlight_device *sony_backlight_device;
41033 -static struct backlight_ops sony_backlight_ops = {
41034 +static const struct backlight_ops sony_backlight_ops = {
41035 .update_status = sony_backlight_update_status,
41036 .get_brightness = sony_backlight_get_brightness,
41037 };
41038 diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
41039 index 68271ae..5e8fb10 100644
41040 --- a/drivers/platform/x86/thinkpad_acpi.c
41041 +++ b/drivers/platform/x86/thinkpad_acpi.c
41042 @@ -2139,7 +2139,7 @@ static int hotkey_mask_get(void)
41043 return 0;
41044 }
41045
41046 -void static hotkey_mask_warn_incomplete_mask(void)
41047 +static void hotkey_mask_warn_incomplete_mask(void)
41048 {
41049 /* log only what the user can fix... */
41050 const u32 wantedmask = hotkey_driver_mask &
41051 @@ -6125,7 +6125,7 @@ static void tpacpi_brightness_notify_change(void)
41052 BACKLIGHT_UPDATE_HOTKEY);
41053 }
41054
41055 -static struct backlight_ops ibm_backlight_data = {
41056 +static const struct backlight_ops ibm_backlight_data = {
41057 .get_brightness = brightness_get,
41058 .update_status = brightness_update_status,
41059 };
41060 diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
41061 index 51c0a8b..0786629 100644
41062 --- a/drivers/platform/x86/toshiba_acpi.c
41063 +++ b/drivers/platform/x86/toshiba_acpi.c
41064 @@ -671,7 +671,7 @@ static acpi_status remove_device(void)
41065 return AE_OK;
41066 }
41067
41068 -static struct backlight_ops toshiba_backlight_data = {
41069 +static const struct backlight_ops toshiba_backlight_data = {
41070 .get_brightness = get_lcd,
41071 .update_status = set_lcd_status,
41072 };
41073 diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
41074 index fc83783c..cf370d7 100644
41075 --- a/drivers/pnp/pnpbios/bioscalls.c
41076 +++ b/drivers/pnp/pnpbios/bioscalls.c
41077 @@ -60,7 +60,7 @@ do { \
41078 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
41079 } while(0)
41080
41081 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
41082 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
41083 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
41084
41085 /*
41086 @@ -97,7 +97,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
41087
41088 cpu = get_cpu();
41089 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
41090 +
41091 + pax_open_kernel();
41092 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
41093 + pax_close_kernel();
41094
41095 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
41096 spin_lock_irqsave(&pnp_bios_lock, flags);
41097 @@ -135,7 +138,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
41098 :"memory");
41099 spin_unlock_irqrestore(&pnp_bios_lock, flags);
41100
41101 + pax_open_kernel();
41102 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
41103 + pax_close_kernel();
41104 +
41105 put_cpu();
41106
41107 /* If we get here and this is set then the PnP BIOS faulted on us. */
41108 @@ -469,7 +475,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
41109 return status;
41110 }
41111
41112 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
41113 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
41114 {
41115 int i;
41116
41117 @@ -477,6 +483,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
41118 pnp_bios_callpoint.offset = header->fields.pm16offset;
41119 pnp_bios_callpoint.segment = PNP_CS16;
41120
41121 + pax_open_kernel();
41122 +
41123 for_each_possible_cpu(i) {
41124 struct desc_struct *gdt = get_cpu_gdt_table(i);
41125 if (!gdt)
41126 @@ -488,4 +496,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
41127 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
41128 (unsigned long)__va(header->fields.pm16dseg));
41129 }
41130 +
41131 + pax_close_kernel();
41132 }
41133 diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
41134 index ba97654..66b99d4 100644
41135 --- a/drivers/pnp/resource.c
41136 +++ b/drivers/pnp/resource.c
41137 @@ -355,7 +355,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
41138 return 1;
41139
41140 /* check if the resource is valid */
41141 - if (*irq < 0 || *irq > 15)
41142 + if (*irq > 15)
41143 return 0;
41144
41145 /* check if the resource is reserved */
41146 @@ -419,7 +419,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
41147 return 1;
41148
41149 /* check if the resource is valid */
41150 - if (*dma < 0 || *dma == 4 || *dma > 7)
41151 + if (*dma == 4 || *dma > 7)
41152 return 0;
41153
41154 /* check if the resource is reserved */
41155 diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
41156 index 62bb981..24a2dc9 100644
41157 --- a/drivers/power/bq27x00_battery.c
41158 +++ b/drivers/power/bq27x00_battery.c
41159 @@ -44,7 +44,7 @@ struct bq27x00_device_info;
41160 struct bq27x00_access_methods {
41161 int (*read)(u8 reg, int *rt_value, int b_single,
41162 struct bq27x00_device_info *di);
41163 -};
41164 +} __no_const;
41165
41166 struct bq27x00_device_info {
41167 struct device *dev;
41168 diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
41169 index 62227cd..b5b538b 100644
41170 --- a/drivers/rtc/rtc-dev.c
41171 +++ b/drivers/rtc/rtc-dev.c
41172 @@ -14,6 +14,7 @@
41173 #include <linux/module.h>
41174 #include <linux/rtc.h>
41175 #include <linux/sched.h>
41176 +#include <linux/grsecurity.h>
41177 #include "rtc-core.h"
41178
41179 static dev_t rtc_devt;
41180 @@ -357,6 +358,8 @@ static long rtc_dev_ioctl(struct file *file,
41181 if (copy_from_user(&tm, uarg, sizeof(tm)))
41182 return -EFAULT;
41183
41184 + gr_log_timechange();
41185 +
41186 return rtc_set_time(rtc, &tm);
41187
41188 case RTC_PIE_ON:
41189 diff --git a/drivers/s390/cio/qdio_perf.c b/drivers/s390/cio/qdio_perf.c
41190 index 968e3c7..fbc637a 100644
41191 --- a/drivers/s390/cio/qdio_perf.c
41192 +++ b/drivers/s390/cio/qdio_perf.c
41193 @@ -31,51 +31,51 @@ static struct proc_dir_entry *qdio_perf_pde;
41194 static int qdio_perf_proc_show(struct seq_file *m, void *v)
41195 {
41196 seq_printf(m, "Number of qdio interrupts\t\t\t: %li\n",
41197 - (long)atomic_long_read(&perf_stats.qdio_int));
41198 + (long)atomic_long_read_unchecked(&perf_stats.qdio_int));
41199 seq_printf(m, "Number of PCI interrupts\t\t\t: %li\n",
41200 - (long)atomic_long_read(&perf_stats.pci_int));
41201 + (long)atomic_long_read_unchecked(&perf_stats.pci_int));
41202 seq_printf(m, "Number of adapter interrupts\t\t\t: %li\n",
41203 - (long)atomic_long_read(&perf_stats.thin_int));
41204 + (long)atomic_long_read_unchecked(&perf_stats.thin_int));
41205 seq_printf(m, "\n");
41206 seq_printf(m, "Inbound tasklet runs\t\t\t\t: %li\n",
41207 - (long)atomic_long_read(&perf_stats.tasklet_inbound));
41208 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_inbound));
41209 seq_printf(m, "Outbound tasklet runs\t\t\t\t: %li\n",
41210 - (long)atomic_long_read(&perf_stats.tasklet_outbound));
41211 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_outbound));
41212 seq_printf(m, "Adapter interrupt tasklet runs/loops\t\t: %li/%li\n",
41213 - (long)atomic_long_read(&perf_stats.tasklet_thinint),
41214 - (long)atomic_long_read(&perf_stats.tasklet_thinint_loop));
41215 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint),
41216 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint_loop));
41217 seq_printf(m, "Adapter interrupt inbound tasklet runs/loops\t: %li/%li\n",
41218 - (long)atomic_long_read(&perf_stats.thinint_inbound),
41219 - (long)atomic_long_read(&perf_stats.thinint_inbound_loop));
41220 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound),
41221 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop));
41222 seq_printf(m, "\n");
41223 seq_printf(m, "Number of SIGA In issued\t\t\t: %li\n",
41224 - (long)atomic_long_read(&perf_stats.siga_in));
41225 + (long)atomic_long_read_unchecked(&perf_stats.siga_in));
41226 seq_printf(m, "Number of SIGA Out issued\t\t\t: %li\n",
41227 - (long)atomic_long_read(&perf_stats.siga_out));
41228 + (long)atomic_long_read_unchecked(&perf_stats.siga_out));
41229 seq_printf(m, "Number of SIGA Sync issued\t\t\t: %li\n",
41230 - (long)atomic_long_read(&perf_stats.siga_sync));
41231 + (long)atomic_long_read_unchecked(&perf_stats.siga_sync));
41232 seq_printf(m, "\n");
41233 seq_printf(m, "Number of inbound transfers\t\t\t: %li\n",
41234 - (long)atomic_long_read(&perf_stats.inbound_handler));
41235 + (long)atomic_long_read_unchecked(&perf_stats.inbound_handler));
41236 seq_printf(m, "Number of outbound transfers\t\t\t: %li\n",
41237 - (long)atomic_long_read(&perf_stats.outbound_handler));
41238 + (long)atomic_long_read_unchecked(&perf_stats.outbound_handler));
41239 seq_printf(m, "\n");
41240 seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n",
41241 - (long)atomic_long_read(&perf_stats.fast_requeue));
41242 + (long)atomic_long_read_unchecked(&perf_stats.fast_requeue));
41243 seq_printf(m, "Number of outbound target full condition\t: %li\n",
41244 - (long)atomic_long_read(&perf_stats.outbound_target_full));
41245 + (long)atomic_long_read_unchecked(&perf_stats.outbound_target_full));
41246 seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n",
41247 - (long)atomic_long_read(&perf_stats.debug_tl_out_timer));
41248 + (long)atomic_long_read_unchecked(&perf_stats.debug_tl_out_timer));
41249 seq_printf(m, "Number of stop polling calls\t\t\t: %li\n",
41250 - (long)atomic_long_read(&perf_stats.debug_stop_polling));
41251 + (long)atomic_long_read_unchecked(&perf_stats.debug_stop_polling));
41252 seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n",
41253 - (long)atomic_long_read(&perf_stats.thinint_inbound_loop2));
41254 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop2));
41255 seq_printf(m, "QEBSM EQBS total/incomplete\t\t\t: %li/%li\n",
41256 - (long)atomic_long_read(&perf_stats.debug_eqbs_all),
41257 - (long)atomic_long_read(&perf_stats.debug_eqbs_incomplete));
41258 + (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_all),
41259 + (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_incomplete));
41260 seq_printf(m, "QEBSM SQBS total/incomplete\t\t\t: %li/%li\n",
41261 - (long)atomic_long_read(&perf_stats.debug_sqbs_all),
41262 - (long)atomic_long_read(&perf_stats.debug_sqbs_incomplete));
41263 + (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_all),
41264 + (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_incomplete));
41265 seq_printf(m, "\n");
41266 return 0;
41267 }
41268 diff --git a/drivers/s390/cio/qdio_perf.h b/drivers/s390/cio/qdio_perf.h
41269 index ff4504c..b3604c3 100644
41270 --- a/drivers/s390/cio/qdio_perf.h
41271 +++ b/drivers/s390/cio/qdio_perf.h
41272 @@ -13,46 +13,46 @@
41273
41274 struct qdio_perf_stats {
41275 /* interrupt handler calls */
41276 - atomic_long_t qdio_int;
41277 - atomic_long_t pci_int;
41278 - atomic_long_t thin_int;
41279 + atomic_long_unchecked_t qdio_int;
41280 + atomic_long_unchecked_t pci_int;
41281 + atomic_long_unchecked_t thin_int;
41282
41283 /* tasklet runs */
41284 - atomic_long_t tasklet_inbound;
41285 - atomic_long_t tasklet_outbound;
41286 - atomic_long_t tasklet_thinint;
41287 - atomic_long_t tasklet_thinint_loop;
41288 - atomic_long_t thinint_inbound;
41289 - atomic_long_t thinint_inbound_loop;
41290 - atomic_long_t thinint_inbound_loop2;
41291 + atomic_long_unchecked_t tasklet_inbound;
41292 + atomic_long_unchecked_t tasklet_outbound;
41293 + atomic_long_unchecked_t tasklet_thinint;
41294 + atomic_long_unchecked_t tasklet_thinint_loop;
41295 + atomic_long_unchecked_t thinint_inbound;
41296 + atomic_long_unchecked_t thinint_inbound_loop;
41297 + atomic_long_unchecked_t thinint_inbound_loop2;
41298
41299 /* signal adapter calls */
41300 - atomic_long_t siga_out;
41301 - atomic_long_t siga_in;
41302 - atomic_long_t siga_sync;
41303 + atomic_long_unchecked_t siga_out;
41304 + atomic_long_unchecked_t siga_in;
41305 + atomic_long_unchecked_t siga_sync;
41306
41307 /* misc */
41308 - atomic_long_t inbound_handler;
41309 - atomic_long_t outbound_handler;
41310 - atomic_long_t fast_requeue;
41311 - atomic_long_t outbound_target_full;
41312 + atomic_long_unchecked_t inbound_handler;
41313 + atomic_long_unchecked_t outbound_handler;
41314 + atomic_long_unchecked_t fast_requeue;
41315 + atomic_long_unchecked_t outbound_target_full;
41316
41317 /* for debugging */
41318 - atomic_long_t debug_tl_out_timer;
41319 - atomic_long_t debug_stop_polling;
41320 - atomic_long_t debug_eqbs_all;
41321 - atomic_long_t debug_eqbs_incomplete;
41322 - atomic_long_t debug_sqbs_all;
41323 - atomic_long_t debug_sqbs_incomplete;
41324 + atomic_long_unchecked_t debug_tl_out_timer;
41325 + atomic_long_unchecked_t debug_stop_polling;
41326 + atomic_long_unchecked_t debug_eqbs_all;
41327 + atomic_long_unchecked_t debug_eqbs_incomplete;
41328 + atomic_long_unchecked_t debug_sqbs_all;
41329 + atomic_long_unchecked_t debug_sqbs_incomplete;
41330 };
41331
41332 extern struct qdio_perf_stats perf_stats;
41333 extern int qdio_performance_stats;
41334
41335 -static inline void qdio_perf_stat_inc(atomic_long_t *count)
41336 +static inline void qdio_perf_stat_inc(atomic_long_unchecked_t *count)
41337 {
41338 if (qdio_performance_stats)
41339 - atomic_long_inc(count);
41340 + atomic_long_inc_unchecked(count);
41341 }
41342
41343 int qdio_setup_perf_stats(void);
41344 diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
41345 index 1ddcf40..a85f062 100644
41346 --- a/drivers/scsi/BusLogic.c
41347 +++ b/drivers/scsi/BusLogic.c
41348 @@ -961,6 +961,8 @@ static int __init BusLogic_InitializeFlashPointProbeInfo(struct BusLogic_HostAda
41349 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
41350 *PrototypeHostAdapter)
41351 {
41352 + pax_track_stack();
41353 +
41354 /*
41355 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
41356 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
41357 diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
41358 index cdbdec9..b7d560b 100644
41359 --- a/drivers/scsi/aacraid/aacraid.h
41360 +++ b/drivers/scsi/aacraid/aacraid.h
41361 @@ -471,7 +471,7 @@ struct adapter_ops
41362 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
41363 /* Administrative operations */
41364 int (*adapter_comm)(struct aac_dev * dev, int comm);
41365 -};
41366 +} __no_const;
41367
41368 /*
41369 * Define which interrupt handler needs to be installed
41370 diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
41371 index a5b8e7b..a6a0e43 100644
41372 --- a/drivers/scsi/aacraid/commctrl.c
41373 +++ b/drivers/scsi/aacraid/commctrl.c
41374 @@ -481,6 +481,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
41375 u32 actual_fibsize64, actual_fibsize = 0;
41376 int i;
41377
41378 + pax_track_stack();
41379
41380 if (dev->in_reset) {
41381 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
41382 diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
41383 index 9b97c3e..f099725 100644
41384 --- a/drivers/scsi/aacraid/linit.c
41385 +++ b/drivers/scsi/aacraid/linit.c
41386 @@ -91,7 +91,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
41387 #elif defined(__devinitconst)
41388 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
41389 #else
41390 -static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
41391 +static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
41392 #endif
41393 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
41394 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
41395 diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
41396 index 996f722..9127845 100644
41397 --- a/drivers/scsi/aic94xx/aic94xx_init.c
41398 +++ b/drivers/scsi/aic94xx/aic94xx_init.c
41399 @@ -485,7 +485,7 @@ static ssize_t asd_show_update_bios(struct device *dev,
41400 flash_error_table[i].reason);
41401 }
41402
41403 -static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUGO,
41404 +static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR,
41405 asd_show_update_bios, asd_store_update_bios);
41406
41407 static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
41408 @@ -1011,7 +1011,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
41409 .lldd_control_phy = asd_control_phy,
41410 };
41411
41412 -static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
41413 +static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
41414 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
41415 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
41416 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
41417 diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
41418 index 58efd4b..cb48dc7 100644
41419 --- a/drivers/scsi/bfa/bfa_ioc.h
41420 +++ b/drivers/scsi/bfa/bfa_ioc.h
41421 @@ -127,7 +127,7 @@ struct bfa_ioc_cbfn_s {
41422 bfa_ioc_disable_cbfn_t disable_cbfn;
41423 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
41424 bfa_ioc_reset_cbfn_t reset_cbfn;
41425 -};
41426 +} __no_const;
41427
41428 /**
41429 * Heartbeat failure notification queue element.
41430 diff --git a/drivers/scsi/bfa/bfa_iocfc.h b/drivers/scsi/bfa/bfa_iocfc.h
41431 index 7ad177e..5503586 100644
41432 --- a/drivers/scsi/bfa/bfa_iocfc.h
41433 +++ b/drivers/scsi/bfa/bfa_iocfc.h
41434 @@ -61,7 +61,7 @@ struct bfa_hwif_s {
41435 void (*hw_isr_mode_set)(struct bfa_s *bfa, bfa_boolean_t msix);
41436 void (*hw_msix_getvecs)(struct bfa_s *bfa, u32 *vecmap,
41437 u32 *nvecs, u32 *maxvec);
41438 -};
41439 +} __no_const;
41440 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
41441
41442 struct bfa_iocfc_s {
41443 diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
41444 index 4967643..cbec06b 100644
41445 --- a/drivers/scsi/dpt_i2o.c
41446 +++ b/drivers/scsi/dpt_i2o.c
41447 @@ -1804,6 +1804,8 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
41448 dma_addr_t addr;
41449 ulong flags = 0;
41450
41451 + pax_track_stack();
41452 +
41453 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
41454 // get user msg size in u32s
41455 if(get_user(size, &user_msg[0])){
41456 @@ -2297,6 +2299,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
41457 s32 rcode;
41458 dma_addr_t addr;
41459
41460 + pax_track_stack();
41461 +
41462 memset(msg, 0 , sizeof(msg));
41463 len = scsi_bufflen(cmd);
41464 direction = 0x00000000;
41465 diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
41466 index c7076ce..e20c67c 100644
41467 --- a/drivers/scsi/eata.c
41468 +++ b/drivers/scsi/eata.c
41469 @@ -1087,6 +1087,8 @@ static int port_detect(unsigned long port_base, unsigned int j,
41470 struct hostdata *ha;
41471 char name[16];
41472
41473 + pax_track_stack();
41474 +
41475 sprintf(name, "%s%d", driver_name, j);
41476
41477 if (!request_region(port_base, REGION_SIZE, driver_name)) {
41478 diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c
41479 index 11ae5c9..891daec 100644
41480 --- a/drivers/scsi/fcoe/libfcoe.c
41481 +++ b/drivers/scsi/fcoe/libfcoe.c
41482 @@ -809,6 +809,8 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
41483 size_t rlen;
41484 size_t dlen;
41485
41486 + pax_track_stack();
41487 +
41488 fiph = (struct fip_header *)skb->data;
41489 sub = fiph->fip_subcode;
41490 if (sub != FIP_SC_REQ && sub != FIP_SC_REP)
41491 diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
41492 index 71c7bbe..e93088a 100644
41493 --- a/drivers/scsi/fnic/fnic_main.c
41494 +++ b/drivers/scsi/fnic/fnic_main.c
41495 @@ -669,7 +669,7 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
41496 /* Start local port initiatialization */
41497
41498 lp->link_up = 0;
41499 - lp->tt = fnic_transport_template;
41500 + memcpy((void *)&lp->tt, &fnic_transport_template, sizeof(fnic_transport_template));
41501
41502 lp->max_retry_count = fnic->config.flogi_retries;
41503 lp->max_rport_retry_count = fnic->config.plogi_retries;
41504 diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
41505 index bb96d74..9ec3ce4 100644
41506 --- a/drivers/scsi/gdth.c
41507 +++ b/drivers/scsi/gdth.c
41508 @@ -4102,6 +4102,8 @@ static int ioc_lockdrv(void __user *arg)
41509 ulong flags;
41510 gdth_ha_str *ha;
41511
41512 + pax_track_stack();
41513 +
41514 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
41515 return -EFAULT;
41516 ha = gdth_find_ha(ldrv.ionode);
41517 @@ -4134,6 +4136,8 @@ static int ioc_resetdrv(void __user *arg, char *cmnd)
41518 gdth_ha_str *ha;
41519 int rval;
41520
41521 + pax_track_stack();
41522 +
41523 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
41524 res.number >= MAX_HDRIVES)
41525 return -EFAULT;
41526 @@ -4169,6 +4173,8 @@ static int ioc_general(void __user *arg, char *cmnd)
41527 gdth_ha_str *ha;
41528 int rval;
41529
41530 + pax_track_stack();
41531 +
41532 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
41533 return -EFAULT;
41534 ha = gdth_find_ha(gen.ionode);
41535 @@ -4625,6 +4631,9 @@ static void gdth_flush(gdth_ha_str *ha)
41536 int i;
41537 gdth_cmd_str gdtcmd;
41538 char cmnd[MAX_COMMAND_SIZE];
41539 +
41540 + pax_track_stack();
41541 +
41542 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
41543
41544 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
41545 diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
41546 index 1258da3..20d8ae6 100644
41547 --- a/drivers/scsi/gdth_proc.c
41548 +++ b/drivers/scsi/gdth_proc.c
41549 @@ -46,6 +46,9 @@ static int gdth_set_asc_info(struct Scsi_Host *host, char *buffer,
41550 ulong64 paddr;
41551
41552 char cmnd[MAX_COMMAND_SIZE];
41553 +
41554 + pax_track_stack();
41555 +
41556 memset(cmnd, 0xff, 12);
41557 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
41558
41559 @@ -174,6 +177,8 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
41560 gdth_hget_str *phg;
41561 char cmnd[MAX_COMMAND_SIZE];
41562
41563 + pax_track_stack();
41564 +
41565 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
41566 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
41567 if (!gdtcmd || !estr)
41568 diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
41569 index d03a926..f324286 100644
41570 --- a/drivers/scsi/hosts.c
41571 +++ b/drivers/scsi/hosts.c
41572 @@ -40,7 +40,7 @@
41573 #include "scsi_logging.h"
41574
41575
41576 -static atomic_t scsi_host_next_hn; /* host_no for next new host */
41577 +static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
41578
41579
41580 static void scsi_host_cls_release(struct device *dev)
41581 @@ -347,7 +347,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
41582 * subtract one because we increment first then return, but we need to
41583 * know what the next host number was before increment
41584 */
41585 - shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
41586 + shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
41587 shost->dma_channel = 0xff;
41588
41589 /* These three are default values which can be overridden */
41590 diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
41591 index a601159..55e19d2 100644
41592 --- a/drivers/scsi/ipr.c
41593 +++ b/drivers/scsi/ipr.c
41594 @@ -5286,7 +5286,7 @@ static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
41595 return true;
41596 }
41597
41598 -static struct ata_port_operations ipr_sata_ops = {
41599 +static const struct ata_port_operations ipr_sata_ops = {
41600 .phy_reset = ipr_ata_phy_reset,
41601 .hardreset = ipr_sata_reset,
41602 .post_internal_cmd = ipr_ata_post_internal,
41603 diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
41604 index 4e49fbc..97907ff 100644
41605 --- a/drivers/scsi/ips.h
41606 +++ b/drivers/scsi/ips.h
41607 @@ -1027,7 +1027,7 @@ typedef struct {
41608 int (*intr)(struct ips_ha *);
41609 void (*enableint)(struct ips_ha *);
41610 uint32_t (*statupd)(struct ips_ha *);
41611 -} ips_hw_func_t;
41612 +} __no_const ips_hw_func_t;
41613
41614 typedef struct ips_ha {
41615 uint8_t ha_id[IPS_MAX_CHANNELS+1];
41616 diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
41617 index c1c1574..a9c9348 100644
41618 --- a/drivers/scsi/libfc/fc_exch.c
41619 +++ b/drivers/scsi/libfc/fc_exch.c
41620 @@ -86,12 +86,12 @@ struct fc_exch_mgr {
41621 * all together if not used XXX
41622 */
41623 struct {
41624 - atomic_t no_free_exch;
41625 - atomic_t no_free_exch_xid;
41626 - atomic_t xid_not_found;
41627 - atomic_t xid_busy;
41628 - atomic_t seq_not_found;
41629 - atomic_t non_bls_resp;
41630 + atomic_unchecked_t no_free_exch;
41631 + atomic_unchecked_t no_free_exch_xid;
41632 + atomic_unchecked_t xid_not_found;
41633 + atomic_unchecked_t xid_busy;
41634 + atomic_unchecked_t seq_not_found;
41635 + atomic_unchecked_t non_bls_resp;
41636 } stats;
41637 };
41638 #define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
41639 @@ -510,7 +510,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
41640 /* allocate memory for exchange */
41641 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
41642 if (!ep) {
41643 - atomic_inc(&mp->stats.no_free_exch);
41644 + atomic_inc_unchecked(&mp->stats.no_free_exch);
41645 goto out;
41646 }
41647 memset(ep, 0, sizeof(*ep));
41648 @@ -557,7 +557,7 @@ out:
41649 return ep;
41650 err:
41651 spin_unlock_bh(&pool->lock);
41652 - atomic_inc(&mp->stats.no_free_exch_xid);
41653 + atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
41654 mempool_free(ep, mp->ep_pool);
41655 return NULL;
41656 }
41657 @@ -690,7 +690,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
41658 xid = ntohs(fh->fh_ox_id); /* we originated exch */
41659 ep = fc_exch_find(mp, xid);
41660 if (!ep) {
41661 - atomic_inc(&mp->stats.xid_not_found);
41662 + atomic_inc_unchecked(&mp->stats.xid_not_found);
41663 reject = FC_RJT_OX_ID;
41664 goto out;
41665 }
41666 @@ -720,7 +720,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
41667 ep = fc_exch_find(mp, xid);
41668 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
41669 if (ep) {
41670 - atomic_inc(&mp->stats.xid_busy);
41671 + atomic_inc_unchecked(&mp->stats.xid_busy);
41672 reject = FC_RJT_RX_ID;
41673 goto rel;
41674 }
41675 @@ -731,7 +731,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
41676 }
41677 xid = ep->xid; /* get our XID */
41678 } else if (!ep) {
41679 - atomic_inc(&mp->stats.xid_not_found);
41680 + atomic_inc_unchecked(&mp->stats.xid_not_found);
41681 reject = FC_RJT_RX_ID; /* XID not found */
41682 goto out;
41683 }
41684 @@ -752,7 +752,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
41685 } else {
41686 sp = &ep->seq;
41687 if (sp->id != fh->fh_seq_id) {
41688 - atomic_inc(&mp->stats.seq_not_found);
41689 + atomic_inc_unchecked(&mp->stats.seq_not_found);
41690 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
41691 goto rel;
41692 }
41693 @@ -1163,22 +1163,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
41694
41695 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
41696 if (!ep) {
41697 - atomic_inc(&mp->stats.xid_not_found);
41698 + atomic_inc_unchecked(&mp->stats.xid_not_found);
41699 goto out;
41700 }
41701 if (ep->esb_stat & ESB_ST_COMPLETE) {
41702 - atomic_inc(&mp->stats.xid_not_found);
41703 + atomic_inc_unchecked(&mp->stats.xid_not_found);
41704 goto out;
41705 }
41706 if (ep->rxid == FC_XID_UNKNOWN)
41707 ep->rxid = ntohs(fh->fh_rx_id);
41708 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
41709 - atomic_inc(&mp->stats.xid_not_found);
41710 + atomic_inc_unchecked(&mp->stats.xid_not_found);
41711 goto rel;
41712 }
41713 if (ep->did != ntoh24(fh->fh_s_id) &&
41714 ep->did != FC_FID_FLOGI) {
41715 - atomic_inc(&mp->stats.xid_not_found);
41716 + atomic_inc_unchecked(&mp->stats.xid_not_found);
41717 goto rel;
41718 }
41719 sof = fr_sof(fp);
41720 @@ -1189,7 +1189,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
41721 } else {
41722 sp = &ep->seq;
41723 if (sp->id != fh->fh_seq_id) {
41724 - atomic_inc(&mp->stats.seq_not_found);
41725 + atomic_inc_unchecked(&mp->stats.seq_not_found);
41726 goto rel;
41727 }
41728 }
41729 @@ -1249,9 +1249,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
41730 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
41731
41732 if (!sp)
41733 - atomic_inc(&mp->stats.xid_not_found);
41734 + atomic_inc_unchecked(&mp->stats.xid_not_found);
41735 else
41736 - atomic_inc(&mp->stats.non_bls_resp);
41737 + atomic_inc_unchecked(&mp->stats.non_bls_resp);
41738
41739 fc_frame_free(fp);
41740 }
41741 diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
41742 index 0ee989f..a582241 100644
41743 --- a/drivers/scsi/libsas/sas_ata.c
41744 +++ b/drivers/scsi/libsas/sas_ata.c
41745 @@ -343,7 +343,7 @@ static int sas_ata_scr_read(struct ata_link *link, unsigned int sc_reg_in,
41746 }
41747 }
41748
41749 -static struct ata_port_operations sas_sata_ops = {
41750 +static const struct ata_port_operations sas_sata_ops = {
41751 .phy_reset = sas_ata_phy_reset,
41752 .post_internal_cmd = sas_ata_post_internal,
41753 .qc_defer = ata_std_qc_defer,
41754 diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
41755 index aa10f79..5cc79e4 100644
41756 --- a/drivers/scsi/lpfc/lpfc.h
41757 +++ b/drivers/scsi/lpfc/lpfc.h
41758 @@ -400,7 +400,7 @@ struct lpfc_vport {
41759 struct dentry *debug_nodelist;
41760 struct dentry *vport_debugfs_root;
41761 struct lpfc_debugfs_trc *disc_trc;
41762 - atomic_t disc_trc_cnt;
41763 + atomic_unchecked_t disc_trc_cnt;
41764 #endif
41765 uint8_t stat_data_enabled;
41766 uint8_t stat_data_blocked;
41767 @@ -725,8 +725,8 @@ struct lpfc_hba {
41768 struct timer_list fabric_block_timer;
41769 unsigned long bit_flags;
41770 #define FABRIC_COMANDS_BLOCKED 0
41771 - atomic_t num_rsrc_err;
41772 - atomic_t num_cmd_success;
41773 + atomic_unchecked_t num_rsrc_err;
41774 + atomic_unchecked_t num_cmd_success;
41775 unsigned long last_rsrc_error_time;
41776 unsigned long last_ramp_down_time;
41777 unsigned long last_ramp_up_time;
41778 @@ -740,7 +740,7 @@ struct lpfc_hba {
41779 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
41780 struct dentry *debug_slow_ring_trc;
41781 struct lpfc_debugfs_trc *slow_ring_trc;
41782 - atomic_t slow_ring_trc_cnt;
41783 + atomic_unchecked_t slow_ring_trc_cnt;
41784 #endif
41785
41786 /* Used for deferred freeing of ELS data buffers */
41787 diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
41788 index 8d0f0de..7c77a62 100644
41789 --- a/drivers/scsi/lpfc/lpfc_debugfs.c
41790 +++ b/drivers/scsi/lpfc/lpfc_debugfs.c
41791 @@ -124,7 +124,7 @@ struct lpfc_debug {
41792 int len;
41793 };
41794
41795 -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
41796 +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
41797 static unsigned long lpfc_debugfs_start_time = 0L;
41798
41799 /**
41800 @@ -158,7 +158,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
41801 lpfc_debugfs_enable = 0;
41802
41803 len = 0;
41804 - index = (atomic_read(&vport->disc_trc_cnt) + 1) &
41805 + index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
41806 (lpfc_debugfs_max_disc_trc - 1);
41807 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
41808 dtp = vport->disc_trc + i;
41809 @@ -219,7 +219,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
41810 lpfc_debugfs_enable = 0;
41811
41812 len = 0;
41813 - index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
41814 + index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
41815 (lpfc_debugfs_max_slow_ring_trc - 1);
41816 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
41817 dtp = phba->slow_ring_trc + i;
41818 @@ -397,6 +397,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size)
41819 uint32_t *ptr;
41820 char buffer[1024];
41821
41822 + pax_track_stack();
41823 +
41824 off = 0;
41825 spin_lock_irq(&phba->hbalock);
41826
41827 @@ -634,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
41828 !vport || !vport->disc_trc)
41829 return;
41830
41831 - index = atomic_inc_return(&vport->disc_trc_cnt) &
41832 + index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
41833 (lpfc_debugfs_max_disc_trc - 1);
41834 dtp = vport->disc_trc + index;
41835 dtp->fmt = fmt;
41836 dtp->data1 = data1;
41837 dtp->data2 = data2;
41838 dtp->data3 = data3;
41839 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
41840 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
41841 dtp->jif = jiffies;
41842 #endif
41843 return;
41844 @@ -672,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
41845 !phba || !phba->slow_ring_trc)
41846 return;
41847
41848 - index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
41849 + index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
41850 (lpfc_debugfs_max_slow_ring_trc - 1);
41851 dtp = phba->slow_ring_trc + index;
41852 dtp->fmt = fmt;
41853 dtp->data1 = data1;
41854 dtp->data2 = data2;
41855 dtp->data3 = data3;
41856 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
41857 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
41858 dtp->jif = jiffies;
41859 #endif
41860 return;
41861 @@ -1364,7 +1366,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
41862 "slow_ring buffer\n");
41863 goto debug_failed;
41864 }
41865 - atomic_set(&phba->slow_ring_trc_cnt, 0);
41866 + atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
41867 memset(phba->slow_ring_trc, 0,
41868 (sizeof(struct lpfc_debugfs_trc) *
41869 lpfc_debugfs_max_slow_ring_trc));
41870 @@ -1410,7 +1412,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
41871 "buffer\n");
41872 goto debug_failed;
41873 }
41874 - atomic_set(&vport->disc_trc_cnt, 0);
41875 + atomic_set_unchecked(&vport->disc_trc_cnt, 0);
41876
41877 snprintf(name, sizeof(name), "discovery_trace");
41878 vport->debug_disc_trc =
41879 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
41880 index 549bc7d..8189dbb 100644
41881 --- a/drivers/scsi/lpfc/lpfc_init.c
41882 +++ b/drivers/scsi/lpfc/lpfc_init.c
41883 @@ -8021,8 +8021,10 @@ lpfc_init(void)
41884 printk(LPFC_COPYRIGHT "\n");
41885
41886 if (lpfc_enable_npiv) {
41887 - lpfc_transport_functions.vport_create = lpfc_vport_create;
41888 - lpfc_transport_functions.vport_delete = lpfc_vport_delete;
41889 + pax_open_kernel();
41890 + *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
41891 + *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
41892 + pax_close_kernel();
41893 }
41894 lpfc_transport_template =
41895 fc_attach_transport(&lpfc_transport_functions);
41896 diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
41897 index c88f59f..ff2a42f 100644
41898 --- a/drivers/scsi/lpfc/lpfc_scsi.c
41899 +++ b/drivers/scsi/lpfc/lpfc_scsi.c
41900 @@ -259,7 +259,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
41901 uint32_t evt_posted;
41902
41903 spin_lock_irqsave(&phba->hbalock, flags);
41904 - atomic_inc(&phba->num_rsrc_err);
41905 + atomic_inc_unchecked(&phba->num_rsrc_err);
41906 phba->last_rsrc_error_time = jiffies;
41907
41908 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
41909 @@ -300,7 +300,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
41910 unsigned long flags;
41911 struct lpfc_hba *phba = vport->phba;
41912 uint32_t evt_posted;
41913 - atomic_inc(&phba->num_cmd_success);
41914 + atomic_inc_unchecked(&phba->num_cmd_success);
41915
41916 if (vport->cfg_lun_queue_depth <= queue_depth)
41917 return;
41918 @@ -343,8 +343,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
41919 int i;
41920 struct lpfc_rport_data *rdata;
41921
41922 - num_rsrc_err = atomic_read(&phba->num_rsrc_err);
41923 - num_cmd_success = atomic_read(&phba->num_cmd_success);
41924 + num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
41925 + num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
41926
41927 vports = lpfc_create_vport_work_array(phba);
41928 if (vports != NULL)
41929 @@ -378,8 +378,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
41930 }
41931 }
41932 lpfc_destroy_vport_work_array(phba, vports);
41933 - atomic_set(&phba->num_rsrc_err, 0);
41934 - atomic_set(&phba->num_cmd_success, 0);
41935 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
41936 + atomic_set_unchecked(&phba->num_cmd_success, 0);
41937 }
41938
41939 /**
41940 @@ -427,8 +427,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
41941 }
41942 }
41943 lpfc_destroy_vport_work_array(phba, vports);
41944 - atomic_set(&phba->num_rsrc_err, 0);
41945 - atomic_set(&phba->num_cmd_success, 0);
41946 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
41947 + atomic_set_unchecked(&phba->num_cmd_success, 0);
41948 }
41949
41950 /**
41951 diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
41952 index 234f0b7..3020aea 100644
41953 --- a/drivers/scsi/megaraid/megaraid_mbox.c
41954 +++ b/drivers/scsi/megaraid/megaraid_mbox.c
41955 @@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter)
41956 int rval;
41957 int i;
41958
41959 + pax_track_stack();
41960 +
41961 // Allocate memory for the base list of scb for management module.
41962 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
41963
41964 diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
41965 index 7a117c1..ee01e9e 100644
41966 --- a/drivers/scsi/osd/osd_initiator.c
41967 +++ b/drivers/scsi/osd/osd_initiator.c
41968 @@ -94,6 +94,8 @@ static int _osd_print_system_info(struct osd_dev *od, void *caps)
41969 int nelem = ARRAY_SIZE(get_attrs), a = 0;
41970 int ret;
41971
41972 + pax_track_stack();
41973 +
41974 or = osd_start_request(od, GFP_KERNEL);
41975 if (!or)
41976 return -ENOMEM;
41977 diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
41978 index 9ab8c86..9425ad3 100644
41979 --- a/drivers/scsi/pmcraid.c
41980 +++ b/drivers/scsi/pmcraid.c
41981 @@ -189,8 +189,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
41982 res->scsi_dev = scsi_dev;
41983 scsi_dev->hostdata = res;
41984 res->change_detected = 0;
41985 - atomic_set(&res->read_failures, 0);
41986 - atomic_set(&res->write_failures, 0);
41987 + atomic_set_unchecked(&res->read_failures, 0);
41988 + atomic_set_unchecked(&res->write_failures, 0);
41989 rc = 0;
41990 }
41991 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
41992 @@ -2396,9 +2396,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
41993
41994 /* If this was a SCSI read/write command keep count of errors */
41995 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
41996 - atomic_inc(&res->read_failures);
41997 + atomic_inc_unchecked(&res->read_failures);
41998 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
41999 - atomic_inc(&res->write_failures);
42000 + atomic_inc_unchecked(&res->write_failures);
42001
42002 if (!RES_IS_GSCSI(res->cfg_entry) &&
42003 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
42004 @@ -4116,7 +4116,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
42005
42006 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
42007 /* add resources only after host is added into system */
42008 - if (!atomic_read(&pinstance->expose_resources))
42009 + if (!atomic_read_unchecked(&pinstance->expose_resources))
42010 return;
42011
42012 spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
42013 @@ -4850,7 +4850,7 @@ static int __devinit pmcraid_init_instance(
42014 init_waitqueue_head(&pinstance->reset_wait_q);
42015
42016 atomic_set(&pinstance->outstanding_cmds, 0);
42017 - atomic_set(&pinstance->expose_resources, 0);
42018 + atomic_set_unchecked(&pinstance->expose_resources, 0);
42019
42020 INIT_LIST_HEAD(&pinstance->free_res_q);
42021 INIT_LIST_HEAD(&pinstance->used_res_q);
42022 @@ -5502,7 +5502,7 @@ static int __devinit pmcraid_probe(
42023 /* Schedule worker thread to handle CCN and take care of adding and
42024 * removing devices to OS
42025 */
42026 - atomic_set(&pinstance->expose_resources, 1);
42027 + atomic_set_unchecked(&pinstance->expose_resources, 1);
42028 schedule_work(&pinstance->worker_q);
42029 return rc;
42030
42031 diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
42032 index 3441b3f..6cbe8f7 100644
42033 --- a/drivers/scsi/pmcraid.h
42034 +++ b/drivers/scsi/pmcraid.h
42035 @@ -690,7 +690,7 @@ struct pmcraid_instance {
42036 atomic_t outstanding_cmds;
42037
42038 /* should add/delete resources to mid-layer now ?*/
42039 - atomic_t expose_resources;
42040 + atomic_unchecked_t expose_resources;
42041
42042 /* Tasklet to handle deferred processing */
42043 struct tasklet_struct isr_tasklet[PMCRAID_NUM_MSIX_VECTORS];
42044 @@ -727,8 +727,8 @@ struct pmcraid_resource_entry {
42045 struct list_head queue; /* link to "to be exposed" resources */
42046 struct pmcraid_config_table_entry cfg_entry;
42047 struct scsi_device *scsi_dev; /* Link scsi_device structure */
42048 - atomic_t read_failures; /* count of failed READ commands */
42049 - atomic_t write_failures; /* count of failed WRITE commands */
42050 + atomic_unchecked_t read_failures; /* count of failed READ commands */
42051 + atomic_unchecked_t write_failures; /* count of failed WRITE commands */
42052
42053 /* To indicate add/delete/modify during CCN */
42054 u8 change_detected;
42055 diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
42056 index 2150618..7034215 100644
42057 --- a/drivers/scsi/qla2xxx/qla_def.h
42058 +++ b/drivers/scsi/qla2xxx/qla_def.h
42059 @@ -2089,7 +2089,7 @@ struct isp_operations {
42060
42061 int (*get_flash_version) (struct scsi_qla_host *, void *);
42062 int (*start_scsi) (srb_t *);
42063 -};
42064 +} __no_const;
42065
42066 /* MSI-X Support *************************************************************/
42067
42068 diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
42069 index 81b5f29..2ae1fad 100644
42070 --- a/drivers/scsi/qla4xxx/ql4_def.h
42071 +++ b/drivers/scsi/qla4xxx/ql4_def.h
42072 @@ -240,7 +240,7 @@ struct ddb_entry {
42073 atomic_t retry_relogin_timer; /* Min Time between relogins
42074 * (4000 only) */
42075 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
42076 - atomic_t relogin_retry_count; /* Num of times relogin has been
42077 + atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
42078 * retried */
42079
42080 uint16_t port;
42081 diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
42082 index af8c323..515dd51 100644
42083 --- a/drivers/scsi/qla4xxx/ql4_init.c
42084 +++ b/drivers/scsi/qla4xxx/ql4_init.c
42085 @@ -482,7 +482,7 @@ static struct ddb_entry * qla4xxx_alloc_ddb(struct scsi_qla_host *ha,
42086 atomic_set(&ddb_entry->port_down_timer, ha->port_down_retry_count);
42087 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
42088 atomic_set(&ddb_entry->relogin_timer, 0);
42089 - atomic_set(&ddb_entry->relogin_retry_count, 0);
42090 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
42091 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
42092 list_add_tail(&ddb_entry->list, &ha->ddb_list);
42093 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
42094 @@ -1308,7 +1308,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha,
42095 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
42096 atomic_set(&ddb_entry->port_down_timer,
42097 ha->port_down_retry_count);
42098 - atomic_set(&ddb_entry->relogin_retry_count, 0);
42099 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
42100 atomic_set(&ddb_entry->relogin_timer, 0);
42101 clear_bit(DF_RELOGIN, &ddb_entry->flags);
42102 clear_bit(DF_NO_RELOGIN, &ddb_entry->flags);
42103 diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
42104 index 83c8b5e..a82b348 100644
42105 --- a/drivers/scsi/qla4xxx/ql4_os.c
42106 +++ b/drivers/scsi/qla4xxx/ql4_os.c
42107 @@ -641,13 +641,13 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
42108 ddb_entry->fw_ddb_device_state ==
42109 DDB_DS_SESSION_FAILED) {
42110 /* Reset retry relogin timer */
42111 - atomic_inc(&ddb_entry->relogin_retry_count);
42112 + atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
42113 DEBUG2(printk("scsi%ld: index[%d] relogin"
42114 " timed out-retrying"
42115 " relogin (%d)\n",
42116 ha->host_no,
42117 ddb_entry->fw_ddb_index,
42118 - atomic_read(&ddb_entry->
42119 + atomic_read_unchecked(&ddb_entry->
42120 relogin_retry_count))
42121 );
42122 start_dpc++;
42123 diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
42124 index dd098ca..686ce01 100644
42125 --- a/drivers/scsi/scsi.c
42126 +++ b/drivers/scsi/scsi.c
42127 @@ -652,7 +652,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
42128 unsigned long timeout;
42129 int rtn = 0;
42130
42131 - atomic_inc(&cmd->device->iorequest_cnt);
42132 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
42133
42134 /* check if the device is still usable */
42135 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
42136 diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
42137 index bc3e363..e1a8e50 100644
42138 --- a/drivers/scsi/scsi_debug.c
42139 +++ b/drivers/scsi/scsi_debug.c
42140 @@ -1395,6 +1395,8 @@ static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
42141 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
42142 unsigned char *cmd = (unsigned char *)scp->cmnd;
42143
42144 + pax_track_stack();
42145 +
42146 if ((errsts = check_readiness(scp, 1, devip)))
42147 return errsts;
42148 memset(arr, 0, sizeof(arr));
42149 @@ -1492,6 +1494,8 @@ static int resp_log_sense(struct scsi_cmnd * scp,
42150 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
42151 unsigned char *cmd = (unsigned char *)scp->cmnd;
42152
42153 + pax_track_stack();
42154 +
42155 if ((errsts = check_readiness(scp, 1, devip)))
42156 return errsts;
42157 memset(arr, 0, sizeof(arr));
42158 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
42159 index 8df12522..c4c1472 100644
42160 --- a/drivers/scsi/scsi_lib.c
42161 +++ b/drivers/scsi/scsi_lib.c
42162 @@ -1389,7 +1389,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
42163 shost = sdev->host;
42164 scsi_init_cmd_errh(cmd);
42165 cmd->result = DID_NO_CONNECT << 16;
42166 - atomic_inc(&cmd->device->iorequest_cnt);
42167 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
42168
42169 /*
42170 * SCSI request completion path will do scsi_device_unbusy(),
42171 @@ -1420,9 +1420,9 @@ static void scsi_softirq_done(struct request *rq)
42172 */
42173 cmd->serial_number = 0;
42174
42175 - atomic_inc(&cmd->device->iodone_cnt);
42176 + atomic_inc_unchecked(&cmd->device->iodone_cnt);
42177 if (cmd->result)
42178 - atomic_inc(&cmd->device->ioerr_cnt);
42179 + atomic_inc_unchecked(&cmd->device->ioerr_cnt);
42180
42181 disposition = scsi_decide_disposition(cmd);
42182 if (disposition != SUCCESS &&
42183 diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
42184 index 91a93e0..eae0fe3 100644
42185 --- a/drivers/scsi/scsi_sysfs.c
42186 +++ b/drivers/scsi/scsi_sysfs.c
42187 @@ -662,7 +662,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
42188 char *buf) \
42189 { \
42190 struct scsi_device *sdev = to_scsi_device(dev); \
42191 - unsigned long long count = atomic_read(&sdev->field); \
42192 + unsigned long long count = atomic_read_unchecked(&sdev->field); \
42193 return snprintf(buf, 20, "0x%llx\n", count); \
42194 } \
42195 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
42196 diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
42197 index 1030327..f91fd30 100644
42198 --- a/drivers/scsi/scsi_tgt_lib.c
42199 +++ b/drivers/scsi/scsi_tgt_lib.c
42200 @@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
42201 int err;
42202
42203 dprintk("%lx %u\n", uaddr, len);
42204 - err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
42205 + err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
42206 if (err) {
42207 /*
42208 * TODO: need to fixup sg_tablesize, max_segment_size,
42209 diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
42210 index db02e31..1b42ea9 100644
42211 --- a/drivers/scsi/scsi_transport_fc.c
42212 +++ b/drivers/scsi/scsi_transport_fc.c
42213 @@ -480,7 +480,7 @@ MODULE_PARM_DESC(dev_loss_tmo,
42214 * Netlink Infrastructure
42215 */
42216
42217 -static atomic_t fc_event_seq;
42218 +static atomic_unchecked_t fc_event_seq;
42219
42220 /**
42221 * fc_get_event_number - Obtain the next sequential FC event number
42222 @@ -493,7 +493,7 @@ static atomic_t fc_event_seq;
42223 u32
42224 fc_get_event_number(void)
42225 {
42226 - return atomic_add_return(1, &fc_event_seq);
42227 + return atomic_add_return_unchecked(1, &fc_event_seq);
42228 }
42229 EXPORT_SYMBOL(fc_get_event_number);
42230
42231 @@ -641,7 +641,7 @@ static __init int fc_transport_init(void)
42232 {
42233 int error;
42234
42235 - atomic_set(&fc_event_seq, 0);
42236 + atomic_set_unchecked(&fc_event_seq, 0);
42237
42238 error = transport_class_register(&fc_host_class);
42239 if (error)
42240 diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
42241 index de2f8c4..63c5278 100644
42242 --- a/drivers/scsi/scsi_transport_iscsi.c
42243 +++ b/drivers/scsi/scsi_transport_iscsi.c
42244 @@ -81,7 +81,7 @@ struct iscsi_internal {
42245 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
42246 };
42247
42248 -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
42249 +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
42250 static struct workqueue_struct *iscsi_eh_timer_workq;
42251
42252 /*
42253 @@ -728,7 +728,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
42254 int err;
42255
42256 ihost = shost->shost_data;
42257 - session->sid = atomic_add_return(1, &iscsi_session_nr);
42258 + session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
42259
42260 if (id == ISCSI_MAX_TARGET) {
42261 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
42262 @@ -2060,7 +2060,7 @@ static __init int iscsi_transport_init(void)
42263 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
42264 ISCSI_TRANSPORT_VERSION);
42265
42266 - atomic_set(&iscsi_session_nr, 0);
42267 + atomic_set_unchecked(&iscsi_session_nr, 0);
42268
42269 err = class_register(&iscsi_transport_class);
42270 if (err)
42271 diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
42272 index 21a045e..ec89e03 100644
42273 --- a/drivers/scsi/scsi_transport_srp.c
42274 +++ b/drivers/scsi/scsi_transport_srp.c
42275 @@ -33,7 +33,7 @@
42276 #include "scsi_transport_srp_internal.h"
42277
42278 struct srp_host_attrs {
42279 - atomic_t next_port_id;
42280 + atomic_unchecked_t next_port_id;
42281 };
42282 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
42283
42284 @@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
42285 struct Scsi_Host *shost = dev_to_shost(dev);
42286 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
42287
42288 - atomic_set(&srp_host->next_port_id, 0);
42289 + atomic_set_unchecked(&srp_host->next_port_id, 0);
42290 return 0;
42291 }
42292
42293 @@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
42294 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
42295 rport->roles = ids->roles;
42296
42297 - id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
42298 + id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
42299 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
42300
42301 transport_setup_device(&rport->dev);
42302 diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
42303 index 040f751..98a5ed2 100644
42304 --- a/drivers/scsi/sg.c
42305 +++ b/drivers/scsi/sg.c
42306 @@ -1064,7 +1064,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
42307 sdp->disk->disk_name,
42308 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
42309 NULL,
42310 - (char *)arg);
42311 + (char __user *)arg);
42312 case BLKTRACESTART:
42313 return blk_trace_startstop(sdp->device->request_queue, 1);
42314 case BLKTRACESTOP:
42315 @@ -2292,7 +2292,7 @@ struct sg_proc_leaf {
42316 const struct file_operations * fops;
42317 };
42318
42319 -static struct sg_proc_leaf sg_proc_leaf_arr[] = {
42320 +static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
42321 {"allow_dio", &adio_fops},
42322 {"debug", &debug_fops},
42323 {"def_reserved_size", &dressz_fops},
42324 @@ -2307,7 +2307,7 @@ sg_proc_init(void)
42325 {
42326 int k, mask;
42327 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
42328 - struct sg_proc_leaf * leaf;
42329 + const struct sg_proc_leaf * leaf;
42330
42331 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
42332 if (!sg_proc_sgp)
42333 diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
42334 index c19ca5e..3eb5959 100644
42335 --- a/drivers/scsi/sym53c8xx_2/sym_glue.c
42336 +++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
42337 @@ -1758,6 +1758,8 @@ static int __devinit sym2_probe(struct pci_dev *pdev,
42338 int do_iounmap = 0;
42339 int do_disable_device = 1;
42340
42341 + pax_track_stack();
42342 +
42343 memset(&sym_dev, 0, sizeof(sym_dev));
42344 memset(&nvram, 0, sizeof(nvram));
42345 sym_dev.pdev = pdev;
42346 diff --git a/drivers/serial/kgdboc.c b/drivers/serial/kgdboc.c
42347 index eadc1ab..2d81457 100644
42348 --- a/drivers/serial/kgdboc.c
42349 +++ b/drivers/serial/kgdboc.c
42350 @@ -18,7 +18,7 @@
42351
42352 #define MAX_CONFIG_LEN 40
42353
42354 -static struct kgdb_io kgdboc_io_ops;
42355 +static const struct kgdb_io kgdboc_io_ops;
42356
42357 /* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
42358 static int configured = -1;
42359 @@ -154,7 +154,7 @@ static void kgdboc_post_exp_handler(void)
42360 module_put(THIS_MODULE);
42361 }
42362
42363 -static struct kgdb_io kgdboc_io_ops = {
42364 +static const struct kgdb_io kgdboc_io_ops = {
42365 .name = "kgdboc",
42366 .read_char = kgdboc_get_char,
42367 .write_char = kgdboc_put_char,
42368 diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
42369 index b76f246..7f41af7 100644
42370 --- a/drivers/spi/spi.c
42371 +++ b/drivers/spi/spi.c
42372 @@ -774,7 +774,7 @@ int spi_sync(struct spi_device *spi, struct spi_message *message)
42373 EXPORT_SYMBOL_GPL(spi_sync);
42374
42375 /* portable code must never pass more than 32 bytes */
42376 -#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
42377 +#define SPI_BUFSIZ max(32U,SMP_CACHE_BYTES)
42378
42379 static u8 *buf;
42380
42381 diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
42382 index b9b37ff..19dfa23 100644
42383 --- a/drivers/staging/android/binder.c
42384 +++ b/drivers/staging/android/binder.c
42385 @@ -2761,7 +2761,7 @@ static void binder_vma_close(struct vm_area_struct *vma)
42386 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
42387 }
42388
42389 -static struct vm_operations_struct binder_vm_ops = {
42390 +static const struct vm_operations_struct binder_vm_ops = {
42391 .open = binder_vma_open,
42392 .close = binder_vma_close,
42393 };
42394 diff --git a/drivers/staging/b3dfg/b3dfg.c b/drivers/staging/b3dfg/b3dfg.c
42395 index cda26bb..39fed3f 100644
42396 --- a/drivers/staging/b3dfg/b3dfg.c
42397 +++ b/drivers/staging/b3dfg/b3dfg.c
42398 @@ -455,7 +455,7 @@ static int b3dfg_vma_fault(struct vm_area_struct *vma,
42399 return VM_FAULT_NOPAGE;
42400 }
42401
42402 -static struct vm_operations_struct b3dfg_vm_ops = {
42403 +static const struct vm_operations_struct b3dfg_vm_ops = {
42404 .fault = b3dfg_vma_fault,
42405 };
42406
42407 @@ -848,7 +848,7 @@ static int b3dfg_mmap(struct file *filp, struct vm_area_struct *vma)
42408 return r;
42409 }
42410
42411 -static struct file_operations b3dfg_fops = {
42412 +static const struct file_operations b3dfg_fops = {
42413 .owner = THIS_MODULE,
42414 .open = b3dfg_open,
42415 .release = b3dfg_release,
42416 diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
42417 index 908f25a..c9a579b 100644
42418 --- a/drivers/staging/comedi/comedi_fops.c
42419 +++ b/drivers/staging/comedi/comedi_fops.c
42420 @@ -1389,7 +1389,7 @@ void comedi_unmap(struct vm_area_struct *area)
42421 mutex_unlock(&dev->mutex);
42422 }
42423
42424 -static struct vm_operations_struct comedi_vm_ops = {
42425 +static const struct vm_operations_struct comedi_vm_ops = {
42426 .close = comedi_unmap,
42427 };
42428
42429 diff --git a/drivers/staging/dream/qdsp5/adsp_driver.c b/drivers/staging/dream/qdsp5/adsp_driver.c
42430 index e55a0db..577b776 100644
42431 --- a/drivers/staging/dream/qdsp5/adsp_driver.c
42432 +++ b/drivers/staging/dream/qdsp5/adsp_driver.c
42433 @@ -576,7 +576,7 @@ static struct adsp_device *inode_to_device(struct inode *inode)
42434 static dev_t adsp_devno;
42435 static struct class *adsp_class;
42436
42437 -static struct file_operations adsp_fops = {
42438 +static const struct file_operations adsp_fops = {
42439 .owner = THIS_MODULE,
42440 .open = adsp_open,
42441 .unlocked_ioctl = adsp_ioctl,
42442 diff --git a/drivers/staging/dream/qdsp5/audio_aac.c b/drivers/staging/dream/qdsp5/audio_aac.c
42443 index ad2390f..4116ee8 100644
42444 --- a/drivers/staging/dream/qdsp5/audio_aac.c
42445 +++ b/drivers/staging/dream/qdsp5/audio_aac.c
42446 @@ -1022,7 +1022,7 @@ done:
42447 return rc;
42448 }
42449
42450 -static struct file_operations audio_aac_fops = {
42451 +static const struct file_operations audio_aac_fops = {
42452 .owner = THIS_MODULE,
42453 .open = audio_open,
42454 .release = audio_release,
42455 diff --git a/drivers/staging/dream/qdsp5/audio_amrnb.c b/drivers/staging/dream/qdsp5/audio_amrnb.c
42456 index cd818a5..870b37b 100644
42457 --- a/drivers/staging/dream/qdsp5/audio_amrnb.c
42458 +++ b/drivers/staging/dream/qdsp5/audio_amrnb.c
42459 @@ -833,7 +833,7 @@ done:
42460 return rc;
42461 }
42462
42463 -static struct file_operations audio_amrnb_fops = {
42464 +static const struct file_operations audio_amrnb_fops = {
42465 .owner = THIS_MODULE,
42466 .open = audamrnb_open,
42467 .release = audamrnb_release,
42468 diff --git a/drivers/staging/dream/qdsp5/audio_evrc.c b/drivers/staging/dream/qdsp5/audio_evrc.c
42469 index 4b43e18..cedafda 100644
42470 --- a/drivers/staging/dream/qdsp5/audio_evrc.c
42471 +++ b/drivers/staging/dream/qdsp5/audio_evrc.c
42472 @@ -805,7 +805,7 @@ dma_fail:
42473 return rc;
42474 }
42475
42476 -static struct file_operations audio_evrc_fops = {
42477 +static const struct file_operations audio_evrc_fops = {
42478 .owner = THIS_MODULE,
42479 .open = audevrc_open,
42480 .release = audevrc_release,
42481 diff --git a/drivers/staging/dream/qdsp5/audio_in.c b/drivers/staging/dream/qdsp5/audio_in.c
42482 index 3d950a2..9431118 100644
42483 --- a/drivers/staging/dream/qdsp5/audio_in.c
42484 +++ b/drivers/staging/dream/qdsp5/audio_in.c
42485 @@ -913,7 +913,7 @@ static int audpre_open(struct inode *inode, struct file *file)
42486 return 0;
42487 }
42488
42489 -static struct file_operations audio_fops = {
42490 +static const struct file_operations audio_fops = {
42491 .owner = THIS_MODULE,
42492 .open = audio_in_open,
42493 .release = audio_in_release,
42494 @@ -922,7 +922,7 @@ static struct file_operations audio_fops = {
42495 .unlocked_ioctl = audio_in_ioctl,
42496 };
42497
42498 -static struct file_operations audpre_fops = {
42499 +static const struct file_operations audpre_fops = {
42500 .owner = THIS_MODULE,
42501 .open = audpre_open,
42502 .unlocked_ioctl = audpre_ioctl,
42503 diff --git a/drivers/staging/dream/qdsp5/audio_mp3.c b/drivers/staging/dream/qdsp5/audio_mp3.c
42504 index b95574f..286c2f4 100644
42505 --- a/drivers/staging/dream/qdsp5/audio_mp3.c
42506 +++ b/drivers/staging/dream/qdsp5/audio_mp3.c
42507 @@ -941,7 +941,7 @@ done:
42508 return rc;
42509 }
42510
42511 -static struct file_operations audio_mp3_fops = {
42512 +static const struct file_operations audio_mp3_fops = {
42513 .owner = THIS_MODULE,
42514 .open = audio_open,
42515 .release = audio_release,
42516 diff --git a/drivers/staging/dream/qdsp5/audio_out.c b/drivers/staging/dream/qdsp5/audio_out.c
42517 index d1adcf6..f8f9833 100644
42518 --- a/drivers/staging/dream/qdsp5/audio_out.c
42519 +++ b/drivers/staging/dream/qdsp5/audio_out.c
42520 @@ -810,7 +810,7 @@ static int audpp_open(struct inode *inode, struct file *file)
42521 return 0;
42522 }
42523
42524 -static struct file_operations audio_fops = {
42525 +static const struct file_operations audio_fops = {
42526 .owner = THIS_MODULE,
42527 .open = audio_open,
42528 .release = audio_release,
42529 @@ -819,7 +819,7 @@ static struct file_operations audio_fops = {
42530 .unlocked_ioctl = audio_ioctl,
42531 };
42532
42533 -static struct file_operations audpp_fops = {
42534 +static const struct file_operations audpp_fops = {
42535 .owner = THIS_MODULE,
42536 .open = audpp_open,
42537 .unlocked_ioctl = audpp_ioctl,
42538 diff --git a/drivers/staging/dream/qdsp5/audio_qcelp.c b/drivers/staging/dream/qdsp5/audio_qcelp.c
42539 index f0f50e3..f6b9dbc 100644
42540 --- a/drivers/staging/dream/qdsp5/audio_qcelp.c
42541 +++ b/drivers/staging/dream/qdsp5/audio_qcelp.c
42542 @@ -816,7 +816,7 @@ err:
42543 return rc;
42544 }
42545
42546 -static struct file_operations audio_qcelp_fops = {
42547 +static const struct file_operations audio_qcelp_fops = {
42548 .owner = THIS_MODULE,
42549 .open = audqcelp_open,
42550 .release = audqcelp_release,
42551 diff --git a/drivers/staging/dream/qdsp5/snd.c b/drivers/staging/dream/qdsp5/snd.c
42552 index 037d7ff..5469ec3 100644
42553 --- a/drivers/staging/dream/qdsp5/snd.c
42554 +++ b/drivers/staging/dream/qdsp5/snd.c
42555 @@ -242,7 +242,7 @@ err:
42556 return rc;
42557 }
42558
42559 -static struct file_operations snd_fops = {
42560 +static const struct file_operations snd_fops = {
42561 .owner = THIS_MODULE,
42562 .open = snd_open,
42563 .release = snd_release,
42564 diff --git a/drivers/staging/dream/smd/smd_qmi.c b/drivers/staging/dream/smd/smd_qmi.c
42565 index d4e7d88..0ea632a 100644
42566 --- a/drivers/staging/dream/smd/smd_qmi.c
42567 +++ b/drivers/staging/dream/smd/smd_qmi.c
42568 @@ -793,7 +793,7 @@ static int qmi_release(struct inode *ip, struct file *fp)
42569 return 0;
42570 }
42571
42572 -static struct file_operations qmi_fops = {
42573 +static const struct file_operations qmi_fops = {
42574 .owner = THIS_MODULE,
42575 .read = qmi_read,
42576 .write = qmi_write,
42577 diff --git a/drivers/staging/dream/smd/smd_rpcrouter_device.c b/drivers/staging/dream/smd/smd_rpcrouter_device.c
42578 index cd3910b..ff053d3 100644
42579 --- a/drivers/staging/dream/smd/smd_rpcrouter_device.c
42580 +++ b/drivers/staging/dream/smd/smd_rpcrouter_device.c
42581 @@ -214,7 +214,7 @@ static long rpcrouter_ioctl(struct file *filp, unsigned int cmd,
42582 return rc;
42583 }
42584
42585 -static struct file_operations rpcrouter_server_fops = {
42586 +static const struct file_operations rpcrouter_server_fops = {
42587 .owner = THIS_MODULE,
42588 .open = rpcrouter_open,
42589 .release = rpcrouter_release,
42590 @@ -224,7 +224,7 @@ static struct file_operations rpcrouter_server_fops = {
42591 .unlocked_ioctl = rpcrouter_ioctl,
42592 };
42593
42594 -static struct file_operations rpcrouter_router_fops = {
42595 +static const struct file_operations rpcrouter_router_fops = {
42596 .owner = THIS_MODULE,
42597 .open = rpcrouter_open,
42598 .release = rpcrouter_release,
42599 diff --git a/drivers/staging/dst/dcore.c b/drivers/staging/dst/dcore.c
42600 index c24e4e0..07665be 100644
42601 --- a/drivers/staging/dst/dcore.c
42602 +++ b/drivers/staging/dst/dcore.c
42603 @@ -149,7 +149,7 @@ static int dst_bdev_release(struct gendisk *disk, fmode_t mode)
42604 return 0;
42605 }
42606
42607 -static struct block_device_operations dst_blk_ops = {
42608 +static const struct block_device_operations dst_blk_ops = {
42609 .open = dst_bdev_open,
42610 .release = dst_bdev_release,
42611 .owner = THIS_MODULE,
42612 @@ -588,7 +588,7 @@ static struct dst_node *dst_alloc_node(struct dst_ctl *ctl,
42613 n->size = ctl->size;
42614
42615 atomic_set(&n->refcnt, 1);
42616 - atomic_long_set(&n->gen, 0);
42617 + atomic_long_set_unchecked(&n->gen, 0);
42618 snprintf(n->name, sizeof(n->name), "%s", ctl->name);
42619
42620 err = dst_node_sysfs_init(n);
42621 diff --git a/drivers/staging/dst/trans.c b/drivers/staging/dst/trans.c
42622 index 557d372..8d84422 100644
42623 --- a/drivers/staging/dst/trans.c
42624 +++ b/drivers/staging/dst/trans.c
42625 @@ -169,7 +169,7 @@ int dst_process_bio(struct dst_node *n, struct bio *bio)
42626 t->error = 0;
42627 t->retries = 0;
42628 atomic_set(&t->refcnt, 1);
42629 - t->gen = atomic_long_inc_return(&n->gen);
42630 + t->gen = atomic_long_inc_return_unchecked(&n->gen);
42631
42632 t->enc = bio_data_dir(bio);
42633 dst_bio_to_cmd(bio, &t->cmd, DST_IO, t->gen);
42634 diff --git a/drivers/staging/et131x/et1310_tx.c b/drivers/staging/et131x/et1310_tx.c
42635 index 94f7752..d051514 100644
42636 --- a/drivers/staging/et131x/et1310_tx.c
42637 +++ b/drivers/staging/et131x/et1310_tx.c
42638 @@ -710,11 +710,11 @@ inline void et131x_free_send_packet(struct et131x_adapter *etdev,
42639 struct net_device_stats *stats = &etdev->net_stats;
42640
42641 if (pMpTcb->Flags & fMP_DEST_BROAD)
42642 - atomic_inc(&etdev->Stats.brdcstxmt);
42643 + atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
42644 else if (pMpTcb->Flags & fMP_DEST_MULTI)
42645 - atomic_inc(&etdev->Stats.multixmt);
42646 + atomic_inc_unchecked(&etdev->Stats.multixmt);
42647 else
42648 - atomic_inc(&etdev->Stats.unixmt);
42649 + atomic_inc_unchecked(&etdev->Stats.unixmt);
42650
42651 if (pMpTcb->Packet) {
42652 stats->tx_bytes += pMpTcb->Packet->len;
42653 diff --git a/drivers/staging/et131x/et131x_adapter.h b/drivers/staging/et131x/et131x_adapter.h
42654 index 1dfe06f..f469b4d 100644
42655 --- a/drivers/staging/et131x/et131x_adapter.h
42656 +++ b/drivers/staging/et131x/et131x_adapter.h
42657 @@ -145,11 +145,11 @@ typedef struct _ce_stats_t {
42658 * operations
42659 */
42660 u32 unircv; /* # multicast packets received */
42661 - atomic_t unixmt; /* # multicast packets for Tx */
42662 + atomic_unchecked_t unixmt; /* # multicast packets for Tx */
42663 u32 multircv; /* # multicast packets received */
42664 - atomic_t multixmt; /* # multicast packets for Tx */
42665 + atomic_unchecked_t multixmt; /* # multicast packets for Tx */
42666 u32 brdcstrcv; /* # broadcast packets received */
42667 - atomic_t brdcstxmt; /* # broadcast packets for Tx */
42668 + atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
42669 u32 norcvbuf; /* # Rx packets discarded */
42670 u32 noxmtbuf; /* # Tx packets discarded */
42671
42672 diff --git a/drivers/staging/go7007/go7007-v4l2.c b/drivers/staging/go7007/go7007-v4l2.c
42673 index 4bd353a..e28f455 100644
42674 --- a/drivers/staging/go7007/go7007-v4l2.c
42675 +++ b/drivers/staging/go7007/go7007-v4l2.c
42676 @@ -1700,7 +1700,7 @@ static int go7007_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
42677 return 0;
42678 }
42679
42680 -static struct vm_operations_struct go7007_vm_ops = {
42681 +static const struct vm_operations_struct go7007_vm_ops = {
42682 .open = go7007_vm_open,
42683 .close = go7007_vm_close,
42684 .fault = go7007_vm_fault,
42685 diff --git a/drivers/staging/hv/Channel.c b/drivers/staging/hv/Channel.c
42686 index 366dc95..b974d87 100644
42687 --- a/drivers/staging/hv/Channel.c
42688 +++ b/drivers/staging/hv/Channel.c
42689 @@ -464,8 +464,8 @@ int VmbusChannelEstablishGpadl(struct vmbus_channel *Channel, void *Kbuffer,
42690
42691 DPRINT_ENTER(VMBUS);
42692
42693 - nextGpadlHandle = atomic_read(&gVmbusConnection.NextGpadlHandle);
42694 - atomic_inc(&gVmbusConnection.NextGpadlHandle);
42695 + nextGpadlHandle = atomic_read_unchecked(&gVmbusConnection.NextGpadlHandle);
42696 + atomic_inc_unchecked(&gVmbusConnection.NextGpadlHandle);
42697
42698 VmbusChannelCreateGpadlHeader(Kbuffer, Size, &msgInfo, &msgCount);
42699 ASSERT(msgInfo != NULL);
42700 diff --git a/drivers/staging/hv/Hv.c b/drivers/staging/hv/Hv.c
42701 index b12237f..01ae28a 100644
42702 --- a/drivers/staging/hv/Hv.c
42703 +++ b/drivers/staging/hv/Hv.c
42704 @@ -161,7 +161,7 @@ static u64 HvDoHypercall(u64 Control, void *Input, void *Output)
42705 u64 outputAddress = (Output) ? virt_to_phys(Output) : 0;
42706 u32 outputAddressHi = outputAddress >> 32;
42707 u32 outputAddressLo = outputAddress & 0xFFFFFFFF;
42708 - volatile void *hypercallPage = gHvContext.HypercallPage;
42709 + volatile void *hypercallPage = ktva_ktla(gHvContext.HypercallPage);
42710
42711 DPRINT_DBG(VMBUS, "Hypercall <control %llx input %p output %p>",
42712 Control, Input, Output);
42713 diff --git a/drivers/staging/hv/VmbusApi.h b/drivers/staging/hv/VmbusApi.h
42714 index d089bb1..2ebc158 100644
42715 --- a/drivers/staging/hv/VmbusApi.h
42716 +++ b/drivers/staging/hv/VmbusApi.h
42717 @@ -109,7 +109,7 @@ struct vmbus_channel_interface {
42718 u32 *GpadlHandle);
42719 int (*TeardownGpadl)(struct hv_device *device, u32 GpadlHandle);
42720 void (*GetInfo)(struct hv_device *dev, struct hv_device_info *devinfo);
42721 -};
42722 +} __no_const;
42723
42724 /* Base driver object */
42725 struct hv_driver {
42726 diff --git a/drivers/staging/hv/VmbusPrivate.h b/drivers/staging/hv/VmbusPrivate.h
42727 index 5a37cce..6ecc88c 100644
42728 --- a/drivers/staging/hv/VmbusPrivate.h
42729 +++ b/drivers/staging/hv/VmbusPrivate.h
42730 @@ -59,7 +59,7 @@ enum VMBUS_CONNECT_STATE {
42731 struct VMBUS_CONNECTION {
42732 enum VMBUS_CONNECT_STATE ConnectState;
42733
42734 - atomic_t NextGpadlHandle;
42735 + atomic_unchecked_t NextGpadlHandle;
42736
42737 /*
42738 * Represents channel interrupts. Each bit position represents a
42739 diff --git a/drivers/staging/hv/blkvsc_drv.c b/drivers/staging/hv/blkvsc_drv.c
42740 index 871a202..ca50ddf 100644
42741 --- a/drivers/staging/hv/blkvsc_drv.c
42742 +++ b/drivers/staging/hv/blkvsc_drv.c
42743 @@ -153,7 +153,7 @@ static int blkvsc_ringbuffer_size = BLKVSC_RING_BUFFER_SIZE;
42744 /* The one and only one */
42745 static struct blkvsc_driver_context g_blkvsc_drv;
42746
42747 -static struct block_device_operations block_ops = {
42748 +static const struct block_device_operations block_ops = {
42749 .owner = THIS_MODULE,
42750 .open = blkvsc_open,
42751 .release = blkvsc_release,
42752 diff --git a/drivers/staging/hv/vmbus_drv.c b/drivers/staging/hv/vmbus_drv.c
42753 index 6acc49a..fbc8d46 100644
42754 --- a/drivers/staging/hv/vmbus_drv.c
42755 +++ b/drivers/staging/hv/vmbus_drv.c
42756 @@ -532,7 +532,7 @@ static int vmbus_child_device_register(struct hv_device *root_device_obj,
42757 to_device_context(root_device_obj);
42758 struct device_context *child_device_ctx =
42759 to_device_context(child_device_obj);
42760 - static atomic_t device_num = ATOMIC_INIT(0);
42761 + static atomic_unchecked_t device_num = ATOMIC_INIT(0);
42762
42763 DPRINT_ENTER(VMBUS_DRV);
42764
42765 @@ -541,7 +541,7 @@ static int vmbus_child_device_register(struct hv_device *root_device_obj,
42766
42767 /* Set the device name. Otherwise, device_register() will fail. */
42768 dev_set_name(&child_device_ctx->device, "vmbus_0_%d",
42769 - atomic_inc_return(&device_num));
42770 + atomic_inc_return_unchecked(&device_num));
42771
42772 /* The new device belongs to this bus */
42773 child_device_ctx->device.bus = &g_vmbus_drv.bus; /* device->dev.bus; */
42774 diff --git a/drivers/staging/iio/ring_generic.h b/drivers/staging/iio/ring_generic.h
42775 index d926189..17b19fd 100644
42776 --- a/drivers/staging/iio/ring_generic.h
42777 +++ b/drivers/staging/iio/ring_generic.h
42778 @@ -87,7 +87,7 @@ struct iio_ring_access_funcs {
42779
42780 int (*is_enabled)(struct iio_ring_buffer *ring);
42781 int (*enable)(struct iio_ring_buffer *ring);
42782 -};
42783 +} __no_const;
42784
42785 /**
42786 * struct iio_ring_buffer - general ring buffer structure
42787 diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
42788 index 1b237b7..88c624e 100644
42789 --- a/drivers/staging/octeon/ethernet-rx.c
42790 +++ b/drivers/staging/octeon/ethernet-rx.c
42791 @@ -406,11 +406,11 @@ void cvm_oct_tasklet_rx(unsigned long unused)
42792 /* Increment RX stats for virtual ports */
42793 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
42794 #ifdef CONFIG_64BIT
42795 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
42796 - atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
42797 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
42798 + atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
42799 #else
42800 - atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
42801 - atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
42802 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
42803 + atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
42804 #endif
42805 }
42806 netif_receive_skb(skb);
42807 @@ -424,9 +424,9 @@ void cvm_oct_tasklet_rx(unsigned long unused)
42808 dev->name);
42809 */
42810 #ifdef CONFIG_64BIT
42811 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
42812 + atomic64_add_unchecked(1, (atomic64_t *)&priv->stats.rx_dropped);
42813 #else
42814 - atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
42815 + atomic_add_unchecked(1, (atomic_t *)&priv->stats.rx_dropped);
42816 #endif
42817 dev_kfree_skb_irq(skb);
42818 }
42819 diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
42820 index 492c502..d9909f1 100644
42821 --- a/drivers/staging/octeon/ethernet.c
42822 +++ b/drivers/staging/octeon/ethernet.c
42823 @@ -294,11 +294,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
42824 * since the RX tasklet also increments it.
42825 */
42826 #ifdef CONFIG_64BIT
42827 - atomic64_add(rx_status.dropped_packets,
42828 - (atomic64_t *)&priv->stats.rx_dropped);
42829 + atomic64_add_unchecked(rx_status.dropped_packets,
42830 + (atomic64_unchecked_t *)&priv->stats.rx_dropped);
42831 #else
42832 - atomic_add(rx_status.dropped_packets,
42833 - (atomic_t *)&priv->stats.rx_dropped);
42834 + atomic_add_unchecked(rx_status.dropped_packets,
42835 + (atomic_unchecked_t *)&priv->stats.rx_dropped);
42836 #endif
42837 }
42838
42839 diff --git a/drivers/staging/otus/80211core/pub_zfi.h b/drivers/staging/otus/80211core/pub_zfi.h
42840 index a35bd5d..28fff45 100644
42841 --- a/drivers/staging/otus/80211core/pub_zfi.h
42842 +++ b/drivers/staging/otus/80211core/pub_zfi.h
42843 @@ -531,7 +531,7 @@ struct zsCbFuncTbl
42844 u8_t (*zfcbClassifyTxPacket)(zdev_t* dev, zbuf_t* buf);
42845
42846 void (*zfcbHwWatchDogNotify)(zdev_t* dev);
42847 -};
42848 +} __no_const;
42849
42850 extern void zfZeroMemory(u8_t* va, u16_t length);
42851 #define ZM_INIT_CB_FUNC_TABLE(p) zfZeroMemory((u8_t *)p, sizeof(struct zsCbFuncTbl));
42852 diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
42853 index c39a25f..696f5aa 100644
42854 --- a/drivers/staging/panel/panel.c
42855 +++ b/drivers/staging/panel/panel.c
42856 @@ -1305,7 +1305,7 @@ static int lcd_release(struct inode *inode, struct file *file)
42857 return 0;
42858 }
42859
42860 -static struct file_operations lcd_fops = {
42861 +static const struct file_operations lcd_fops = {
42862 .write = lcd_write,
42863 .open = lcd_open,
42864 .release = lcd_release,
42865 @@ -1565,7 +1565,7 @@ static int keypad_release(struct inode *inode, struct file *file)
42866 return 0;
42867 }
42868
42869 -static struct file_operations keypad_fops = {
42870 +static const struct file_operations keypad_fops = {
42871 .read = keypad_read, /* read */
42872 .open = keypad_open, /* open */
42873 .release = keypad_release, /* close */
42874 diff --git a/drivers/staging/phison/phison.c b/drivers/staging/phison/phison.c
42875 index 270ebcb..37e46af 100644
42876 --- a/drivers/staging/phison/phison.c
42877 +++ b/drivers/staging/phison/phison.c
42878 @@ -43,7 +43,7 @@ static struct scsi_host_template phison_sht = {
42879 ATA_BMDMA_SHT(DRV_NAME),
42880 };
42881
42882 -static struct ata_port_operations phison_ops = {
42883 +static const struct ata_port_operations phison_ops = {
42884 .inherits = &ata_bmdma_port_ops,
42885 .prereset = phison_pre_reset,
42886 };
42887 diff --git a/drivers/staging/poch/poch.c b/drivers/staging/poch/poch.c
42888 index 2eb8e3d..57616a7 100644
42889 --- a/drivers/staging/poch/poch.c
42890 +++ b/drivers/staging/poch/poch.c
42891 @@ -1057,7 +1057,7 @@ static int poch_ioctl(struct inode *inode, struct file *filp,
42892 return 0;
42893 }
42894
42895 -static struct file_operations poch_fops = {
42896 +static const struct file_operations poch_fops = {
42897 .owner = THIS_MODULE,
42898 .open = poch_open,
42899 .release = poch_release,
42900 diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c
42901 index c94de31..19402bc 100644
42902 --- a/drivers/staging/pohmelfs/inode.c
42903 +++ b/drivers/staging/pohmelfs/inode.c
42904 @@ -1850,7 +1850,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
42905 mutex_init(&psb->mcache_lock);
42906 psb->mcache_root = RB_ROOT;
42907 psb->mcache_timeout = msecs_to_jiffies(5000);
42908 - atomic_long_set(&psb->mcache_gen, 0);
42909 + atomic_long_set_unchecked(&psb->mcache_gen, 0);
42910
42911 psb->trans_max_pages = 100;
42912
42913 @@ -1865,7 +1865,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
42914 INIT_LIST_HEAD(&psb->crypto_ready_list);
42915 INIT_LIST_HEAD(&psb->crypto_active_list);
42916
42917 - atomic_set(&psb->trans_gen, 1);
42918 + atomic_set_unchecked(&psb->trans_gen, 1);
42919 atomic_long_set(&psb->total_inodes, 0);
42920
42921 mutex_init(&psb->state_lock);
42922 diff --git a/drivers/staging/pohmelfs/mcache.c b/drivers/staging/pohmelfs/mcache.c
42923 index e22665c..a2a9390 100644
42924 --- a/drivers/staging/pohmelfs/mcache.c
42925 +++ b/drivers/staging/pohmelfs/mcache.c
42926 @@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_alloc(struct pohmelfs_sb *psb, u64 start
42927 m->data = data;
42928 m->start = start;
42929 m->size = size;
42930 - m->gen = atomic_long_inc_return(&psb->mcache_gen);
42931 + m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
42932
42933 mutex_lock(&psb->mcache_lock);
42934 err = pohmelfs_mcache_insert(psb, m);
42935 diff --git a/drivers/staging/pohmelfs/netfs.h b/drivers/staging/pohmelfs/netfs.h
42936 index 623a07d..4035c19 100644
42937 --- a/drivers/staging/pohmelfs/netfs.h
42938 +++ b/drivers/staging/pohmelfs/netfs.h
42939 @@ -570,14 +570,14 @@ struct pohmelfs_config;
42940 struct pohmelfs_sb {
42941 struct rb_root mcache_root;
42942 struct mutex mcache_lock;
42943 - atomic_long_t mcache_gen;
42944 + atomic_long_unchecked_t mcache_gen;
42945 unsigned long mcache_timeout;
42946
42947 unsigned int idx;
42948
42949 unsigned int trans_retries;
42950
42951 - atomic_t trans_gen;
42952 + atomic_unchecked_t trans_gen;
42953
42954 unsigned int crypto_attached_size;
42955 unsigned int crypto_align_size;
42956 diff --git a/drivers/staging/pohmelfs/trans.c b/drivers/staging/pohmelfs/trans.c
42957 index 36a2535..0591bf4 100644
42958 --- a/drivers/staging/pohmelfs/trans.c
42959 +++ b/drivers/staging/pohmelfs/trans.c
42960 @@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_trans *t, struct pohmelfs_sb *psb)
42961 int err;
42962 struct netfs_cmd *cmd = t->iovec.iov_base;
42963
42964 - t->gen = atomic_inc_return(&psb->trans_gen);
42965 + t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
42966
42967 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
42968 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
42969 diff --git a/drivers/staging/sep/sep_driver.c b/drivers/staging/sep/sep_driver.c
42970 index f890a16..509ece8 100644
42971 --- a/drivers/staging/sep/sep_driver.c
42972 +++ b/drivers/staging/sep/sep_driver.c
42973 @@ -2603,7 +2603,7 @@ static struct pci_driver sep_pci_driver = {
42974 static dev_t sep_devno;
42975
42976 /* the files operations structure of the driver */
42977 -static struct file_operations sep_file_operations = {
42978 +static const struct file_operations sep_file_operations = {
42979 .owner = THIS_MODULE,
42980 .ioctl = sep_ioctl,
42981 .poll = sep_poll,
42982 diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
42983 index 5e16bc3..7655b10 100644
42984 --- a/drivers/staging/usbip/usbip_common.h
42985 +++ b/drivers/staging/usbip/usbip_common.h
42986 @@ -374,7 +374,7 @@ struct usbip_device {
42987 void (*shutdown)(struct usbip_device *);
42988 void (*reset)(struct usbip_device *);
42989 void (*unusable)(struct usbip_device *);
42990 - } eh_ops;
42991 + } __no_const eh_ops;
42992 };
42993
42994
42995 diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
42996 index 57f7946..d9df23d 100644
42997 --- a/drivers/staging/usbip/vhci.h
42998 +++ b/drivers/staging/usbip/vhci.h
42999 @@ -92,7 +92,7 @@ struct vhci_hcd {
43000 unsigned resuming:1;
43001 unsigned long re_timeout;
43002
43003 - atomic_t seqnum;
43004 + atomic_unchecked_t seqnum;
43005
43006 /*
43007 * NOTE:
43008 diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
43009 index 20cd7db..c2693ff 100644
43010 --- a/drivers/staging/usbip/vhci_hcd.c
43011 +++ b/drivers/staging/usbip/vhci_hcd.c
43012 @@ -534,7 +534,7 @@ static void vhci_tx_urb(struct urb *urb)
43013 return;
43014 }
43015
43016 - priv->seqnum = atomic_inc_return(&the_controller->seqnum);
43017 + priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
43018 if (priv->seqnum == 0xffff)
43019 usbip_uinfo("seqnum max\n");
43020
43021 @@ -793,7 +793,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
43022 return -ENOMEM;
43023 }
43024
43025 - unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
43026 + unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
43027 if (unlink->seqnum == 0xffff)
43028 usbip_uinfo("seqnum max\n");
43029
43030 @@ -988,7 +988,7 @@ static int vhci_start(struct usb_hcd *hcd)
43031 vdev->rhport = rhport;
43032 }
43033
43034 - atomic_set(&vhci->seqnum, 0);
43035 + atomic_set_unchecked(&vhci->seqnum, 0);
43036 spin_lock_init(&vhci->lock);
43037
43038
43039 diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
43040 index 7fd76fe..673695a 100644
43041 --- a/drivers/staging/usbip/vhci_rx.c
43042 +++ b/drivers/staging/usbip/vhci_rx.c
43043 @@ -79,7 +79,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
43044 usbip_uerr("cannot find a urb of seqnum %u\n",
43045 pdu->base.seqnum);
43046 usbip_uinfo("max seqnum %d\n",
43047 - atomic_read(&the_controller->seqnum));
43048 + atomic_read_unchecked(&the_controller->seqnum));
43049 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
43050 return;
43051 }
43052 diff --git a/drivers/staging/vme/devices/vme_user.c b/drivers/staging/vme/devices/vme_user.c
43053 index 7891288..8e31300 100644
43054 --- a/drivers/staging/vme/devices/vme_user.c
43055 +++ b/drivers/staging/vme/devices/vme_user.c
43056 @@ -136,7 +136,7 @@ static int vme_user_ioctl(struct inode *, struct file *, unsigned int,
43057 static int __init vme_user_probe(struct device *, int, int);
43058 static int __exit vme_user_remove(struct device *, int, int);
43059
43060 -static struct file_operations vme_user_fops = {
43061 +static const struct file_operations vme_user_fops = {
43062 .open = vme_user_open,
43063 .release = vme_user_release,
43064 .read = vme_user_read,
43065 diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
43066 index 58abf44..00c1fc8 100644
43067 --- a/drivers/staging/vt6655/hostap.c
43068 +++ b/drivers/staging/vt6655/hostap.c
43069 @@ -84,7 +84,7 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
43070 PSDevice apdev_priv;
43071 struct net_device *dev = pDevice->dev;
43072 int ret;
43073 - const struct net_device_ops apdev_netdev_ops = {
43074 + net_device_ops_no_const apdev_netdev_ops = {
43075 .ndo_start_xmit = pDevice->tx_80211,
43076 };
43077
43078 diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
43079 index 0c8267a..db1f363 100644
43080 --- a/drivers/staging/vt6656/hostap.c
43081 +++ b/drivers/staging/vt6656/hostap.c
43082 @@ -86,7 +86,7 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
43083 PSDevice apdev_priv;
43084 struct net_device *dev = pDevice->dev;
43085 int ret;
43086 - const struct net_device_ops apdev_netdev_ops = {
43087 + net_device_ops_no_const apdev_netdev_ops = {
43088 .ndo_start_xmit = pDevice->tx_80211,
43089 };
43090
43091 diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
43092 index 925678b..da7f5ed 100644
43093 --- a/drivers/staging/wlan-ng/hfa384x_usb.c
43094 +++ b/drivers/staging/wlan-ng/hfa384x_usb.c
43095 @@ -205,7 +205,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
43096
43097 struct usbctlx_completor {
43098 int (*complete) (struct usbctlx_completor *);
43099 -};
43100 +} __no_const;
43101 typedef struct usbctlx_completor usbctlx_completor_t;
43102
43103 static int
43104 diff --git a/drivers/telephony/ixj.c b/drivers/telephony/ixj.c
43105 index 40de151..924f268 100644
43106 --- a/drivers/telephony/ixj.c
43107 +++ b/drivers/telephony/ixj.c
43108 @@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
43109 bool mContinue;
43110 char *pIn, *pOut;
43111
43112 + pax_track_stack();
43113 +
43114 if (!SCI_Prepare(j))
43115 return 0;
43116
43117 diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
43118 index e941367..b631f5a 100644
43119 --- a/drivers/uio/uio.c
43120 +++ b/drivers/uio/uio.c
43121 @@ -23,6 +23,7 @@
43122 #include <linux/string.h>
43123 #include <linux/kobject.h>
43124 #include <linux/uio_driver.h>
43125 +#include <asm/local.h>
43126
43127 #define UIO_MAX_DEVICES 255
43128
43129 @@ -30,10 +31,10 @@ struct uio_device {
43130 struct module *owner;
43131 struct device *dev;
43132 int minor;
43133 - atomic_t event;
43134 + atomic_unchecked_t event;
43135 struct fasync_struct *async_queue;
43136 wait_queue_head_t wait;
43137 - int vma_count;
43138 + local_t vma_count;
43139 struct uio_info *info;
43140 struct kobject *map_dir;
43141 struct kobject *portio_dir;
43142 @@ -129,7 +130,7 @@ static ssize_t map_type_show(struct kobject *kobj, struct attribute *attr,
43143 return entry->show(mem, buf);
43144 }
43145
43146 -static struct sysfs_ops map_sysfs_ops = {
43147 +static const struct sysfs_ops map_sysfs_ops = {
43148 .show = map_type_show,
43149 };
43150
43151 @@ -217,7 +218,7 @@ static ssize_t portio_type_show(struct kobject *kobj, struct attribute *attr,
43152 return entry->show(port, buf);
43153 }
43154
43155 -static struct sysfs_ops portio_sysfs_ops = {
43156 +static const struct sysfs_ops portio_sysfs_ops = {
43157 .show = portio_type_show,
43158 };
43159
43160 @@ -255,7 +256,7 @@ static ssize_t show_event(struct device *dev,
43161 struct uio_device *idev = dev_get_drvdata(dev);
43162 if (idev)
43163 return sprintf(buf, "%u\n",
43164 - (unsigned int)atomic_read(&idev->event));
43165 + (unsigned int)atomic_read_unchecked(&idev->event));
43166 else
43167 return -ENODEV;
43168 }
43169 @@ -424,7 +425,7 @@ void uio_event_notify(struct uio_info *info)
43170 {
43171 struct uio_device *idev = info->uio_dev;
43172
43173 - atomic_inc(&idev->event);
43174 + atomic_inc_unchecked(&idev->event);
43175 wake_up_interruptible(&idev->wait);
43176 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
43177 }
43178 @@ -477,7 +478,7 @@ static int uio_open(struct inode *inode, struct file *filep)
43179 }
43180
43181 listener->dev = idev;
43182 - listener->event_count = atomic_read(&idev->event);
43183 + listener->event_count = atomic_read_unchecked(&idev->event);
43184 filep->private_data = listener;
43185
43186 if (idev->info->open) {
43187 @@ -528,7 +529,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
43188 return -EIO;
43189
43190 poll_wait(filep, &idev->wait, wait);
43191 - if (listener->event_count != atomic_read(&idev->event))
43192 + if (listener->event_count != atomic_read_unchecked(&idev->event))
43193 return POLLIN | POLLRDNORM;
43194 return 0;
43195 }
43196 @@ -553,7 +554,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
43197 do {
43198 set_current_state(TASK_INTERRUPTIBLE);
43199
43200 - event_count = atomic_read(&idev->event);
43201 + event_count = atomic_read_unchecked(&idev->event);
43202 if (event_count != listener->event_count) {
43203 if (copy_to_user(buf, &event_count, count))
43204 retval = -EFAULT;
43205 @@ -624,13 +625,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
43206 static void uio_vma_open(struct vm_area_struct *vma)
43207 {
43208 struct uio_device *idev = vma->vm_private_data;
43209 - idev->vma_count++;
43210 + local_inc(&idev->vma_count);
43211 }
43212
43213 static void uio_vma_close(struct vm_area_struct *vma)
43214 {
43215 struct uio_device *idev = vma->vm_private_data;
43216 - idev->vma_count--;
43217 + local_dec(&idev->vma_count);
43218 }
43219
43220 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
43221 @@ -840,7 +841,7 @@ int __uio_register_device(struct module *owner,
43222 idev->owner = owner;
43223 idev->info = info;
43224 init_waitqueue_head(&idev->wait);
43225 - atomic_set(&idev->event, 0);
43226 + atomic_set_unchecked(&idev->event, 0);
43227
43228 ret = uio_get_minor(idev);
43229 if (ret)
43230 diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
43231 index fbea856..06efea6 100644
43232 --- a/drivers/usb/atm/usbatm.c
43233 +++ b/drivers/usb/atm/usbatm.c
43234 @@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
43235 if (printk_ratelimit())
43236 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
43237 __func__, vpi, vci);
43238 - atomic_inc(&vcc->stats->rx_err);
43239 + atomic_inc_unchecked(&vcc->stats->rx_err);
43240 return;
43241 }
43242
43243 @@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
43244 if (length > ATM_MAX_AAL5_PDU) {
43245 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
43246 __func__, length, vcc);
43247 - atomic_inc(&vcc->stats->rx_err);
43248 + atomic_inc_unchecked(&vcc->stats->rx_err);
43249 goto out;
43250 }
43251
43252 @@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
43253 if (sarb->len < pdu_length) {
43254 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
43255 __func__, pdu_length, sarb->len, vcc);
43256 - atomic_inc(&vcc->stats->rx_err);
43257 + atomic_inc_unchecked(&vcc->stats->rx_err);
43258 goto out;
43259 }
43260
43261 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
43262 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
43263 __func__, vcc);
43264 - atomic_inc(&vcc->stats->rx_err);
43265 + atomic_inc_unchecked(&vcc->stats->rx_err);
43266 goto out;
43267 }
43268
43269 @@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
43270 if (printk_ratelimit())
43271 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
43272 __func__, length);
43273 - atomic_inc(&vcc->stats->rx_drop);
43274 + atomic_inc_unchecked(&vcc->stats->rx_drop);
43275 goto out;
43276 }
43277
43278 @@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
43279
43280 vcc->push(vcc, skb);
43281
43282 - atomic_inc(&vcc->stats->rx);
43283 + atomic_inc_unchecked(&vcc->stats->rx);
43284 out:
43285 skb_trim(sarb, 0);
43286 }
43287 @@ -616,7 +616,7 @@ static void usbatm_tx_process(unsigned long data)
43288 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
43289
43290 usbatm_pop(vcc, skb);
43291 - atomic_inc(&vcc->stats->tx);
43292 + atomic_inc_unchecked(&vcc->stats->tx);
43293
43294 skb = skb_dequeue(&instance->sndqueue);
43295 }
43296 @@ -775,11 +775,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
43297 if (!left--)
43298 return sprintf(page,
43299 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
43300 - atomic_read(&atm_dev->stats.aal5.tx),
43301 - atomic_read(&atm_dev->stats.aal5.tx_err),
43302 - atomic_read(&atm_dev->stats.aal5.rx),
43303 - atomic_read(&atm_dev->stats.aal5.rx_err),
43304 - atomic_read(&atm_dev->stats.aal5.rx_drop));
43305 + atomic_read_unchecked(&atm_dev->stats.aal5.tx),
43306 + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
43307 + atomic_read_unchecked(&atm_dev->stats.aal5.rx),
43308 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
43309 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
43310
43311 if (!left--) {
43312 if (instance->disconnected)
43313 diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
43314 index 24e6205..fe5a5d4 100644
43315 --- a/drivers/usb/core/hcd.c
43316 +++ b/drivers/usb/core/hcd.c
43317 @@ -2216,7 +2216,7 @@ EXPORT_SYMBOL_GPL(usb_hcd_platform_shutdown);
43318
43319 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
43320
43321 -struct usb_mon_operations *mon_ops;
43322 +const struct usb_mon_operations *mon_ops;
43323
43324 /*
43325 * The registration is unlocked.
43326 @@ -2226,7 +2226,7 @@ struct usb_mon_operations *mon_ops;
43327 * symbols from usbcore, usbcore gets referenced and cannot be unloaded first.
43328 */
43329
43330 -int usb_mon_register (struct usb_mon_operations *ops)
43331 +int usb_mon_register (const struct usb_mon_operations *ops)
43332 {
43333
43334 if (mon_ops)
43335 diff --git a/drivers/usb/core/hcd.h b/drivers/usb/core/hcd.h
43336 index bcbe104..9cfd1c6 100644
43337 --- a/drivers/usb/core/hcd.h
43338 +++ b/drivers/usb/core/hcd.h
43339 @@ -486,13 +486,13 @@ static inline void usbfs_cleanup(void) { }
43340 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
43341
43342 struct usb_mon_operations {
43343 - void (*urb_submit)(struct usb_bus *bus, struct urb *urb);
43344 - void (*urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
43345 - void (*urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
43346 + void (* const urb_submit)(struct usb_bus *bus, struct urb *urb);
43347 + void (* const urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
43348 + void (* const urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
43349 /* void (*urb_unlink)(struct usb_bus *bus, struct urb *urb); */
43350 };
43351
43352 -extern struct usb_mon_operations *mon_ops;
43353 +extern const struct usb_mon_operations *mon_ops;
43354
43355 static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb)
43356 {
43357 @@ -514,7 +514,7 @@ static inline void usbmon_urb_complete(struct usb_bus *bus, struct urb *urb,
43358 (*mon_ops->urb_complete)(bus, urb, status);
43359 }
43360
43361 -int usb_mon_register(struct usb_mon_operations *ops);
43362 +int usb_mon_register(const struct usb_mon_operations *ops);
43363 void usb_mon_deregister(void);
43364
43365 #else
43366 diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
43367 index 409cc94..a673bad 100644
43368 --- a/drivers/usb/core/message.c
43369 +++ b/drivers/usb/core/message.c
43370 @@ -914,8 +914,8 @@ char *usb_cache_string(struct usb_device *udev, int index)
43371 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
43372 if (buf) {
43373 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
43374 - if (len > 0) {
43375 - smallbuf = kmalloc(++len, GFP_NOIO);
43376 + if (len++ > 0) {
43377 + smallbuf = kmalloc(len, GFP_NOIO);
43378 if (!smallbuf)
43379 return buf;
43380 memcpy(smallbuf, buf, len);
43381 diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
43382 index 62ff5e7..530b74e 100644
43383 --- a/drivers/usb/misc/appledisplay.c
43384 +++ b/drivers/usb/misc/appledisplay.c
43385 @@ -178,7 +178,7 @@ static int appledisplay_bl_get_brightness(struct backlight_device *bd)
43386 return pdata->msgdata[1];
43387 }
43388
43389 -static struct backlight_ops appledisplay_bl_data = {
43390 +static const struct backlight_ops appledisplay_bl_data = {
43391 .get_brightness = appledisplay_bl_get_brightness,
43392 .update_status = appledisplay_bl_update_status,
43393 };
43394 diff --git a/drivers/usb/mon/mon_main.c b/drivers/usb/mon/mon_main.c
43395 index e0c2db3..bd8cb66 100644
43396 --- a/drivers/usb/mon/mon_main.c
43397 +++ b/drivers/usb/mon/mon_main.c
43398 @@ -238,7 +238,7 @@ static struct notifier_block mon_nb = {
43399 /*
43400 * Ops
43401 */
43402 -static struct usb_mon_operations mon_ops_0 = {
43403 +static const struct usb_mon_operations mon_ops_0 = {
43404 .urb_submit = mon_submit,
43405 .urb_submit_error = mon_submit_error,
43406 .urb_complete = mon_complete,
43407 diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
43408 index d6bea3e..60b250e 100644
43409 --- a/drivers/usb/wusbcore/wa-hc.h
43410 +++ b/drivers/usb/wusbcore/wa-hc.h
43411 @@ -192,7 +192,7 @@ struct wahc {
43412 struct list_head xfer_delayed_list;
43413 spinlock_t xfer_list_lock;
43414 struct work_struct xfer_work;
43415 - atomic_t xfer_id_count;
43416 + atomic_unchecked_t xfer_id_count;
43417 };
43418
43419
43420 @@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
43421 INIT_LIST_HEAD(&wa->xfer_delayed_list);
43422 spin_lock_init(&wa->xfer_list_lock);
43423 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
43424 - atomic_set(&wa->xfer_id_count, 1);
43425 + atomic_set_unchecked(&wa->xfer_id_count, 1);
43426 }
43427
43428 /**
43429 diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
43430 index 613a5fc..3174865 100644
43431 --- a/drivers/usb/wusbcore/wa-xfer.c
43432 +++ b/drivers/usb/wusbcore/wa-xfer.c
43433 @@ -293,7 +293,7 @@ out:
43434 */
43435 static void wa_xfer_id_init(struct wa_xfer *xfer)
43436 {
43437 - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
43438 + xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
43439 }
43440
43441 /*
43442 diff --git a/drivers/uwb/wlp/messages.c b/drivers/uwb/wlp/messages.c
43443 index aa42fce..f8a828c 100644
43444 --- a/drivers/uwb/wlp/messages.c
43445 +++ b/drivers/uwb/wlp/messages.c
43446 @@ -903,7 +903,7 @@ int wlp_parse_f0(struct wlp *wlp, struct sk_buff *skb)
43447 size_t len = skb->len;
43448 size_t used;
43449 ssize_t result;
43450 - struct wlp_nonce enonce, rnonce;
43451 + struct wlp_nonce enonce = {{0}}, rnonce = {{0}};
43452 enum wlp_assc_error assc_err;
43453 char enonce_buf[WLP_WSS_NONCE_STRSIZE];
43454 char rnonce_buf[WLP_WSS_NONCE_STRSIZE];
43455 diff --git a/drivers/uwb/wlp/sysfs.c b/drivers/uwb/wlp/sysfs.c
43456 index 0370399..6627c94 100644
43457 --- a/drivers/uwb/wlp/sysfs.c
43458 +++ b/drivers/uwb/wlp/sysfs.c
43459 @@ -615,8 +615,7 @@ ssize_t wlp_wss_attr_store(struct kobject *kobj, struct attribute *attr,
43460 return ret;
43461 }
43462
43463 -static
43464 -struct sysfs_ops wss_sysfs_ops = {
43465 +static const struct sysfs_ops wss_sysfs_ops = {
43466 .show = wlp_wss_attr_show,
43467 .store = wlp_wss_attr_store,
43468 };
43469 diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
43470 index 8c5e432..5ee90ea 100644
43471 --- a/drivers/video/atmel_lcdfb.c
43472 +++ b/drivers/video/atmel_lcdfb.c
43473 @@ -110,7 +110,7 @@ static int atmel_bl_get_brightness(struct backlight_device *bl)
43474 return lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL);
43475 }
43476
43477 -static struct backlight_ops atmel_lcdc_bl_ops = {
43478 +static const struct backlight_ops atmel_lcdc_bl_ops = {
43479 .update_status = atmel_bl_update_status,
43480 .get_brightness = atmel_bl_get_brightness,
43481 };
43482 diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
43483 index e4e4d43..66bcbcc 100644
43484 --- a/drivers/video/aty/aty128fb.c
43485 +++ b/drivers/video/aty/aty128fb.c
43486 @@ -149,7 +149,7 @@ enum {
43487 };
43488
43489 /* Must match above enum */
43490 -static const char *r128_family[] __devinitdata = {
43491 +static const char *r128_family[] __devinitconst = {
43492 "AGP",
43493 "PCI",
43494 "PRO AGP",
43495 @@ -1787,7 +1787,7 @@ static int aty128_bl_get_brightness(struct backlight_device *bd)
43496 return bd->props.brightness;
43497 }
43498
43499 -static struct backlight_ops aty128_bl_data = {
43500 +static const struct backlight_ops aty128_bl_data = {
43501 .get_brightness = aty128_bl_get_brightness,
43502 .update_status = aty128_bl_update_status,
43503 };
43504 diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
43505 index 913b4a4..9295a38 100644
43506 --- a/drivers/video/aty/atyfb_base.c
43507 +++ b/drivers/video/aty/atyfb_base.c
43508 @@ -2225,7 +2225,7 @@ static int aty_bl_get_brightness(struct backlight_device *bd)
43509 return bd->props.brightness;
43510 }
43511
43512 -static struct backlight_ops aty_bl_data = {
43513 +static const struct backlight_ops aty_bl_data = {
43514 .get_brightness = aty_bl_get_brightness,
43515 .update_status = aty_bl_update_status,
43516 };
43517 diff --git a/drivers/video/aty/radeon_backlight.c b/drivers/video/aty/radeon_backlight.c
43518 index 1a056ad..221bd6a 100644
43519 --- a/drivers/video/aty/radeon_backlight.c
43520 +++ b/drivers/video/aty/radeon_backlight.c
43521 @@ -127,7 +127,7 @@ static int radeon_bl_get_brightness(struct backlight_device *bd)
43522 return bd->props.brightness;
43523 }
43524
43525 -static struct backlight_ops radeon_bl_data = {
43526 +static const struct backlight_ops radeon_bl_data = {
43527 .get_brightness = radeon_bl_get_brightness,
43528 .update_status = radeon_bl_update_status,
43529 };
43530 diff --git a/drivers/video/backlight/adp5520_bl.c b/drivers/video/backlight/adp5520_bl.c
43531 index ad05da5..3cb2cb9 100644
43532 --- a/drivers/video/backlight/adp5520_bl.c
43533 +++ b/drivers/video/backlight/adp5520_bl.c
43534 @@ -84,7 +84,7 @@ static int adp5520_bl_get_brightness(struct backlight_device *bl)
43535 return error ? data->current_brightness : reg_val;
43536 }
43537
43538 -static struct backlight_ops adp5520_bl_ops = {
43539 +static const struct backlight_ops adp5520_bl_ops = {
43540 .update_status = adp5520_bl_update_status,
43541 .get_brightness = adp5520_bl_get_brightness,
43542 };
43543 diff --git a/drivers/video/backlight/adx_bl.c b/drivers/video/backlight/adx_bl.c
43544 index 2c3bdfc..d769b0b 100644
43545 --- a/drivers/video/backlight/adx_bl.c
43546 +++ b/drivers/video/backlight/adx_bl.c
43547 @@ -61,7 +61,7 @@ static int adx_backlight_check_fb(struct fb_info *fb)
43548 return 1;
43549 }
43550
43551 -static struct backlight_ops adx_backlight_ops = {
43552 +static const struct backlight_ops adx_backlight_ops = {
43553 .options = 0,
43554 .update_status = adx_backlight_update_status,
43555 .get_brightness = adx_backlight_get_brightness,
43556 diff --git a/drivers/video/backlight/atmel-pwm-bl.c b/drivers/video/backlight/atmel-pwm-bl.c
43557 index 505c082..6b6b3cc 100644
43558 --- a/drivers/video/backlight/atmel-pwm-bl.c
43559 +++ b/drivers/video/backlight/atmel-pwm-bl.c
43560 @@ -113,7 +113,7 @@ static int atmel_pwm_bl_init_pwm(struct atmel_pwm_bl *pwmbl)
43561 return pwm_channel_enable(&pwmbl->pwmc);
43562 }
43563
43564 -static struct backlight_ops atmel_pwm_bl_ops = {
43565 +static const struct backlight_ops atmel_pwm_bl_ops = {
43566 .get_brightness = atmel_pwm_bl_get_intensity,
43567 .update_status = atmel_pwm_bl_set_intensity,
43568 };
43569 diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
43570 index 5e20e6e..89025e6 100644
43571 --- a/drivers/video/backlight/backlight.c
43572 +++ b/drivers/video/backlight/backlight.c
43573 @@ -269,7 +269,7 @@ EXPORT_SYMBOL(backlight_force_update);
43574 * ERR_PTR() or a pointer to the newly allocated device.
43575 */
43576 struct backlight_device *backlight_device_register(const char *name,
43577 - struct device *parent, void *devdata, struct backlight_ops *ops)
43578 + struct device *parent, void *devdata, const struct backlight_ops *ops)
43579 {
43580 struct backlight_device *new_bd;
43581 int rc;
43582 diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c
43583 index 9677494..b4bcf80 100644
43584 --- a/drivers/video/backlight/corgi_lcd.c
43585 +++ b/drivers/video/backlight/corgi_lcd.c
43586 @@ -451,7 +451,7 @@ void corgi_lcd_limit_intensity(int limit)
43587 }
43588 EXPORT_SYMBOL(corgi_lcd_limit_intensity);
43589
43590 -static struct backlight_ops corgi_bl_ops = {
43591 +static const struct backlight_ops corgi_bl_ops = {
43592 .get_brightness = corgi_bl_get_intensity,
43593 .update_status = corgi_bl_update_status,
43594 };
43595 diff --git a/drivers/video/backlight/cr_bllcd.c b/drivers/video/backlight/cr_bllcd.c
43596 index b9fe62b..2914bf1 100644
43597 --- a/drivers/video/backlight/cr_bllcd.c
43598 +++ b/drivers/video/backlight/cr_bllcd.c
43599 @@ -108,7 +108,7 @@ static int cr_backlight_get_intensity(struct backlight_device *bd)
43600 return intensity;
43601 }
43602
43603 -static struct backlight_ops cr_backlight_ops = {
43604 +static const struct backlight_ops cr_backlight_ops = {
43605 .get_brightness = cr_backlight_get_intensity,
43606 .update_status = cr_backlight_set_intensity,
43607 };
43608 diff --git a/drivers/video/backlight/da903x_bl.c b/drivers/video/backlight/da903x_bl.c
43609 index 701a108..feacfd5 100644
43610 --- a/drivers/video/backlight/da903x_bl.c
43611 +++ b/drivers/video/backlight/da903x_bl.c
43612 @@ -94,7 +94,7 @@ static int da903x_backlight_get_brightness(struct backlight_device *bl)
43613 return data->current_brightness;
43614 }
43615
43616 -static struct backlight_ops da903x_backlight_ops = {
43617 +static const struct backlight_ops da903x_backlight_ops = {
43618 .update_status = da903x_backlight_update_status,
43619 .get_brightness = da903x_backlight_get_brightness,
43620 };
43621 diff --git a/drivers/video/backlight/generic_bl.c b/drivers/video/backlight/generic_bl.c
43622 index 6d27f62..e6d348e 100644
43623 --- a/drivers/video/backlight/generic_bl.c
43624 +++ b/drivers/video/backlight/generic_bl.c
43625 @@ -70,7 +70,7 @@ void corgibl_limit_intensity(int limit)
43626 }
43627 EXPORT_SYMBOL(corgibl_limit_intensity);
43628
43629 -static struct backlight_ops genericbl_ops = {
43630 +static const struct backlight_ops genericbl_ops = {
43631 .options = BL_CORE_SUSPENDRESUME,
43632 .get_brightness = genericbl_get_intensity,
43633 .update_status = genericbl_send_intensity,
43634 diff --git a/drivers/video/backlight/hp680_bl.c b/drivers/video/backlight/hp680_bl.c
43635 index 7fb4eef..f7cc528 100644
43636 --- a/drivers/video/backlight/hp680_bl.c
43637 +++ b/drivers/video/backlight/hp680_bl.c
43638 @@ -98,7 +98,7 @@ static int hp680bl_get_intensity(struct backlight_device *bd)
43639 return current_intensity;
43640 }
43641
43642 -static struct backlight_ops hp680bl_ops = {
43643 +static const struct backlight_ops hp680bl_ops = {
43644 .get_brightness = hp680bl_get_intensity,
43645 .update_status = hp680bl_set_intensity,
43646 };
43647 diff --git a/drivers/video/backlight/jornada720_bl.c b/drivers/video/backlight/jornada720_bl.c
43648 index 7aed256..db9071f 100644
43649 --- a/drivers/video/backlight/jornada720_bl.c
43650 +++ b/drivers/video/backlight/jornada720_bl.c
43651 @@ -93,7 +93,7 @@ out:
43652 return ret;
43653 }
43654
43655 -static struct backlight_ops jornada_bl_ops = {
43656 +static const struct backlight_ops jornada_bl_ops = {
43657 .get_brightness = jornada_bl_get_brightness,
43658 .update_status = jornada_bl_update_status,
43659 .options = BL_CORE_SUSPENDRESUME,
43660 diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
43661 index a38fda1..939e7b8 100644
43662 --- a/drivers/video/backlight/kb3886_bl.c
43663 +++ b/drivers/video/backlight/kb3886_bl.c
43664 @@ -134,7 +134,7 @@ static int kb3886bl_get_intensity(struct backlight_device *bd)
43665 return kb3886bl_intensity;
43666 }
43667
43668 -static struct backlight_ops kb3886bl_ops = {
43669 +static const struct backlight_ops kb3886bl_ops = {
43670 .get_brightness = kb3886bl_get_intensity,
43671 .update_status = kb3886bl_send_intensity,
43672 };
43673 diff --git a/drivers/video/backlight/locomolcd.c b/drivers/video/backlight/locomolcd.c
43674 index 6b488b8..00a9591 100644
43675 --- a/drivers/video/backlight/locomolcd.c
43676 +++ b/drivers/video/backlight/locomolcd.c
43677 @@ -141,7 +141,7 @@ static int locomolcd_get_intensity(struct backlight_device *bd)
43678 return current_intensity;
43679 }
43680
43681 -static struct backlight_ops locomobl_data = {
43682 +static const struct backlight_ops locomobl_data = {
43683 .get_brightness = locomolcd_get_intensity,
43684 .update_status = locomolcd_set_intensity,
43685 };
43686 diff --git a/drivers/video/backlight/mbp_nvidia_bl.c b/drivers/video/backlight/mbp_nvidia_bl.c
43687 index 99bdfa8..3dac448 100644
43688 --- a/drivers/video/backlight/mbp_nvidia_bl.c
43689 +++ b/drivers/video/backlight/mbp_nvidia_bl.c
43690 @@ -33,7 +33,7 @@ struct dmi_match_data {
43691 unsigned long iostart;
43692 unsigned long iolen;
43693 /* Backlight operations structure. */
43694 - struct backlight_ops backlight_ops;
43695 + const struct backlight_ops backlight_ops;
43696 };
43697
43698 /* Module parameters. */
43699 diff --git a/drivers/video/backlight/omap1_bl.c b/drivers/video/backlight/omap1_bl.c
43700 index cbad67e..3cf900e 100644
43701 --- a/drivers/video/backlight/omap1_bl.c
43702 +++ b/drivers/video/backlight/omap1_bl.c
43703 @@ -125,7 +125,7 @@ static int omapbl_get_intensity(struct backlight_device *dev)
43704 return bl->current_intensity;
43705 }
43706
43707 -static struct backlight_ops omapbl_ops = {
43708 +static const struct backlight_ops omapbl_ops = {
43709 .get_brightness = omapbl_get_intensity,
43710 .update_status = omapbl_update_status,
43711 };
43712 diff --git a/drivers/video/backlight/progear_bl.c b/drivers/video/backlight/progear_bl.c
43713 index 9edaf24..075786e 100644
43714 --- a/drivers/video/backlight/progear_bl.c
43715 +++ b/drivers/video/backlight/progear_bl.c
43716 @@ -54,7 +54,7 @@ static int progearbl_get_intensity(struct backlight_device *bd)
43717 return intensity - HW_LEVEL_MIN;
43718 }
43719
43720 -static struct backlight_ops progearbl_ops = {
43721 +static const struct backlight_ops progearbl_ops = {
43722 .get_brightness = progearbl_get_intensity,
43723 .update_status = progearbl_set_intensity,
43724 };
43725 diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
43726 index 8871662..df9e0b3 100644
43727 --- a/drivers/video/backlight/pwm_bl.c
43728 +++ b/drivers/video/backlight/pwm_bl.c
43729 @@ -56,7 +56,7 @@ static int pwm_backlight_get_brightness(struct backlight_device *bl)
43730 return bl->props.brightness;
43731 }
43732
43733 -static struct backlight_ops pwm_backlight_ops = {
43734 +static const struct backlight_ops pwm_backlight_ops = {
43735 .update_status = pwm_backlight_update_status,
43736 .get_brightness = pwm_backlight_get_brightness,
43737 };
43738 diff --git a/drivers/video/backlight/tosa_bl.c b/drivers/video/backlight/tosa_bl.c
43739 index 43edbad..e14ce4d 100644
43740 --- a/drivers/video/backlight/tosa_bl.c
43741 +++ b/drivers/video/backlight/tosa_bl.c
43742 @@ -72,7 +72,7 @@ static int tosa_bl_get_brightness(struct backlight_device *dev)
43743 return props->brightness;
43744 }
43745
43746 -static struct backlight_ops bl_ops = {
43747 +static const struct backlight_ops bl_ops = {
43748 .get_brightness = tosa_bl_get_brightness,
43749 .update_status = tosa_bl_update_status,
43750 };
43751 diff --git a/drivers/video/backlight/wm831x_bl.c b/drivers/video/backlight/wm831x_bl.c
43752 index 467bdb7..e32add3 100644
43753 --- a/drivers/video/backlight/wm831x_bl.c
43754 +++ b/drivers/video/backlight/wm831x_bl.c
43755 @@ -112,7 +112,7 @@ static int wm831x_backlight_get_brightness(struct backlight_device *bl)
43756 return data->current_brightness;
43757 }
43758
43759 -static struct backlight_ops wm831x_backlight_ops = {
43760 +static const struct backlight_ops wm831x_backlight_ops = {
43761 .options = BL_CORE_SUSPENDRESUME,
43762 .update_status = wm831x_backlight_update_status,
43763 .get_brightness = wm831x_backlight_get_brightness,
43764 diff --git a/drivers/video/bf54x-lq043fb.c b/drivers/video/bf54x-lq043fb.c
43765 index e49ae5e..db4e6f7 100644
43766 --- a/drivers/video/bf54x-lq043fb.c
43767 +++ b/drivers/video/bf54x-lq043fb.c
43768 @@ -463,7 +463,7 @@ static int bl_get_brightness(struct backlight_device *bd)
43769 return 0;
43770 }
43771
43772 -static struct backlight_ops bfin_lq043fb_bl_ops = {
43773 +static const struct backlight_ops bfin_lq043fb_bl_ops = {
43774 .get_brightness = bl_get_brightness,
43775 };
43776
43777 diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/bfin-t350mcqb-fb.c
43778 index 2c72a7c..d523e52 100644
43779 --- a/drivers/video/bfin-t350mcqb-fb.c
43780 +++ b/drivers/video/bfin-t350mcqb-fb.c
43781 @@ -381,7 +381,7 @@ static int bl_get_brightness(struct backlight_device *bd)
43782 return 0;
43783 }
43784
43785 -static struct backlight_ops bfin_lq043fb_bl_ops = {
43786 +static const struct backlight_ops bfin_lq043fb_bl_ops = {
43787 .get_brightness = bl_get_brightness,
43788 };
43789
43790 diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
43791 index f53b9f1..958bf4e 100644
43792 --- a/drivers/video/fbcmap.c
43793 +++ b/drivers/video/fbcmap.c
43794 @@ -266,8 +266,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
43795 rc = -ENODEV;
43796 goto out;
43797 }
43798 - if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
43799 - !info->fbops->fb_setcmap)) {
43800 + if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
43801 rc = -EINVAL;
43802 goto out1;
43803 }
43804 diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
43805 index 99bbd28..ad3829e 100644
43806 --- a/drivers/video/fbmem.c
43807 +++ b/drivers/video/fbmem.c
43808 @@ -403,7 +403,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
43809 image->dx += image->width + 8;
43810 }
43811 } else if (rotate == FB_ROTATE_UD) {
43812 - for (x = 0; x < num && image->dx >= 0; x++) {
43813 + for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
43814 info->fbops->fb_imageblit(info, image);
43815 image->dx -= image->width + 8;
43816 }
43817 @@ -415,7 +415,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
43818 image->dy += image->height + 8;
43819 }
43820 } else if (rotate == FB_ROTATE_CCW) {
43821 - for (x = 0; x < num && image->dy >= 0; x++) {
43822 + for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
43823 info->fbops->fb_imageblit(info, image);
43824 image->dy -= image->height + 8;
43825 }
43826 @@ -915,6 +915,8 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
43827 int flags = info->flags;
43828 int ret = 0;
43829
43830 + pax_track_stack();
43831 +
43832 if (var->activate & FB_ACTIVATE_INV_MODE) {
43833 struct fb_videomode mode1, mode2;
43834
43835 @@ -1040,6 +1042,8 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
43836 void __user *argp = (void __user *)arg;
43837 long ret = 0;
43838
43839 + pax_track_stack();
43840 +
43841 switch (cmd) {
43842 case FBIOGET_VSCREENINFO:
43843 if (!lock_fb_info(info))
43844 @@ -1119,7 +1123,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
43845 return -EFAULT;
43846 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
43847 return -EINVAL;
43848 - if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
43849 + if (con2fb.framebuffer >= FB_MAX)
43850 return -EINVAL;
43851 if (!registered_fb[con2fb.framebuffer])
43852 request_module("fb%d", con2fb.framebuffer);
43853 diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
43854 index f20eff8..3e4f622 100644
43855 --- a/drivers/video/geode/gx1fb_core.c
43856 +++ b/drivers/video/geode/gx1fb_core.c
43857 @@ -30,7 +30,7 @@ static int crt_option = 1;
43858 static char panel_option[32] = "";
43859
43860 /* Modes relevant to the GX1 (taken from modedb.c) */
43861 -static const struct fb_videomode __initdata gx1_modedb[] = {
43862 +static const struct fb_videomode __initconst gx1_modedb[] = {
43863 /* 640x480-60 VESA */
43864 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
43865 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
43866 diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
43867 index 896e53d..4d87d0b 100644
43868 --- a/drivers/video/gxt4500.c
43869 +++ b/drivers/video/gxt4500.c
43870 @@ -156,7 +156,7 @@ struct gxt4500_par {
43871 static char *mode_option;
43872
43873 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
43874 -static const struct fb_videomode defaultmode __devinitdata = {
43875 +static const struct fb_videomode defaultmode __devinitconst = {
43876 .refresh = 60,
43877 .xres = 1280,
43878 .yres = 1024,
43879 @@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, struct fb_info *info)
43880 return 0;
43881 }
43882
43883 -static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
43884 +static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
43885 .id = "IBM GXT4500P",
43886 .type = FB_TYPE_PACKED_PIXELS,
43887 .visual = FB_VISUAL_PSEUDOCOLOR,
43888 diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
43889 index f5bedee..28c6028 100644
43890 --- a/drivers/video/i810/i810_accel.c
43891 +++ b/drivers/video/i810/i810_accel.c
43892 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
43893 }
43894 }
43895 printk("ringbuffer lockup!!!\n");
43896 + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
43897 i810_report_error(mmio);
43898 par->dev_flags |= LOCKUP;
43899 info->pixmap.scan_align = 1;
43900 diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
43901 index 5743ea2..457f82c 100644
43902 --- a/drivers/video/i810/i810_main.c
43903 +++ b/drivers/video/i810/i810_main.c
43904 @@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info);
43905 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
43906
43907 /* PCI */
43908 -static const char *i810_pci_list[] __devinitdata = {
43909 +static const char *i810_pci_list[] __devinitconst = {
43910 "Intel(R) 810 Framebuffer Device" ,
43911 "Intel(R) 810-DC100 Framebuffer Device" ,
43912 "Intel(R) 810E Framebuffer Device" ,
43913 diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
43914 index 3c14e43..eafa544 100644
43915 --- a/drivers/video/logo/logo_linux_clut224.ppm
43916 +++ b/drivers/video/logo/logo_linux_clut224.ppm
43917 @@ -1,1604 +1,1123 @@
43918 P3
43919 -# Standard 224-color Linux logo
43920 80 80
43921 255
43922 - 0 0 0 0 0 0 0 0 0 0 0 0
43923 - 0 0 0 0 0 0 0 0 0 0 0 0
43924 - 0 0 0 0 0 0 0 0 0 0 0 0
43925 - 0 0 0 0 0 0 0 0 0 0 0 0
43926 - 0 0 0 0 0 0 0 0 0 0 0 0
43927 - 0 0 0 0 0 0 0 0 0 0 0 0
43928 - 0 0 0 0 0 0 0 0 0 0 0 0
43929 - 0 0 0 0 0 0 0 0 0 0 0 0
43930 - 0 0 0 0 0 0 0 0 0 0 0 0
43931 - 6 6 6 6 6 6 10 10 10 10 10 10
43932 - 10 10 10 6 6 6 6 6 6 6 6 6
43933 - 0 0 0 0 0 0 0 0 0 0 0 0
43934 - 0 0 0 0 0 0 0 0 0 0 0 0
43935 - 0 0 0 0 0 0 0 0 0 0 0 0
43936 - 0 0 0 0 0 0 0 0 0 0 0 0
43937 - 0 0 0 0 0 0 0 0 0 0 0 0
43938 - 0 0 0 0 0 0 0 0 0 0 0 0
43939 - 0 0 0 0 0 0 0 0 0 0 0 0
43940 - 0 0 0 0 0 0 0 0 0 0 0 0
43941 - 0 0 0 0 0 0 0 0 0 0 0 0
43942 - 0 0 0 0 0 0 0 0 0 0 0 0
43943 - 0 0 0 0 0 0 0 0 0 0 0 0
43944 - 0 0 0 0 0 0 0 0 0 0 0 0
43945 - 0 0 0 0 0 0 0 0 0 0 0 0
43946 - 0 0 0 0 0 0 0 0 0 0 0 0
43947 - 0 0 0 0 0 0 0 0 0 0 0 0
43948 - 0 0 0 0 0 0 0 0 0 0 0 0
43949 - 0 0 0 0 0 0 0 0 0 0 0 0
43950 - 0 0 0 6 6 6 10 10 10 14 14 14
43951 - 22 22 22 26 26 26 30 30 30 34 34 34
43952 - 30 30 30 30 30 30 26 26 26 18 18 18
43953 - 14 14 14 10 10 10 6 6 6 0 0 0
43954 - 0 0 0 0 0 0 0 0 0 0 0 0
43955 - 0 0 0 0 0 0 0 0 0 0 0 0
43956 - 0 0 0 0 0 0 0 0 0 0 0 0
43957 - 0 0 0 0 0 0 0 0 0 0 0 0
43958 - 0 0 0 0 0 0 0 0 0 0 0 0
43959 - 0 0 0 0 0 0 0 0 0 0 0 0
43960 - 0 0 0 0 0 0 0 0 0 0 0 0
43961 - 0 0 0 0 0 0 0 0 0 0 0 0
43962 - 0 0 0 0 0 0 0 0 0 0 0 0
43963 - 0 0 0 0 0 1 0 0 1 0 0 0
43964 - 0 0 0 0 0 0 0 0 0 0 0 0
43965 - 0 0 0 0 0 0 0 0 0 0 0 0
43966 - 0 0 0 0 0 0 0 0 0 0 0 0
43967 - 0 0 0 0 0 0 0 0 0 0 0 0
43968 - 0 0 0 0 0 0 0 0 0 0 0 0
43969 - 0 0 0 0 0 0 0 0 0 0 0 0
43970 - 6 6 6 14 14 14 26 26 26 42 42 42
43971 - 54 54 54 66 66 66 78 78 78 78 78 78
43972 - 78 78 78 74 74 74 66 66 66 54 54 54
43973 - 42 42 42 26 26 26 18 18 18 10 10 10
43974 - 6 6 6 0 0 0 0 0 0 0 0 0
43975 - 0 0 0 0 0 0 0 0 0 0 0 0
43976 - 0 0 0 0 0 0 0 0 0 0 0 0
43977 - 0 0 0 0 0 0 0 0 0 0 0 0
43978 - 0 0 0 0 0 0 0 0 0 0 0 0
43979 - 0 0 0 0 0 0 0 0 0 0 0 0
43980 - 0 0 0 0 0 0 0 0 0 0 0 0
43981 - 0 0 0 0 0 0 0 0 0 0 0 0
43982 - 0 0 0 0 0 0 0 0 0 0 0 0
43983 - 0 0 1 0 0 0 0 0 0 0 0 0
43984 - 0 0 0 0 0 0 0 0 0 0 0 0
43985 - 0 0 0 0 0 0 0 0 0 0 0 0
43986 - 0 0 0 0 0 0 0 0 0 0 0 0
43987 - 0 0 0 0 0 0 0 0 0 0 0 0
43988 - 0 0 0 0 0 0 0 0 0 0 0 0
43989 - 0 0 0 0 0 0 0 0 0 10 10 10
43990 - 22 22 22 42 42 42 66 66 66 86 86 86
43991 - 66 66 66 38 38 38 38 38 38 22 22 22
43992 - 26 26 26 34 34 34 54 54 54 66 66 66
43993 - 86 86 86 70 70 70 46 46 46 26 26 26
43994 - 14 14 14 6 6 6 0 0 0 0 0 0
43995 - 0 0 0 0 0 0 0 0 0 0 0 0
43996 - 0 0 0 0 0 0 0 0 0 0 0 0
43997 - 0 0 0 0 0 0 0 0 0 0 0 0
43998 - 0 0 0 0 0 0 0 0 0 0 0 0
43999 - 0 0 0 0 0 0 0 0 0 0 0 0
44000 - 0 0 0 0 0 0 0 0 0 0 0 0
44001 - 0 0 0 0 0 0 0 0 0 0 0 0
44002 - 0 0 0 0 0 0 0 0 0 0 0 0
44003 - 0 0 1 0 0 1 0 0 1 0 0 0
44004 - 0 0 0 0 0 0 0 0 0 0 0 0
44005 - 0 0 0 0 0 0 0 0 0 0 0 0
44006 - 0 0 0 0 0 0 0 0 0 0 0 0
44007 - 0 0 0 0 0 0 0 0 0 0 0 0
44008 - 0 0 0 0 0 0 0 0 0 0 0 0
44009 - 0 0 0 0 0 0 10 10 10 26 26 26
44010 - 50 50 50 82 82 82 58 58 58 6 6 6
44011 - 2 2 6 2 2 6 2 2 6 2 2 6
44012 - 2 2 6 2 2 6 2 2 6 2 2 6
44013 - 6 6 6 54 54 54 86 86 86 66 66 66
44014 - 38 38 38 18 18 18 6 6 6 0 0 0
44015 - 0 0 0 0 0 0 0 0 0 0 0 0
44016 - 0 0 0 0 0 0 0 0 0 0 0 0
44017 - 0 0 0 0 0 0 0 0 0 0 0 0
44018 - 0 0 0 0 0 0 0 0 0 0 0 0
44019 - 0 0 0 0 0 0 0 0 0 0 0 0
44020 - 0 0 0 0 0 0 0 0 0 0 0 0
44021 - 0 0 0 0 0 0 0 0 0 0 0 0
44022 - 0 0 0 0 0 0 0 0 0 0 0 0
44023 - 0 0 0 0 0 0 0 0 0 0 0 0
44024 - 0 0 0 0 0 0 0 0 0 0 0 0
44025 - 0 0 0 0 0 0 0 0 0 0 0 0
44026 - 0 0 0 0 0 0 0 0 0 0 0 0
44027 - 0 0 0 0 0 0 0 0 0 0 0 0
44028 - 0 0 0 0 0 0 0 0 0 0 0 0
44029 - 0 0 0 6 6 6 22 22 22 50 50 50
44030 - 78 78 78 34 34 34 2 2 6 2 2 6
44031 - 2 2 6 2 2 6 2 2 6 2 2 6
44032 - 2 2 6 2 2 6 2 2 6 2 2 6
44033 - 2 2 6 2 2 6 6 6 6 70 70 70
44034 - 78 78 78 46 46 46 22 22 22 6 6 6
44035 - 0 0 0 0 0 0 0 0 0 0 0 0
44036 - 0 0 0 0 0 0 0 0 0 0 0 0
44037 - 0 0 0 0 0 0 0 0 0 0 0 0
44038 - 0 0 0 0 0 0 0 0 0 0 0 0
44039 - 0 0 0 0 0 0 0 0 0 0 0 0
44040 - 0 0 0 0 0 0 0 0 0 0 0 0
44041 - 0 0 0 0 0 0 0 0 0 0 0 0
44042 - 0 0 0 0 0 0 0 0 0 0 0 0
44043 - 0 0 1 0 0 1 0 0 1 0 0 0
44044 - 0 0 0 0 0 0 0 0 0 0 0 0
44045 - 0 0 0 0 0 0 0 0 0 0 0 0
44046 - 0 0 0 0 0 0 0 0 0 0 0 0
44047 - 0 0 0 0 0 0 0 0 0 0 0 0
44048 - 0 0 0 0 0 0 0 0 0 0 0 0
44049 - 6 6 6 18 18 18 42 42 42 82 82 82
44050 - 26 26 26 2 2 6 2 2 6 2 2 6
44051 - 2 2 6 2 2 6 2 2 6 2 2 6
44052 - 2 2 6 2 2 6 2 2 6 14 14 14
44053 - 46 46 46 34 34 34 6 6 6 2 2 6
44054 - 42 42 42 78 78 78 42 42 42 18 18 18
44055 - 6 6 6 0 0 0 0 0 0 0 0 0
44056 - 0 0 0 0 0 0 0 0 0 0 0 0
44057 - 0 0 0 0 0 0 0 0 0 0 0 0
44058 - 0 0 0 0 0 0 0 0 0 0 0 0
44059 - 0 0 0 0 0 0 0 0 0 0 0 0
44060 - 0 0 0 0 0 0 0 0 0 0 0 0
44061 - 0 0 0 0 0 0 0 0 0 0 0 0
44062 - 0 0 0 0 0 0 0 0 0 0 0 0
44063 - 0 0 1 0 0 0 0 0 1 0 0 0
44064 - 0 0 0 0 0 0 0 0 0 0 0 0
44065 - 0 0 0 0 0 0 0 0 0 0 0 0
44066 - 0 0 0 0 0 0 0 0 0 0 0 0
44067 - 0 0 0 0 0 0 0 0 0 0 0 0
44068 - 0 0 0 0 0 0 0 0 0 0 0 0
44069 - 10 10 10 30 30 30 66 66 66 58 58 58
44070 - 2 2 6 2 2 6 2 2 6 2 2 6
44071 - 2 2 6 2 2 6 2 2 6 2 2 6
44072 - 2 2 6 2 2 6 2 2 6 26 26 26
44073 - 86 86 86 101 101 101 46 46 46 10 10 10
44074 - 2 2 6 58 58 58 70 70 70 34 34 34
44075 - 10 10 10 0 0 0 0 0 0 0 0 0
44076 - 0 0 0 0 0 0 0 0 0 0 0 0
44077 - 0 0 0 0 0 0 0 0 0 0 0 0
44078 - 0 0 0 0 0 0 0 0 0 0 0 0
44079 - 0 0 0 0 0 0 0 0 0 0 0 0
44080 - 0 0 0 0 0 0 0 0 0 0 0 0
44081 - 0 0 0 0 0 0 0 0 0 0 0 0
44082 - 0 0 0 0 0 0 0 0 0 0 0 0
44083 - 0 0 1 0 0 1 0 0 1 0 0 0
44084 - 0 0 0 0 0 0 0 0 0 0 0 0
44085 - 0 0 0 0 0 0 0 0 0 0 0 0
44086 - 0 0 0 0 0 0 0 0 0 0 0 0
44087 - 0 0 0 0 0 0 0 0 0 0 0 0
44088 - 0 0 0 0 0 0 0 0 0 0 0 0
44089 - 14 14 14 42 42 42 86 86 86 10 10 10
44090 - 2 2 6 2 2 6 2 2 6 2 2 6
44091 - 2 2 6 2 2 6 2 2 6 2 2 6
44092 - 2 2 6 2 2 6 2 2 6 30 30 30
44093 - 94 94 94 94 94 94 58 58 58 26 26 26
44094 - 2 2 6 6 6 6 78 78 78 54 54 54
44095 - 22 22 22 6 6 6 0 0 0 0 0 0
44096 - 0 0 0 0 0 0 0 0 0 0 0 0
44097 - 0 0 0 0 0 0 0 0 0 0 0 0
44098 - 0 0 0 0 0 0 0 0 0 0 0 0
44099 - 0 0 0 0 0 0 0 0 0 0 0 0
44100 - 0 0 0 0 0 0 0 0 0 0 0 0
44101 - 0 0 0 0 0 0 0 0 0 0 0 0
44102 - 0 0 0 0 0 0 0 0 0 0 0 0
44103 - 0 0 0 0 0 0 0 0 0 0 0 0
44104 - 0 0 0 0 0 0 0 0 0 0 0 0
44105 - 0 0 0 0 0 0 0 0 0 0 0 0
44106 - 0 0 0 0 0 0 0 0 0 0 0 0
44107 - 0 0 0 0 0 0 0 0 0 0 0 0
44108 - 0 0 0 0 0 0 0 0 0 6 6 6
44109 - 22 22 22 62 62 62 62 62 62 2 2 6
44110 - 2 2 6 2 2 6 2 2 6 2 2 6
44111 - 2 2 6 2 2 6 2 2 6 2 2 6
44112 - 2 2 6 2 2 6 2 2 6 26 26 26
44113 - 54 54 54 38 38 38 18 18 18 10 10 10
44114 - 2 2 6 2 2 6 34 34 34 82 82 82
44115 - 38 38 38 14 14 14 0 0 0 0 0 0
44116 - 0 0 0 0 0 0 0 0 0 0 0 0
44117 - 0 0 0 0 0 0 0 0 0 0 0 0
44118 - 0 0 0 0 0 0 0 0 0 0 0 0
44119 - 0 0 0 0 0 0 0 0 0 0 0 0
44120 - 0 0 0 0 0 0 0 0 0 0 0 0
44121 - 0 0 0 0 0 0 0 0 0 0 0 0
44122 - 0 0 0 0 0 0 0 0 0 0 0 0
44123 - 0 0 0 0 0 1 0 0 1 0 0 0
44124 - 0 0 0 0 0 0 0 0 0 0 0 0
44125 - 0 0 0 0 0 0 0 0 0 0 0 0
44126 - 0 0 0 0 0 0 0 0 0 0 0 0
44127 - 0 0 0 0 0 0 0 0 0 0 0 0
44128 - 0 0 0 0 0 0 0 0 0 6 6 6
44129 - 30 30 30 78 78 78 30 30 30 2 2 6
44130 - 2 2 6 2 2 6 2 2 6 2 2 6
44131 - 2 2 6 2 2 6 2 2 6 2 2 6
44132 - 2 2 6 2 2 6 2 2 6 10 10 10
44133 - 10 10 10 2 2 6 2 2 6 2 2 6
44134 - 2 2 6 2 2 6 2 2 6 78 78 78
44135 - 50 50 50 18 18 18 6 6 6 0 0 0
44136 - 0 0 0 0 0 0 0 0 0 0 0 0
44137 - 0 0 0 0 0 0 0 0 0 0 0 0
44138 - 0 0 0 0 0 0 0 0 0 0 0 0
44139 - 0 0 0 0 0 0 0 0 0 0 0 0
44140 - 0 0 0 0 0 0 0 0 0 0 0 0
44141 - 0 0 0 0 0 0 0 0 0 0 0 0
44142 - 0 0 0 0 0 0 0 0 0 0 0 0
44143 - 0 0 1 0 0 0 0 0 0 0 0 0
44144 - 0 0 0 0 0 0 0 0 0 0 0 0
44145 - 0 0 0 0 0 0 0 0 0 0 0 0
44146 - 0 0 0 0 0 0 0 0 0 0 0 0
44147 - 0 0 0 0 0 0 0 0 0 0 0 0
44148 - 0 0 0 0 0 0 0 0 0 10 10 10
44149 - 38 38 38 86 86 86 14 14 14 2 2 6
44150 - 2 2 6 2 2 6 2 2 6 2 2 6
44151 - 2 2 6 2 2 6 2 2 6 2 2 6
44152 - 2 2 6 2 2 6 2 2 6 2 2 6
44153 - 2 2 6 2 2 6 2 2 6 2 2 6
44154 - 2 2 6 2 2 6 2 2 6 54 54 54
44155 - 66 66 66 26 26 26 6 6 6 0 0 0
44156 - 0 0 0 0 0 0 0 0 0 0 0 0
44157 - 0 0 0 0 0 0 0 0 0 0 0 0
44158 - 0 0 0 0 0 0 0 0 0 0 0 0
44159 - 0 0 0 0 0 0 0 0 0 0 0 0
44160 - 0 0 0 0 0 0 0 0 0 0 0 0
44161 - 0 0 0 0 0 0 0 0 0 0 0 0
44162 - 0 0 0 0 0 0 0 0 0 0 0 0
44163 - 0 0 0 0 0 1 0 0 1 0 0 0
44164 - 0 0 0 0 0 0 0 0 0 0 0 0
44165 - 0 0 0 0 0 0 0 0 0 0 0 0
44166 - 0 0 0 0 0 0 0 0 0 0 0 0
44167 - 0 0 0 0 0 0 0 0 0 0 0 0
44168 - 0 0 0 0 0 0 0 0 0 14 14 14
44169 - 42 42 42 82 82 82 2 2 6 2 2 6
44170 - 2 2 6 6 6 6 10 10 10 2 2 6
44171 - 2 2 6 2 2 6 2 2 6 2 2 6
44172 - 2 2 6 2 2 6 2 2 6 6 6 6
44173 - 14 14 14 10 10 10 2 2 6 2 2 6
44174 - 2 2 6 2 2 6 2 2 6 18 18 18
44175 - 82 82 82 34 34 34 10 10 10 0 0 0
44176 - 0 0 0 0 0 0 0 0 0 0 0 0
44177 - 0 0 0 0 0 0 0 0 0 0 0 0
44178 - 0 0 0 0 0 0 0 0 0 0 0 0
44179 - 0 0 0 0 0 0 0 0 0 0 0 0
44180 - 0 0 0 0 0 0 0 0 0 0 0 0
44181 - 0 0 0 0 0 0 0 0 0 0 0 0
44182 - 0 0 0 0 0 0 0 0 0 0 0 0
44183 - 0 0 1 0 0 0 0 0 0 0 0 0
44184 - 0 0 0 0 0 0 0 0 0 0 0 0
44185 - 0 0 0 0 0 0 0 0 0 0 0 0
44186 - 0 0 0 0 0 0 0 0 0 0 0 0
44187 - 0 0 0 0 0 0 0 0 0 0 0 0
44188 - 0 0 0 0 0 0 0 0 0 14 14 14
44189 - 46 46 46 86 86 86 2 2 6 2 2 6
44190 - 6 6 6 6 6 6 22 22 22 34 34 34
44191 - 6 6 6 2 2 6 2 2 6 2 2 6
44192 - 2 2 6 2 2 6 18 18 18 34 34 34
44193 - 10 10 10 50 50 50 22 22 22 2 2 6
44194 - 2 2 6 2 2 6 2 2 6 10 10 10
44195 - 86 86 86 42 42 42 14 14 14 0 0 0
44196 - 0 0 0 0 0 0 0 0 0 0 0 0
44197 - 0 0 0 0 0 0 0 0 0 0 0 0
44198 - 0 0 0 0 0 0 0 0 0 0 0 0
44199 - 0 0 0 0 0 0 0 0 0 0 0 0
44200 - 0 0 0 0 0 0 0 0 0 0 0 0
44201 - 0 0 0 0 0 0 0 0 0 0 0 0
44202 - 0 0 0 0 0 0 0 0 0 0 0 0
44203 - 0 0 1 0 0 1 0 0 1 0 0 0
44204 - 0 0 0 0 0 0 0 0 0 0 0 0
44205 - 0 0 0 0 0 0 0 0 0 0 0 0
44206 - 0 0 0 0 0 0 0 0 0 0 0 0
44207 - 0 0 0 0 0 0 0 0 0 0 0 0
44208 - 0 0 0 0 0 0 0 0 0 14 14 14
44209 - 46 46 46 86 86 86 2 2 6 2 2 6
44210 - 38 38 38 116 116 116 94 94 94 22 22 22
44211 - 22 22 22 2 2 6 2 2 6 2 2 6
44212 - 14 14 14 86 86 86 138 138 138 162 162 162
44213 -154 154 154 38 38 38 26 26 26 6 6 6
44214 - 2 2 6 2 2 6 2 2 6 2 2 6
44215 - 86 86 86 46 46 46 14 14 14 0 0 0
44216 - 0 0 0 0 0 0 0 0 0 0 0 0
44217 - 0 0 0 0 0 0 0 0 0 0 0 0
44218 - 0 0 0 0 0 0 0 0 0 0 0 0
44219 - 0 0 0 0 0 0 0 0 0 0 0 0
44220 - 0 0 0 0 0 0 0 0 0 0 0 0
44221 - 0 0 0 0 0 0 0 0 0 0 0 0
44222 - 0 0 0 0 0 0 0 0 0 0 0 0
44223 - 0 0 0 0 0 0 0 0 0 0 0 0
44224 - 0 0 0 0 0 0 0 0 0 0 0 0
44225 - 0 0 0 0 0 0 0 0 0 0 0 0
44226 - 0 0 0 0 0 0 0 0 0 0 0 0
44227 - 0 0 0 0 0 0 0 0 0 0 0 0
44228 - 0 0 0 0 0 0 0 0 0 14 14 14
44229 - 46 46 46 86 86 86 2 2 6 14 14 14
44230 -134 134 134 198 198 198 195 195 195 116 116 116
44231 - 10 10 10 2 2 6 2 2 6 6 6 6
44232 -101 98 89 187 187 187 210 210 210 218 218 218
44233 -214 214 214 134 134 134 14 14 14 6 6 6
44234 - 2 2 6 2 2 6 2 2 6 2 2 6
44235 - 86 86 86 50 50 50 18 18 18 6 6 6
44236 - 0 0 0 0 0 0 0 0 0 0 0 0
44237 - 0 0 0 0 0 0 0 0 0 0 0 0
44238 - 0 0 0 0 0 0 0 0 0 0 0 0
44239 - 0 0 0 0 0 0 0 0 0 0 0 0
44240 - 0 0 0 0 0 0 0 0 0 0 0 0
44241 - 0 0 0 0 0 0 0 0 0 0 0 0
44242 - 0 0 0 0 0 0 0 0 1 0 0 0
44243 - 0 0 1 0 0 1 0 0 1 0 0 0
44244 - 0 0 0 0 0 0 0 0 0 0 0 0
44245 - 0 0 0 0 0 0 0 0 0 0 0 0
44246 - 0 0 0 0 0 0 0 0 0 0 0 0
44247 - 0 0 0 0 0 0 0 0 0 0 0 0
44248 - 0 0 0 0 0 0 0 0 0 14 14 14
44249 - 46 46 46 86 86 86 2 2 6 54 54 54
44250 -218 218 218 195 195 195 226 226 226 246 246 246
44251 - 58 58 58 2 2 6 2 2 6 30 30 30
44252 -210 210 210 253 253 253 174 174 174 123 123 123
44253 -221 221 221 234 234 234 74 74 74 2 2 6
44254 - 2 2 6 2 2 6 2 2 6 2 2 6
44255 - 70 70 70 58 58 58 22 22 22 6 6 6
44256 - 0 0 0 0 0 0 0 0 0 0 0 0
44257 - 0 0 0 0 0 0 0 0 0 0 0 0
44258 - 0 0 0 0 0 0 0 0 0 0 0 0
44259 - 0 0 0 0 0 0 0 0 0 0 0 0
44260 - 0 0 0 0 0 0 0 0 0 0 0 0
44261 - 0 0 0 0 0 0 0 0 0 0 0 0
44262 - 0 0 0 0 0 0 0 0 0 0 0 0
44263 - 0 0 0 0 0 0 0 0 0 0 0 0
44264 - 0 0 0 0 0 0 0 0 0 0 0 0
44265 - 0 0 0 0 0 0 0 0 0 0 0 0
44266 - 0 0 0 0 0 0 0 0 0 0 0 0
44267 - 0 0 0 0 0 0 0 0 0 0 0 0
44268 - 0 0 0 0 0 0 0 0 0 14 14 14
44269 - 46 46 46 82 82 82 2 2 6 106 106 106
44270 -170 170 170 26 26 26 86 86 86 226 226 226
44271 -123 123 123 10 10 10 14 14 14 46 46 46
44272 -231 231 231 190 190 190 6 6 6 70 70 70
44273 - 90 90 90 238 238 238 158 158 158 2 2 6
44274 - 2 2 6 2 2 6 2 2 6 2 2 6
44275 - 70 70 70 58 58 58 22 22 22 6 6 6
44276 - 0 0 0 0 0 0 0 0 0 0 0 0
44277 - 0 0 0 0 0 0 0 0 0 0 0 0
44278 - 0 0 0 0 0 0 0 0 0 0 0 0
44279 - 0 0 0 0 0 0 0 0 0 0 0 0
44280 - 0 0 0 0 0 0 0 0 0 0 0 0
44281 - 0 0 0 0 0 0 0 0 0 0 0 0
44282 - 0 0 0 0 0 0 0 0 1 0 0 0
44283 - 0 0 1 0 0 1 0 0 1 0 0 0
44284 - 0 0 0 0 0 0 0 0 0 0 0 0
44285 - 0 0 0 0 0 0 0 0 0 0 0 0
44286 - 0 0 0 0 0 0 0 0 0 0 0 0
44287 - 0 0 0 0 0 0 0 0 0 0 0 0
44288 - 0 0 0 0 0 0 0 0 0 14 14 14
44289 - 42 42 42 86 86 86 6 6 6 116 116 116
44290 -106 106 106 6 6 6 70 70 70 149 149 149
44291 -128 128 128 18 18 18 38 38 38 54 54 54
44292 -221 221 221 106 106 106 2 2 6 14 14 14
44293 - 46 46 46 190 190 190 198 198 198 2 2 6
44294 - 2 2 6 2 2 6 2 2 6 2 2 6
44295 - 74 74 74 62 62 62 22 22 22 6 6 6
44296 - 0 0 0 0 0 0 0 0 0 0 0 0
44297 - 0 0 0 0 0 0 0 0 0 0 0 0
44298 - 0 0 0 0 0 0 0 0 0 0 0 0
44299 - 0 0 0 0 0 0 0 0 0 0 0 0
44300 - 0 0 0 0 0 0 0 0 0 0 0 0
44301 - 0 0 0 0 0 0 0 0 0 0 0 0
44302 - 0 0 0 0 0 0 0 0 1 0 0 0
44303 - 0 0 1 0 0 0 0 0 1 0 0 0
44304 - 0 0 0 0 0 0 0 0 0 0 0 0
44305 - 0 0 0 0 0 0 0 0 0 0 0 0
44306 - 0 0 0 0 0 0 0 0 0 0 0 0
44307 - 0 0 0 0 0 0 0 0 0 0 0 0
44308 - 0 0 0 0 0 0 0 0 0 14 14 14
44309 - 42 42 42 94 94 94 14 14 14 101 101 101
44310 -128 128 128 2 2 6 18 18 18 116 116 116
44311 -118 98 46 121 92 8 121 92 8 98 78 10
44312 -162 162 162 106 106 106 2 2 6 2 2 6
44313 - 2 2 6 195 195 195 195 195 195 6 6 6
44314 - 2 2 6 2 2 6 2 2 6 2 2 6
44315 - 74 74 74 62 62 62 22 22 22 6 6 6
44316 - 0 0 0 0 0 0 0 0 0 0 0 0
44317 - 0 0 0 0 0 0 0 0 0 0 0 0
44318 - 0 0 0 0 0 0 0 0 0 0 0 0
44319 - 0 0 0 0 0 0 0 0 0 0 0 0
44320 - 0 0 0 0 0 0 0 0 0 0 0 0
44321 - 0 0 0 0 0 0 0 0 0 0 0 0
44322 - 0 0 0 0 0 0 0 0 1 0 0 1
44323 - 0 0 1 0 0 0 0 0 1 0 0 0
44324 - 0 0 0 0 0 0 0 0 0 0 0 0
44325 - 0 0 0 0 0 0 0 0 0 0 0 0
44326 - 0 0 0 0 0 0 0 0 0 0 0 0
44327 - 0 0 0 0 0 0 0 0 0 0 0 0
44328 - 0 0 0 0 0 0 0 0 0 10 10 10
44329 - 38 38 38 90 90 90 14 14 14 58 58 58
44330 -210 210 210 26 26 26 54 38 6 154 114 10
44331 -226 170 11 236 186 11 225 175 15 184 144 12
44332 -215 174 15 175 146 61 37 26 9 2 2 6
44333 - 70 70 70 246 246 246 138 138 138 2 2 6
44334 - 2 2 6 2 2 6 2 2 6 2 2 6
44335 - 70 70 70 66 66 66 26 26 26 6 6 6
44336 - 0 0 0 0 0 0 0 0 0 0 0 0
44337 - 0 0 0 0 0 0 0 0 0 0 0 0
44338 - 0 0 0 0 0 0 0 0 0 0 0 0
44339 - 0 0 0 0 0 0 0 0 0 0 0 0
44340 - 0 0 0 0 0 0 0 0 0 0 0 0
44341 - 0 0 0 0 0 0 0 0 0 0 0 0
44342 - 0 0 0 0 0 0 0 0 0 0 0 0
44343 - 0 0 0 0 0 0 0 0 0 0 0 0
44344 - 0 0 0 0 0 0 0 0 0 0 0 0
44345 - 0 0 0 0 0 0 0 0 0 0 0 0
44346 - 0 0 0 0 0 0 0 0 0 0 0 0
44347 - 0 0 0 0 0 0 0 0 0 0 0 0
44348 - 0 0 0 0 0 0 0 0 0 10 10 10
44349 - 38 38 38 86 86 86 14 14 14 10 10 10
44350 -195 195 195 188 164 115 192 133 9 225 175 15
44351 -239 182 13 234 190 10 232 195 16 232 200 30
44352 -245 207 45 241 208 19 232 195 16 184 144 12
44353 -218 194 134 211 206 186 42 42 42 2 2 6
44354 - 2 2 6 2 2 6 2 2 6 2 2 6
44355 - 50 50 50 74 74 74 30 30 30 6 6 6
44356 - 0 0 0 0 0 0 0 0 0 0 0 0
44357 - 0 0 0 0 0 0 0 0 0 0 0 0
44358 - 0 0 0 0 0 0 0 0 0 0 0 0
44359 - 0 0 0 0 0 0 0 0 0 0 0 0
44360 - 0 0 0 0 0 0 0 0 0 0 0 0
44361 - 0 0 0 0 0 0 0 0 0 0 0 0
44362 - 0 0 0 0 0 0 0 0 0 0 0 0
44363 - 0 0 0 0 0 0 0 0 0 0 0 0
44364 - 0 0 0 0 0 0 0 0 0 0 0 0
44365 - 0 0 0 0 0 0 0 0 0 0 0 0
44366 - 0 0 0 0 0 0 0 0 0 0 0 0
44367 - 0 0 0 0 0 0 0 0 0 0 0 0
44368 - 0 0 0 0 0 0 0 0 0 10 10 10
44369 - 34 34 34 86 86 86 14 14 14 2 2 6
44370 -121 87 25 192 133 9 219 162 10 239 182 13
44371 -236 186 11 232 195 16 241 208 19 244 214 54
44372 -246 218 60 246 218 38 246 215 20 241 208 19
44373 -241 208 19 226 184 13 121 87 25 2 2 6
44374 - 2 2 6 2 2 6 2 2 6 2 2 6
44375 - 50 50 50 82 82 82 34 34 34 10 10 10
44376 - 0 0 0 0 0 0 0 0 0 0 0 0
44377 - 0 0 0 0 0 0 0 0 0 0 0 0
44378 - 0 0 0 0 0 0 0 0 0 0 0 0
44379 - 0 0 0 0 0 0 0 0 0 0 0 0
44380 - 0 0 0 0 0 0 0 0 0 0 0 0
44381 - 0 0 0 0 0 0 0 0 0 0 0 0
44382 - 0 0 0 0 0 0 0 0 0 0 0 0
44383 - 0 0 0 0 0 0 0 0 0 0 0 0
44384 - 0 0 0 0 0 0 0 0 0 0 0 0
44385 - 0 0 0 0 0 0 0 0 0 0 0 0
44386 - 0 0 0 0 0 0 0 0 0 0 0 0
44387 - 0 0 0 0 0 0 0 0 0 0 0 0
44388 - 0 0 0 0 0 0 0 0 0 10 10 10
44389 - 34 34 34 82 82 82 30 30 30 61 42 6
44390 -180 123 7 206 145 10 230 174 11 239 182 13
44391 -234 190 10 238 202 15 241 208 19 246 218 74
44392 -246 218 38 246 215 20 246 215 20 246 215 20
44393 -226 184 13 215 174 15 184 144 12 6 6 6
44394 - 2 2 6 2 2 6 2 2 6 2 2 6
44395 - 26 26 26 94 94 94 42 42 42 14 14 14
44396 - 0 0 0 0 0 0 0 0 0 0 0 0
44397 - 0 0 0 0 0 0 0 0 0 0 0 0
44398 - 0 0 0 0 0 0 0 0 0 0 0 0
44399 - 0 0 0 0 0 0 0 0 0 0 0 0
44400 - 0 0 0 0 0 0 0 0 0 0 0 0
44401 - 0 0 0 0 0 0 0 0 0 0 0 0
44402 - 0 0 0 0 0 0 0 0 0 0 0 0
44403 - 0 0 0 0 0 0 0 0 0 0 0 0
44404 - 0 0 0 0 0 0 0 0 0 0 0 0
44405 - 0 0 0 0 0 0 0 0 0 0 0 0
44406 - 0 0 0 0 0 0 0 0 0 0 0 0
44407 - 0 0 0 0 0 0 0 0 0 0 0 0
44408 - 0 0 0 0 0 0 0 0 0 10 10 10
44409 - 30 30 30 78 78 78 50 50 50 104 69 6
44410 -192 133 9 216 158 10 236 178 12 236 186 11
44411 -232 195 16 241 208 19 244 214 54 245 215 43
44412 -246 215 20 246 215 20 241 208 19 198 155 10
44413 -200 144 11 216 158 10 156 118 10 2 2 6
44414 - 2 2 6 2 2 6 2 2 6 2 2 6
44415 - 6 6 6 90 90 90 54 54 54 18 18 18
44416 - 6 6 6 0 0 0 0 0 0 0 0 0
44417 - 0 0 0 0 0 0 0 0 0 0 0 0
44418 - 0 0 0 0 0 0 0 0 0 0 0 0
44419 - 0 0 0 0 0 0 0 0 0 0 0 0
44420 - 0 0 0 0 0 0 0 0 0 0 0 0
44421 - 0 0 0 0 0 0 0 0 0 0 0 0
44422 - 0 0 0 0 0 0 0 0 0 0 0 0
44423 - 0 0 0 0 0 0 0 0 0 0 0 0
44424 - 0 0 0 0 0 0 0 0 0 0 0 0
44425 - 0 0 0 0 0 0 0 0 0 0 0 0
44426 - 0 0 0 0 0 0 0 0 0 0 0 0
44427 - 0 0 0 0 0 0 0 0 0 0 0 0
44428 - 0 0 0 0 0 0 0 0 0 10 10 10
44429 - 30 30 30 78 78 78 46 46 46 22 22 22
44430 -137 92 6 210 162 10 239 182 13 238 190 10
44431 -238 202 15 241 208 19 246 215 20 246 215 20
44432 -241 208 19 203 166 17 185 133 11 210 150 10
44433 -216 158 10 210 150 10 102 78 10 2 2 6
44434 - 6 6 6 54 54 54 14 14 14 2 2 6
44435 - 2 2 6 62 62 62 74 74 74 30 30 30
44436 - 10 10 10 0 0 0 0 0 0 0 0 0
44437 - 0 0 0 0 0 0 0 0 0 0 0 0
44438 - 0 0 0 0 0 0 0 0 0 0 0 0
44439 - 0 0 0 0 0 0 0 0 0 0 0 0
44440 - 0 0 0 0 0 0 0 0 0 0 0 0
44441 - 0 0 0 0 0 0 0 0 0 0 0 0
44442 - 0 0 0 0 0 0 0 0 0 0 0 0
44443 - 0 0 0 0 0 0 0 0 0 0 0 0
44444 - 0 0 0 0 0 0 0 0 0 0 0 0
44445 - 0 0 0 0 0 0 0 0 0 0 0 0
44446 - 0 0 0 0 0 0 0 0 0 0 0 0
44447 - 0 0 0 0 0 0 0 0 0 0 0 0
44448 - 0 0 0 0 0 0 0 0 0 10 10 10
44449 - 34 34 34 78 78 78 50 50 50 6 6 6
44450 - 94 70 30 139 102 15 190 146 13 226 184 13
44451 -232 200 30 232 195 16 215 174 15 190 146 13
44452 -168 122 10 192 133 9 210 150 10 213 154 11
44453 -202 150 34 182 157 106 101 98 89 2 2 6
44454 - 2 2 6 78 78 78 116 116 116 58 58 58
44455 - 2 2 6 22 22 22 90 90 90 46 46 46
44456 - 18 18 18 6 6 6 0 0 0 0 0 0
44457 - 0 0 0 0 0 0 0 0 0 0 0 0
44458 - 0 0 0 0 0 0 0 0 0 0 0 0
44459 - 0 0 0 0 0 0 0 0 0 0 0 0
44460 - 0 0 0 0 0 0 0 0 0 0 0 0
44461 - 0 0 0 0 0 0 0 0 0 0 0 0
44462 - 0 0 0 0 0 0 0 0 0 0 0 0
44463 - 0 0 0 0 0 0 0 0 0 0 0 0
44464 - 0 0 0 0 0 0 0 0 0 0 0 0
44465 - 0 0 0 0 0 0 0 0 0 0 0 0
44466 - 0 0 0 0 0 0 0 0 0 0 0 0
44467 - 0 0 0 0 0 0 0 0 0 0 0 0
44468 - 0 0 0 0 0 0 0 0 0 10 10 10
44469 - 38 38 38 86 86 86 50 50 50 6 6 6
44470 -128 128 128 174 154 114 156 107 11 168 122 10
44471 -198 155 10 184 144 12 197 138 11 200 144 11
44472 -206 145 10 206 145 10 197 138 11 188 164 115
44473 -195 195 195 198 198 198 174 174 174 14 14 14
44474 - 2 2 6 22 22 22 116 116 116 116 116 116
44475 - 22 22 22 2 2 6 74 74 74 70 70 70
44476 - 30 30 30 10 10 10 0 0 0 0 0 0
44477 - 0 0 0 0 0 0 0 0 0 0 0 0
44478 - 0 0 0 0 0 0 0 0 0 0 0 0
44479 - 0 0 0 0 0 0 0 0 0 0 0 0
44480 - 0 0 0 0 0 0 0 0 0 0 0 0
44481 - 0 0 0 0 0 0 0 0 0 0 0 0
44482 - 0 0 0 0 0 0 0 0 0 0 0 0
44483 - 0 0 0 0 0 0 0 0 0 0 0 0
44484 - 0 0 0 0 0 0 0 0 0 0 0 0
44485 - 0 0 0 0 0 0 0 0 0 0 0 0
44486 - 0 0 0 0 0 0 0 0 0 0 0 0
44487 - 0 0 0 0 0 0 0 0 0 0 0 0
44488 - 0 0 0 0 0 0 6 6 6 18 18 18
44489 - 50 50 50 101 101 101 26 26 26 10 10 10
44490 -138 138 138 190 190 190 174 154 114 156 107 11
44491 -197 138 11 200 144 11 197 138 11 192 133 9
44492 -180 123 7 190 142 34 190 178 144 187 187 187
44493 -202 202 202 221 221 221 214 214 214 66 66 66
44494 - 2 2 6 2 2 6 50 50 50 62 62 62
44495 - 6 6 6 2 2 6 10 10 10 90 90 90
44496 - 50 50 50 18 18 18 6 6 6 0 0 0
44497 - 0 0 0 0 0 0 0 0 0 0 0 0
44498 - 0 0 0 0 0 0 0 0 0 0 0 0
44499 - 0 0 0 0 0 0 0 0 0 0 0 0
44500 - 0 0 0 0 0 0 0 0 0 0 0 0
44501 - 0 0 0 0 0 0 0 0 0 0 0 0
44502 - 0 0 0 0 0 0 0 0 0 0 0 0
44503 - 0 0 0 0 0 0 0 0 0 0 0 0
44504 - 0 0 0 0 0 0 0 0 0 0 0 0
44505 - 0 0 0 0 0 0 0 0 0 0 0 0
44506 - 0 0 0 0 0 0 0 0 0 0 0 0
44507 - 0 0 0 0 0 0 0 0 0 0 0 0
44508 - 0 0 0 0 0 0 10 10 10 34 34 34
44509 - 74 74 74 74 74 74 2 2 6 6 6 6
44510 -144 144 144 198 198 198 190 190 190 178 166 146
44511 -154 121 60 156 107 11 156 107 11 168 124 44
44512 -174 154 114 187 187 187 190 190 190 210 210 210
44513 -246 246 246 253 253 253 253 253 253 182 182 182
44514 - 6 6 6 2 2 6 2 2 6 2 2 6
44515 - 2 2 6 2 2 6 2 2 6 62 62 62
44516 - 74 74 74 34 34 34 14 14 14 0 0 0
44517 - 0 0 0 0 0 0 0 0 0 0 0 0
44518 - 0 0 0 0 0 0 0 0 0 0 0 0
44519 - 0 0 0 0 0 0 0 0 0 0 0 0
44520 - 0 0 0 0 0 0 0 0 0 0 0 0
44521 - 0 0 0 0 0 0 0 0 0 0 0 0
44522 - 0 0 0 0 0 0 0 0 0 0 0 0
44523 - 0 0 0 0 0 0 0 0 0 0 0 0
44524 - 0 0 0 0 0 0 0 0 0 0 0 0
44525 - 0 0 0 0 0 0 0 0 0 0 0 0
44526 - 0 0 0 0 0 0 0 0 0 0 0 0
44527 - 0 0 0 0 0 0 0 0 0 0 0 0
44528 - 0 0 0 10 10 10 22 22 22 54 54 54
44529 - 94 94 94 18 18 18 2 2 6 46 46 46
44530 -234 234 234 221 221 221 190 190 190 190 190 190
44531 -190 190 190 187 187 187 187 187 187 190 190 190
44532 -190 190 190 195 195 195 214 214 214 242 242 242
44533 -253 253 253 253 253 253 253 253 253 253 253 253
44534 - 82 82 82 2 2 6 2 2 6 2 2 6
44535 - 2 2 6 2 2 6 2 2 6 14 14 14
44536 - 86 86 86 54 54 54 22 22 22 6 6 6
44537 - 0 0 0 0 0 0 0 0 0 0 0 0
44538 - 0 0 0 0 0 0 0 0 0 0 0 0
44539 - 0 0 0 0 0 0 0 0 0 0 0 0
44540 - 0 0 0 0 0 0 0 0 0 0 0 0
44541 - 0 0 0 0 0 0 0 0 0 0 0 0
44542 - 0 0 0 0 0 0 0 0 0 0 0 0
44543 - 0 0 0 0 0 0 0 0 0 0 0 0
44544 - 0 0 0 0 0 0 0 0 0 0 0 0
44545 - 0 0 0 0 0 0 0 0 0 0 0 0
44546 - 0 0 0 0 0 0 0 0 0 0 0 0
44547 - 0 0 0 0 0 0 0 0 0 0 0 0
44548 - 6 6 6 18 18 18 46 46 46 90 90 90
44549 - 46 46 46 18 18 18 6 6 6 182 182 182
44550 -253 253 253 246 246 246 206 206 206 190 190 190
44551 -190 190 190 190 190 190 190 190 190 190 190 190
44552 -206 206 206 231 231 231 250 250 250 253 253 253
44553 -253 253 253 253 253 253 253 253 253 253 253 253
44554 -202 202 202 14 14 14 2 2 6 2 2 6
44555 - 2 2 6 2 2 6 2 2 6 2 2 6
44556 - 42 42 42 86 86 86 42 42 42 18 18 18
44557 - 6 6 6 0 0 0 0 0 0 0 0 0
44558 - 0 0 0 0 0 0 0 0 0 0 0 0
44559 - 0 0 0 0 0 0 0 0 0 0 0 0
44560 - 0 0 0 0 0 0 0 0 0 0 0 0
44561 - 0 0 0 0 0 0 0 0 0 0 0 0
44562 - 0 0 0 0 0 0 0 0 0 0 0 0
44563 - 0 0 0 0 0 0 0 0 0 0 0 0
44564 - 0 0 0 0 0 0 0 0 0 0 0 0
44565 - 0 0 0 0 0 0 0 0 0 0 0 0
44566 - 0 0 0 0 0 0 0 0 0 0 0 0
44567 - 0 0 0 0 0 0 0 0 0 6 6 6
44568 - 14 14 14 38 38 38 74 74 74 66 66 66
44569 - 2 2 6 6 6 6 90 90 90 250 250 250
44570 -253 253 253 253 253 253 238 238 238 198 198 198
44571 -190 190 190 190 190 190 195 195 195 221 221 221
44572 -246 246 246 253 253 253 253 253 253 253 253 253
44573 -253 253 253 253 253 253 253 253 253 253 253 253
44574 -253 253 253 82 82 82 2 2 6 2 2 6
44575 - 2 2 6 2 2 6 2 2 6 2 2 6
44576 - 2 2 6 78 78 78 70 70 70 34 34 34
44577 - 14 14 14 6 6 6 0 0 0 0 0 0
44578 - 0 0 0 0 0 0 0 0 0 0 0 0
44579 - 0 0 0 0 0 0 0 0 0 0 0 0
44580 - 0 0 0 0 0 0 0 0 0 0 0 0
44581 - 0 0 0 0 0 0 0 0 0 0 0 0
44582 - 0 0 0 0 0 0 0 0 0 0 0 0
44583 - 0 0 0 0 0 0 0 0 0 0 0 0
44584 - 0 0 0 0 0 0 0 0 0 0 0 0
44585 - 0 0 0 0 0 0 0 0 0 0 0 0
44586 - 0 0 0 0 0 0 0 0 0 0 0 0
44587 - 0 0 0 0 0 0 0 0 0 14 14 14
44588 - 34 34 34 66 66 66 78 78 78 6 6 6
44589 - 2 2 6 18 18 18 218 218 218 253 253 253
44590 -253 253 253 253 253 253 253 253 253 246 246 246
44591 -226 226 226 231 231 231 246 246 246 253 253 253
44592 -253 253 253 253 253 253 253 253 253 253 253 253
44593 -253 253 253 253 253 253 253 253 253 253 253 253
44594 -253 253 253 178 178 178 2 2 6 2 2 6
44595 - 2 2 6 2 2 6 2 2 6 2 2 6
44596 - 2 2 6 18 18 18 90 90 90 62 62 62
44597 - 30 30 30 10 10 10 0 0 0 0 0 0
44598 - 0 0 0 0 0 0 0 0 0 0 0 0
44599 - 0 0 0 0 0 0 0 0 0 0 0 0
44600 - 0 0 0 0 0 0 0 0 0 0 0 0
44601 - 0 0 0 0 0 0 0 0 0 0 0 0
44602 - 0 0 0 0 0 0 0 0 0 0 0 0
44603 - 0 0 0 0 0 0 0 0 0 0 0 0
44604 - 0 0 0 0 0 0 0 0 0 0 0 0
44605 - 0 0 0 0 0 0 0 0 0 0 0 0
44606 - 0 0 0 0 0 0 0 0 0 0 0 0
44607 - 0 0 0 0 0 0 10 10 10 26 26 26
44608 - 58 58 58 90 90 90 18 18 18 2 2 6
44609 - 2 2 6 110 110 110 253 253 253 253 253 253
44610 -253 253 253 253 253 253 253 253 253 253 253 253
44611 -250 250 250 253 253 253 253 253 253 253 253 253
44612 -253 253 253 253 253 253 253 253 253 253 253 253
44613 -253 253 253 253 253 253 253 253 253 253 253 253
44614 -253 253 253 231 231 231 18 18 18 2 2 6
44615 - 2 2 6 2 2 6 2 2 6 2 2 6
44616 - 2 2 6 2 2 6 18 18 18 94 94 94
44617 - 54 54 54 26 26 26 10 10 10 0 0 0
44618 - 0 0 0 0 0 0 0 0 0 0 0 0
44619 - 0 0 0 0 0 0 0 0 0 0 0 0
44620 - 0 0 0 0 0 0 0 0 0 0 0 0
44621 - 0 0 0 0 0 0 0 0 0 0 0 0
44622 - 0 0 0 0 0 0 0 0 0 0 0 0
44623 - 0 0 0 0 0 0 0 0 0 0 0 0
44624 - 0 0 0 0 0 0 0 0 0 0 0 0
44625 - 0 0 0 0 0 0 0 0 0 0 0 0
44626 - 0 0 0 0 0 0 0 0 0 0 0 0
44627 - 0 0 0 6 6 6 22 22 22 50 50 50
44628 - 90 90 90 26 26 26 2 2 6 2 2 6
44629 - 14 14 14 195 195 195 250 250 250 253 253 253
44630 -253 253 253 253 253 253 253 253 253 253 253 253
44631 -253 253 253 253 253 253 253 253 253 253 253 253
44632 -253 253 253 253 253 253 253 253 253 253 253 253
44633 -253 253 253 253 253 253 253 253 253 253 253 253
44634 -250 250 250 242 242 242 54 54 54 2 2 6
44635 - 2 2 6 2 2 6 2 2 6 2 2 6
44636 - 2 2 6 2 2 6 2 2 6 38 38 38
44637 - 86 86 86 50 50 50 22 22 22 6 6 6
44638 - 0 0 0 0 0 0 0 0 0 0 0 0
44639 - 0 0 0 0 0 0 0 0 0 0 0 0
44640 - 0 0 0 0 0 0 0 0 0 0 0 0
44641 - 0 0 0 0 0 0 0 0 0 0 0 0
44642 - 0 0 0 0 0 0 0 0 0 0 0 0
44643 - 0 0 0 0 0 0 0 0 0 0 0 0
44644 - 0 0 0 0 0 0 0 0 0 0 0 0
44645 - 0 0 0 0 0 0 0 0 0 0 0 0
44646 - 0 0 0 0 0 0 0 0 0 0 0 0
44647 - 6 6 6 14 14 14 38 38 38 82 82 82
44648 - 34 34 34 2 2 6 2 2 6 2 2 6
44649 - 42 42 42 195 195 195 246 246 246 253 253 253
44650 -253 253 253 253 253 253 253 253 253 250 250 250
44651 -242 242 242 242 242 242 250 250 250 253 253 253
44652 -253 253 253 253 253 253 253 253 253 253 253 253
44653 -253 253 253 250 250 250 246 246 246 238 238 238
44654 -226 226 226 231 231 231 101 101 101 6 6 6
44655 - 2 2 6 2 2 6 2 2 6 2 2 6
44656 - 2 2 6 2 2 6 2 2 6 2 2 6
44657 - 38 38 38 82 82 82 42 42 42 14 14 14
44658 - 6 6 6 0 0 0 0 0 0 0 0 0
44659 - 0 0 0 0 0 0 0 0 0 0 0 0
44660 - 0 0 0 0 0 0 0 0 0 0 0 0
44661 - 0 0 0 0 0 0 0 0 0 0 0 0
44662 - 0 0 0 0 0 0 0 0 0 0 0 0
44663 - 0 0 0 0 0 0 0 0 0 0 0 0
44664 - 0 0 0 0 0 0 0 0 0 0 0 0
44665 - 0 0 0 0 0 0 0 0 0 0 0 0
44666 - 0 0 0 0 0 0 0 0 0 0 0 0
44667 - 10 10 10 26 26 26 62 62 62 66 66 66
44668 - 2 2 6 2 2 6 2 2 6 6 6 6
44669 - 70 70 70 170 170 170 206 206 206 234 234 234
44670 -246 246 246 250 250 250 250 250 250 238 238 238
44671 -226 226 226 231 231 231 238 238 238 250 250 250
44672 -250 250 250 250 250 250 246 246 246 231 231 231
44673 -214 214 214 206 206 206 202 202 202 202 202 202
44674 -198 198 198 202 202 202 182 182 182 18 18 18
44675 - 2 2 6 2 2 6 2 2 6 2 2 6
44676 - 2 2 6 2 2 6 2 2 6 2 2 6
44677 - 2 2 6 62 62 62 66 66 66 30 30 30
44678 - 10 10 10 0 0 0 0 0 0 0 0 0
44679 - 0 0 0 0 0 0 0 0 0 0 0 0
44680 - 0 0 0 0 0 0 0 0 0 0 0 0
44681 - 0 0 0 0 0 0 0 0 0 0 0 0
44682 - 0 0 0 0 0 0 0 0 0 0 0 0
44683 - 0 0 0 0 0 0 0 0 0 0 0 0
44684 - 0 0 0 0 0 0 0 0 0 0 0 0
44685 - 0 0 0 0 0 0 0 0 0 0 0 0
44686 - 0 0 0 0 0 0 0 0 0 0 0 0
44687 - 14 14 14 42 42 42 82 82 82 18 18 18
44688 - 2 2 6 2 2 6 2 2 6 10 10 10
44689 - 94 94 94 182 182 182 218 218 218 242 242 242
44690 -250 250 250 253 253 253 253 253 253 250 250 250
44691 -234 234 234 253 253 253 253 253 253 253 253 253
44692 -253 253 253 253 253 253 253 253 253 246 246 246
44693 -238 238 238 226 226 226 210 210 210 202 202 202
44694 -195 195 195 195 195 195 210 210 210 158 158 158
44695 - 6 6 6 14 14 14 50 50 50 14 14 14
44696 - 2 2 6 2 2 6 2 2 6 2 2 6
44697 - 2 2 6 6 6 6 86 86 86 46 46 46
44698 - 18 18 18 6 6 6 0 0 0 0 0 0
44699 - 0 0 0 0 0 0 0 0 0 0 0 0
44700 - 0 0 0 0 0 0 0 0 0 0 0 0
44701 - 0 0 0 0 0 0 0 0 0 0 0 0
44702 - 0 0 0 0 0 0 0 0 0 0 0 0
44703 - 0 0 0 0 0 0 0 0 0 0 0 0
44704 - 0 0 0 0 0 0 0 0 0 0 0 0
44705 - 0 0 0 0 0 0 0 0 0 0 0 0
44706 - 0 0 0 0 0 0 0 0 0 6 6 6
44707 - 22 22 22 54 54 54 70 70 70 2 2 6
44708 - 2 2 6 10 10 10 2 2 6 22 22 22
44709 -166 166 166 231 231 231 250 250 250 253 253 253
44710 -253 253 253 253 253 253 253 253 253 250 250 250
44711 -242 242 242 253 253 253 253 253 253 253 253 253
44712 -253 253 253 253 253 253 253 253 253 253 253 253
44713 -253 253 253 253 253 253 253 253 253 246 246 246
44714 -231 231 231 206 206 206 198 198 198 226 226 226
44715 - 94 94 94 2 2 6 6 6 6 38 38 38
44716 - 30 30 30 2 2 6 2 2 6 2 2 6
44717 - 2 2 6 2 2 6 62 62 62 66 66 66
44718 - 26 26 26 10 10 10 0 0 0 0 0 0
44719 - 0 0 0 0 0 0 0 0 0 0 0 0
44720 - 0 0 0 0 0 0 0 0 0 0 0 0
44721 - 0 0 0 0 0 0 0 0 0 0 0 0
44722 - 0 0 0 0 0 0 0 0 0 0 0 0
44723 - 0 0 0 0 0 0 0 0 0 0 0 0
44724 - 0 0 0 0 0 0 0 0 0 0 0 0
44725 - 0 0 0 0 0 0 0 0 0 0 0 0
44726 - 0 0 0 0 0 0 0 0 0 10 10 10
44727 - 30 30 30 74 74 74 50 50 50 2 2 6
44728 - 26 26 26 26 26 26 2 2 6 106 106 106
44729 -238 238 238 253 253 253 253 253 253 253 253 253
44730 -253 253 253 253 253 253 253 253 253 253 253 253
44731 -253 253 253 253 253 253 253 253 253 253 253 253
44732 -253 253 253 253 253 253 253 253 253 253 253 253
44733 -253 253 253 253 253 253 253 253 253 253 253 253
44734 -253 253 253 246 246 246 218 218 218 202 202 202
44735 -210 210 210 14 14 14 2 2 6 2 2 6
44736 - 30 30 30 22 22 22 2 2 6 2 2 6
44737 - 2 2 6 2 2 6 18 18 18 86 86 86
44738 - 42 42 42 14 14 14 0 0 0 0 0 0
44739 - 0 0 0 0 0 0 0 0 0 0 0 0
44740 - 0 0 0 0 0 0 0 0 0 0 0 0
44741 - 0 0 0 0 0 0 0 0 0 0 0 0
44742 - 0 0 0 0 0 0 0 0 0 0 0 0
44743 - 0 0 0 0 0 0 0 0 0 0 0 0
44744 - 0 0 0 0 0 0 0 0 0 0 0 0
44745 - 0 0 0 0 0 0 0 0 0 0 0 0
44746 - 0 0 0 0 0 0 0 0 0 14 14 14
44747 - 42 42 42 90 90 90 22 22 22 2 2 6
44748 - 42 42 42 2 2 6 18 18 18 218 218 218
44749 -253 253 253 253 253 253 253 253 253 253 253 253
44750 -253 253 253 253 253 253 253 253 253 253 253 253
44751 -253 253 253 253 253 253 253 253 253 253 253 253
44752 -253 253 253 253 253 253 253 253 253 253 253 253
44753 -253 253 253 253 253 253 253 253 253 253 253 253
44754 -253 253 253 253 253 253 250 250 250 221 221 221
44755 -218 218 218 101 101 101 2 2 6 14 14 14
44756 - 18 18 18 38 38 38 10 10 10 2 2 6
44757 - 2 2 6 2 2 6 2 2 6 78 78 78
44758 - 58 58 58 22 22 22 6 6 6 0 0 0
44759 - 0 0 0 0 0 0 0 0 0 0 0 0
44760 - 0 0 0 0 0 0 0 0 0 0 0 0
44761 - 0 0 0 0 0 0 0 0 0 0 0 0
44762 - 0 0 0 0 0 0 0 0 0 0 0 0
44763 - 0 0 0 0 0 0 0 0 0 0 0 0
44764 - 0 0 0 0 0 0 0 0 0 0 0 0
44765 - 0 0 0 0 0 0 0 0 0 0 0 0
44766 - 0 0 0 0 0 0 6 6 6 18 18 18
44767 - 54 54 54 82 82 82 2 2 6 26 26 26
44768 - 22 22 22 2 2 6 123 123 123 253 253 253
44769 -253 253 253 253 253 253 253 253 253 253 253 253
44770 -253 253 253 253 253 253 253 253 253 253 253 253
44771 -253 253 253 253 253 253 253 253 253 253 253 253
44772 -253 253 253 253 253 253 253 253 253 253 253 253
44773 -253 253 253 253 253 253 253 253 253 253 253 253
44774 -253 253 253 253 253 253 253 253 253 250 250 250
44775 -238 238 238 198 198 198 6 6 6 38 38 38
44776 - 58 58 58 26 26 26 38 38 38 2 2 6
44777 - 2 2 6 2 2 6 2 2 6 46 46 46
44778 - 78 78 78 30 30 30 10 10 10 0 0 0
44779 - 0 0 0 0 0 0 0 0 0 0 0 0
44780 - 0 0 0 0 0 0 0 0 0 0 0 0
44781 - 0 0 0 0 0 0 0 0 0 0 0 0
44782 - 0 0 0 0 0 0 0 0 0 0 0 0
44783 - 0 0 0 0 0 0 0 0 0 0 0 0
44784 - 0 0 0 0 0 0 0 0 0 0 0 0
44785 - 0 0 0 0 0 0 0 0 0 0 0 0
44786 - 0 0 0 0 0 0 10 10 10 30 30 30
44787 - 74 74 74 58 58 58 2 2 6 42 42 42
44788 - 2 2 6 22 22 22 231 231 231 253 253 253
44789 -253 253 253 253 253 253 253 253 253 253 253 253
44790 -253 253 253 253 253 253 253 253 253 250 250 250
44791 -253 253 253 253 253 253 253 253 253 253 253 253
44792 -253 253 253 253 253 253 253 253 253 253 253 253
44793 -253 253 253 253 253 253 253 253 253 253 253 253
44794 -253 253 253 253 253 253 253 253 253 253 253 253
44795 -253 253 253 246 246 246 46 46 46 38 38 38
44796 - 42 42 42 14 14 14 38 38 38 14 14 14
44797 - 2 2 6 2 2 6 2 2 6 6 6 6
44798 - 86 86 86 46 46 46 14 14 14 0 0 0
44799 - 0 0 0 0 0 0 0 0 0 0 0 0
44800 - 0 0 0 0 0 0 0 0 0 0 0 0
44801 - 0 0 0 0 0 0 0 0 0 0 0 0
44802 - 0 0 0 0 0 0 0 0 0 0 0 0
44803 - 0 0 0 0 0 0 0 0 0 0 0 0
44804 - 0 0 0 0 0 0 0 0 0 0 0 0
44805 - 0 0 0 0 0 0 0 0 0 0 0 0
44806 - 0 0 0 6 6 6 14 14 14 42 42 42
44807 - 90 90 90 18 18 18 18 18 18 26 26 26
44808 - 2 2 6 116 116 116 253 253 253 253 253 253
44809 -253 253 253 253 253 253 253 253 253 253 253 253
44810 -253 253 253 253 253 253 250 250 250 238 238 238
44811 -253 253 253 253 253 253 253 253 253 253 253 253
44812 -253 253 253 253 253 253 253 253 253 253 253 253
44813 -253 253 253 253 253 253 253 253 253 253 253 253
44814 -253 253 253 253 253 253 253 253 253 253 253 253
44815 -253 253 253 253 253 253 94 94 94 6 6 6
44816 - 2 2 6 2 2 6 10 10 10 34 34 34
44817 - 2 2 6 2 2 6 2 2 6 2 2 6
44818 - 74 74 74 58 58 58 22 22 22 6 6 6
44819 - 0 0 0 0 0 0 0 0 0 0 0 0
44820 - 0 0 0 0 0 0 0 0 0 0 0 0
44821 - 0 0 0 0 0 0 0 0 0 0 0 0
44822 - 0 0 0 0 0 0 0 0 0 0 0 0
44823 - 0 0 0 0 0 0 0 0 0 0 0 0
44824 - 0 0 0 0 0 0 0 0 0 0 0 0
44825 - 0 0 0 0 0 0 0 0 0 0 0 0
44826 - 0 0 0 10 10 10 26 26 26 66 66 66
44827 - 82 82 82 2 2 6 38 38 38 6 6 6
44828 - 14 14 14 210 210 210 253 253 253 253 253 253
44829 -253 253 253 253 253 253 253 253 253 253 253 253
44830 -253 253 253 253 253 253 246 246 246 242 242 242
44831 -253 253 253 253 253 253 253 253 253 253 253 253
44832 -253 253 253 253 253 253 253 253 253 253 253 253
44833 -253 253 253 253 253 253 253 253 253 253 253 253
44834 -253 253 253 253 253 253 253 253 253 253 253 253
44835 -253 253 253 253 253 253 144 144 144 2 2 6
44836 - 2 2 6 2 2 6 2 2 6 46 46 46
44837 - 2 2 6 2 2 6 2 2 6 2 2 6
44838 - 42 42 42 74 74 74 30 30 30 10 10 10
44839 - 0 0 0 0 0 0 0 0 0 0 0 0
44840 - 0 0 0 0 0 0 0 0 0 0 0 0
44841 - 0 0 0 0 0 0 0 0 0 0 0 0
44842 - 0 0 0 0 0 0 0 0 0 0 0 0
44843 - 0 0 0 0 0 0 0 0 0 0 0 0
44844 - 0 0 0 0 0 0 0 0 0 0 0 0
44845 - 0 0 0 0 0 0 0 0 0 0 0 0
44846 - 6 6 6 14 14 14 42 42 42 90 90 90
44847 - 26 26 26 6 6 6 42 42 42 2 2 6
44848 - 74 74 74 250 250 250 253 253 253 253 253 253
44849 -253 253 253 253 253 253 253 253 253 253 253 253
44850 -253 253 253 253 253 253 242 242 242 242 242 242
44851 -253 253 253 253 253 253 253 253 253 253 253 253
44852 -253 253 253 253 253 253 253 253 253 253 253 253
44853 -253 253 253 253 253 253 253 253 253 253 253 253
44854 -253 253 253 253 253 253 253 253 253 253 253 253
44855 -253 253 253 253 253 253 182 182 182 2 2 6
44856 - 2 2 6 2 2 6 2 2 6 46 46 46
44857 - 2 2 6 2 2 6 2 2 6 2 2 6
44858 - 10 10 10 86 86 86 38 38 38 10 10 10
44859 - 0 0 0 0 0 0 0 0 0 0 0 0
44860 - 0 0 0 0 0 0 0 0 0 0 0 0
44861 - 0 0 0 0 0 0 0 0 0 0 0 0
44862 - 0 0 0 0 0 0 0 0 0 0 0 0
44863 - 0 0 0 0 0 0 0 0 0 0 0 0
44864 - 0 0 0 0 0 0 0 0 0 0 0 0
44865 - 0 0 0 0 0 0 0 0 0 0 0 0
44866 - 10 10 10 26 26 26 66 66 66 82 82 82
44867 - 2 2 6 22 22 22 18 18 18 2 2 6
44868 -149 149 149 253 253 253 253 253 253 253 253 253
44869 -253 253 253 253 253 253 253 253 253 253 253 253
44870 -253 253 253 253 253 253 234 234 234 242 242 242
44871 -253 253 253 253 253 253 253 253 253 253 253 253
44872 -253 253 253 253 253 253 253 253 253 253 253 253
44873 -253 253 253 253 253 253 253 253 253 253 253 253
44874 -253 253 253 253 253 253 253 253 253 253 253 253
44875 -253 253 253 253 253 253 206 206 206 2 2 6
44876 - 2 2 6 2 2 6 2 2 6 38 38 38
44877 - 2 2 6 2 2 6 2 2 6 2 2 6
44878 - 6 6 6 86 86 86 46 46 46 14 14 14
44879 - 0 0 0 0 0 0 0 0 0 0 0 0
44880 - 0 0 0 0 0 0 0 0 0 0 0 0
44881 - 0 0 0 0 0 0 0 0 0 0 0 0
44882 - 0 0 0 0 0 0 0 0 0 0 0 0
44883 - 0 0 0 0 0 0 0 0 0 0 0 0
44884 - 0 0 0 0 0 0 0 0 0 0 0 0
44885 - 0 0 0 0 0 0 0 0 0 6 6 6
44886 - 18 18 18 46 46 46 86 86 86 18 18 18
44887 - 2 2 6 34 34 34 10 10 10 6 6 6
44888 -210 210 210 253 253 253 253 253 253 253 253 253
44889 -253 253 253 253 253 253 253 253 253 253 253 253
44890 -253 253 253 253 253 253 234 234 234 242 242 242
44891 -253 253 253 253 253 253 253 253 253 253 253 253
44892 -253 253 253 253 253 253 253 253 253 253 253 253
44893 -253 253 253 253 253 253 253 253 253 253 253 253
44894 -253 253 253 253 253 253 253 253 253 253 253 253
44895 -253 253 253 253 253 253 221 221 221 6 6 6
44896 - 2 2 6 2 2 6 6 6 6 30 30 30
44897 - 2 2 6 2 2 6 2 2 6 2 2 6
44898 - 2 2 6 82 82 82 54 54 54 18 18 18
44899 - 6 6 6 0 0 0 0 0 0 0 0 0
44900 - 0 0 0 0 0 0 0 0 0 0 0 0
44901 - 0 0 0 0 0 0 0 0 0 0 0 0
44902 - 0 0 0 0 0 0 0 0 0 0 0 0
44903 - 0 0 0 0 0 0 0 0 0 0 0 0
44904 - 0 0 0 0 0 0 0 0 0 0 0 0
44905 - 0 0 0 0 0 0 0 0 0 10 10 10
44906 - 26 26 26 66 66 66 62 62 62 2 2 6
44907 - 2 2 6 38 38 38 10 10 10 26 26 26
44908 -238 238 238 253 253 253 253 253 253 253 253 253
44909 -253 253 253 253 253 253 253 253 253 253 253 253
44910 -253 253 253 253 253 253 231 231 231 238 238 238
44911 -253 253 253 253 253 253 253 253 253 253 253 253
44912 -253 253 253 253 253 253 253 253 253 253 253 253
44913 -253 253 253 253 253 253 253 253 253 253 253 253
44914 -253 253 253 253 253 253 253 253 253 253 253 253
44915 -253 253 253 253 253 253 231 231 231 6 6 6
44916 - 2 2 6 2 2 6 10 10 10 30 30 30
44917 - 2 2 6 2 2 6 2 2 6 2 2 6
44918 - 2 2 6 66 66 66 58 58 58 22 22 22
44919 - 6 6 6 0 0 0 0 0 0 0 0 0
44920 - 0 0 0 0 0 0 0 0 0 0 0 0
44921 - 0 0 0 0 0 0 0 0 0 0 0 0
44922 - 0 0 0 0 0 0 0 0 0 0 0 0
44923 - 0 0 0 0 0 0 0 0 0 0 0 0
44924 - 0 0 0 0 0 0 0 0 0 0 0 0
44925 - 0 0 0 0 0 0 0 0 0 10 10 10
44926 - 38 38 38 78 78 78 6 6 6 2 2 6
44927 - 2 2 6 46 46 46 14 14 14 42 42 42
44928 -246 246 246 253 253 253 253 253 253 253 253 253
44929 -253 253 253 253 253 253 253 253 253 253 253 253
44930 -253 253 253 253 253 253 231 231 231 242 242 242
44931 -253 253 253 253 253 253 253 253 253 253 253 253
44932 -253 253 253 253 253 253 253 253 253 253 253 253
44933 -253 253 253 253 253 253 253 253 253 253 253 253
44934 -253 253 253 253 253 253 253 253 253 253 253 253
44935 -253 253 253 253 253 253 234 234 234 10 10 10
44936 - 2 2 6 2 2 6 22 22 22 14 14 14
44937 - 2 2 6 2 2 6 2 2 6 2 2 6
44938 - 2 2 6 66 66 66 62 62 62 22 22 22
44939 - 6 6 6 0 0 0 0 0 0 0 0 0
44940 - 0 0 0 0 0 0 0 0 0 0 0 0
44941 - 0 0 0 0 0 0 0 0 0 0 0 0
44942 - 0 0 0 0 0 0 0 0 0 0 0 0
44943 - 0 0 0 0 0 0 0 0 0 0 0 0
44944 - 0 0 0 0 0 0 0 0 0 0 0 0
44945 - 0 0 0 0 0 0 6 6 6 18 18 18
44946 - 50 50 50 74 74 74 2 2 6 2 2 6
44947 - 14 14 14 70 70 70 34 34 34 62 62 62
44948 -250 250 250 253 253 253 253 253 253 253 253 253
44949 -253 253 253 253 253 253 253 253 253 253 253 253
44950 -253 253 253 253 253 253 231 231 231 246 246 246
44951 -253 253 253 253 253 253 253 253 253 253 253 253
44952 -253 253 253 253 253 253 253 253 253 253 253 253
44953 -253 253 253 253 253 253 253 253 253 253 253 253
44954 -253 253 253 253 253 253 253 253 253 253 253 253
44955 -253 253 253 253 253 253 234 234 234 14 14 14
44956 - 2 2 6 2 2 6 30 30 30 2 2 6
44957 - 2 2 6 2 2 6 2 2 6 2 2 6
44958 - 2 2 6 66 66 66 62 62 62 22 22 22
44959 - 6 6 6 0 0 0 0 0 0 0 0 0
44960 - 0 0 0 0 0 0 0 0 0 0 0 0
44961 - 0 0 0 0 0 0 0 0 0 0 0 0
44962 - 0 0 0 0 0 0 0 0 0 0 0 0
44963 - 0 0 0 0 0 0 0 0 0 0 0 0
44964 - 0 0 0 0 0 0 0 0 0 0 0 0
44965 - 0 0 0 0 0 0 6 6 6 18 18 18
44966 - 54 54 54 62 62 62 2 2 6 2 2 6
44967 - 2 2 6 30 30 30 46 46 46 70 70 70
44968 -250 250 250 253 253 253 253 253 253 253 253 253
44969 -253 253 253 253 253 253 253 253 253 253 253 253
44970 -253 253 253 253 253 253 231 231 231 246 246 246
44971 -253 253 253 253 253 253 253 253 253 253 253 253
44972 -253 253 253 253 253 253 253 253 253 253 253 253
44973 -253 253 253 253 253 253 253 253 253 253 253 253
44974 -253 253 253 253 253 253 253 253 253 253 253 253
44975 -253 253 253 253 253 253 226 226 226 10 10 10
44976 - 2 2 6 6 6 6 30 30 30 2 2 6
44977 - 2 2 6 2 2 6 2 2 6 2 2 6
44978 - 2 2 6 66 66 66 58 58 58 22 22 22
44979 - 6 6 6 0 0 0 0 0 0 0 0 0
44980 - 0 0 0 0 0 0 0 0 0 0 0 0
44981 - 0 0 0 0 0 0 0 0 0 0 0 0
44982 - 0 0 0 0 0 0 0 0 0 0 0 0
44983 - 0 0 0 0 0 0 0 0 0 0 0 0
44984 - 0 0 0 0 0 0 0 0 0 0 0 0
44985 - 0 0 0 0 0 0 6 6 6 22 22 22
44986 - 58 58 58 62 62 62 2 2 6 2 2 6
44987 - 2 2 6 2 2 6 30 30 30 78 78 78
44988 -250 250 250 253 253 253 253 253 253 253 253 253
44989 -253 253 253 253 253 253 253 253 253 253 253 253
44990 -253 253 253 253 253 253 231 231 231 246 246 246
44991 -253 253 253 253 253 253 253 253 253 253 253 253
44992 -253 253 253 253 253 253 253 253 253 253 253 253
44993 -253 253 253 253 253 253 253 253 253 253 253 253
44994 -253 253 253 253 253 253 253 253 253 253 253 253
44995 -253 253 253 253 253 253 206 206 206 2 2 6
44996 - 22 22 22 34 34 34 18 14 6 22 22 22
44997 - 26 26 26 18 18 18 6 6 6 2 2 6
44998 - 2 2 6 82 82 82 54 54 54 18 18 18
44999 - 6 6 6 0 0 0 0 0 0 0 0 0
45000 - 0 0 0 0 0 0 0 0 0 0 0 0
45001 - 0 0 0 0 0 0 0 0 0 0 0 0
45002 - 0 0 0 0 0 0 0 0 0 0 0 0
45003 - 0 0 0 0 0 0 0 0 0 0 0 0
45004 - 0 0 0 0 0 0 0 0 0 0 0 0
45005 - 0 0 0 0 0 0 6 6 6 26 26 26
45006 - 62 62 62 106 106 106 74 54 14 185 133 11
45007 -210 162 10 121 92 8 6 6 6 62 62 62
45008 -238 238 238 253 253 253 253 253 253 253 253 253
45009 -253 253 253 253 253 253 253 253 253 253 253 253
45010 -253 253 253 253 253 253 231 231 231 246 246 246
45011 -253 253 253 253 253 253 253 253 253 253 253 253
45012 -253 253 253 253 253 253 253 253 253 253 253 253
45013 -253 253 253 253 253 253 253 253 253 253 253 253
45014 -253 253 253 253 253 253 253 253 253 253 253 253
45015 -253 253 253 253 253 253 158 158 158 18 18 18
45016 - 14 14 14 2 2 6 2 2 6 2 2 6
45017 - 6 6 6 18 18 18 66 66 66 38 38 38
45018 - 6 6 6 94 94 94 50 50 50 18 18 18
45019 - 6 6 6 0 0 0 0 0 0 0 0 0
45020 - 0 0 0 0 0 0 0 0 0 0 0 0
45021 - 0 0 0 0 0 0 0 0 0 0 0 0
45022 - 0 0 0 0 0 0 0 0 0 0 0 0
45023 - 0 0 0 0 0 0 0 0 0 0 0 0
45024 - 0 0 0 0 0 0 0 0 0 6 6 6
45025 - 10 10 10 10 10 10 18 18 18 38 38 38
45026 - 78 78 78 142 134 106 216 158 10 242 186 14
45027 -246 190 14 246 190 14 156 118 10 10 10 10
45028 - 90 90 90 238 238 238 253 253 253 253 253 253
45029 -253 253 253 253 253 253 253 253 253 253 253 253
45030 -253 253 253 253 253 253 231 231 231 250 250 250
45031 -253 253 253 253 253 253 253 253 253 253 253 253
45032 -253 253 253 253 253 253 253 253 253 253 253 253
45033 -253 253 253 253 253 253 253 253 253 253 253 253
45034 -253 253 253 253 253 253 253 253 253 246 230 190
45035 -238 204 91 238 204 91 181 142 44 37 26 9
45036 - 2 2 6 2 2 6 2 2 6 2 2 6
45037 - 2 2 6 2 2 6 38 38 38 46 46 46
45038 - 26 26 26 106 106 106 54 54 54 18 18 18
45039 - 6 6 6 0 0 0 0 0 0 0 0 0
45040 - 0 0 0 0 0 0 0 0 0 0 0 0
45041 - 0 0 0 0 0 0 0 0 0 0 0 0
45042 - 0 0 0 0 0 0 0 0 0 0 0 0
45043 - 0 0 0 0 0 0 0 0 0 0 0 0
45044 - 0 0 0 6 6 6 14 14 14 22 22 22
45045 - 30 30 30 38 38 38 50 50 50 70 70 70
45046 -106 106 106 190 142 34 226 170 11 242 186 14
45047 -246 190 14 246 190 14 246 190 14 154 114 10
45048 - 6 6 6 74 74 74 226 226 226 253 253 253
45049 -253 253 253 253 253 253 253 253 253 253 253 253
45050 -253 253 253 253 253 253 231 231 231 250 250 250
45051 -253 253 253 253 253 253 253 253 253 253 253 253
45052 -253 253 253 253 253 253 253 253 253 253 253 253
45053 -253 253 253 253 253 253 253 253 253 253 253 253
45054 -253 253 253 253 253 253 253 253 253 228 184 62
45055 -241 196 14 241 208 19 232 195 16 38 30 10
45056 - 2 2 6 2 2 6 2 2 6 2 2 6
45057 - 2 2 6 6 6 6 30 30 30 26 26 26
45058 -203 166 17 154 142 90 66 66 66 26 26 26
45059 - 6 6 6 0 0 0 0 0 0 0 0 0
45060 - 0 0 0 0 0 0 0 0 0 0 0 0
45061 - 0 0 0 0 0 0 0 0 0 0 0 0
45062 - 0 0 0 0 0 0 0 0 0 0 0 0
45063 - 0 0 0 0 0 0 0 0 0 0 0 0
45064 - 6 6 6 18 18 18 38 38 38 58 58 58
45065 - 78 78 78 86 86 86 101 101 101 123 123 123
45066 -175 146 61 210 150 10 234 174 13 246 186 14
45067 -246 190 14 246 190 14 246 190 14 238 190 10
45068 -102 78 10 2 2 6 46 46 46 198 198 198
45069 -253 253 253 253 253 253 253 253 253 253 253 253
45070 -253 253 253 253 253 253 234 234 234 242 242 242
45071 -253 253 253 253 253 253 253 253 253 253 253 253
45072 -253 253 253 253 253 253 253 253 253 253 253 253
45073 -253 253 253 253 253 253 253 253 253 253 253 253
45074 -253 253 253 253 253 253 253 253 253 224 178 62
45075 -242 186 14 241 196 14 210 166 10 22 18 6
45076 - 2 2 6 2 2 6 2 2 6 2 2 6
45077 - 2 2 6 2 2 6 6 6 6 121 92 8
45078 -238 202 15 232 195 16 82 82 82 34 34 34
45079 - 10 10 10 0 0 0 0 0 0 0 0 0
45080 - 0 0 0 0 0 0 0 0 0 0 0 0
45081 - 0 0 0 0 0 0 0 0 0 0 0 0
45082 - 0 0 0 0 0 0 0 0 0 0 0 0
45083 - 0 0 0 0 0 0 0 0 0 0 0 0
45084 - 14 14 14 38 38 38 70 70 70 154 122 46
45085 -190 142 34 200 144 11 197 138 11 197 138 11
45086 -213 154 11 226 170 11 242 186 14 246 190 14
45087 -246 190 14 246 190 14 246 190 14 246 190 14
45088 -225 175 15 46 32 6 2 2 6 22 22 22
45089 -158 158 158 250 250 250 253 253 253 253 253 253
45090 -253 253 253 253 253 253 253 253 253 253 253 253
45091 -253 253 253 253 253 253 253 253 253 253 253 253
45092 -253 253 253 253 253 253 253 253 253 253 253 253
45093 -253 253 253 253 253 253 253 253 253 253 253 253
45094 -253 253 253 250 250 250 242 242 242 224 178 62
45095 -239 182 13 236 186 11 213 154 11 46 32 6
45096 - 2 2 6 2 2 6 2 2 6 2 2 6
45097 - 2 2 6 2 2 6 61 42 6 225 175 15
45098 -238 190 10 236 186 11 112 100 78 42 42 42
45099 - 14 14 14 0 0 0 0 0 0 0 0 0
45100 - 0 0 0 0 0 0 0 0 0 0 0 0
45101 - 0 0 0 0 0 0 0 0 0 0 0 0
45102 - 0 0 0 0 0 0 0 0 0 0 0 0
45103 - 0 0 0 0 0 0 0 0 0 6 6 6
45104 - 22 22 22 54 54 54 154 122 46 213 154 11
45105 -226 170 11 230 174 11 226 170 11 226 170 11
45106 -236 178 12 242 186 14 246 190 14 246 190 14
45107 -246 190 14 246 190 14 246 190 14 246 190 14
45108 -241 196 14 184 144 12 10 10 10 2 2 6
45109 - 6 6 6 116 116 116 242 242 242 253 253 253
45110 -253 253 253 253 253 253 253 253 253 253 253 253
45111 -253 253 253 253 253 253 253 253 253 253 253 253
45112 -253 253 253 253 253 253 253 253 253 253 253 253
45113 -253 253 253 253 253 253 253 253 253 253 253 253
45114 -253 253 253 231 231 231 198 198 198 214 170 54
45115 -236 178 12 236 178 12 210 150 10 137 92 6
45116 - 18 14 6 2 2 6 2 2 6 2 2 6
45117 - 6 6 6 70 47 6 200 144 11 236 178 12
45118 -239 182 13 239 182 13 124 112 88 58 58 58
45119 - 22 22 22 6 6 6 0 0 0 0 0 0
45120 - 0 0 0 0 0 0 0 0 0 0 0 0
45121 - 0 0 0 0 0 0 0 0 0 0 0 0
45122 - 0 0 0 0 0 0 0 0 0 0 0 0
45123 - 0 0 0 0 0 0 0 0 0 10 10 10
45124 - 30 30 30 70 70 70 180 133 36 226 170 11
45125 -239 182 13 242 186 14 242 186 14 246 186 14
45126 -246 190 14 246 190 14 246 190 14 246 190 14
45127 -246 190 14 246 190 14 246 190 14 246 190 14
45128 -246 190 14 232 195 16 98 70 6 2 2 6
45129 - 2 2 6 2 2 6 66 66 66 221 221 221
45130 -253 253 253 253 253 253 253 253 253 253 253 253
45131 -253 253 253 253 253 253 253 253 253 253 253 253
45132 -253 253 253 253 253 253 253 253 253 253 253 253
45133 -253 253 253 253 253 253 253 253 253 253 253 253
45134 -253 253 253 206 206 206 198 198 198 214 166 58
45135 -230 174 11 230 174 11 216 158 10 192 133 9
45136 -163 110 8 116 81 8 102 78 10 116 81 8
45137 -167 114 7 197 138 11 226 170 11 239 182 13
45138 -242 186 14 242 186 14 162 146 94 78 78 78
45139 - 34 34 34 14 14 14 6 6 6 0 0 0
45140 - 0 0 0 0 0 0 0 0 0 0 0 0
45141 - 0 0 0 0 0 0 0 0 0 0 0 0
45142 - 0 0 0 0 0 0 0 0 0 0 0 0
45143 - 0 0 0 0 0 0 0 0 0 6 6 6
45144 - 30 30 30 78 78 78 190 142 34 226 170 11
45145 -239 182 13 246 190 14 246 190 14 246 190 14
45146 -246 190 14 246 190 14 246 190 14 246 190 14
45147 -246 190 14 246 190 14 246 190 14 246 190 14
45148 -246 190 14 241 196 14 203 166 17 22 18 6
45149 - 2 2 6 2 2 6 2 2 6 38 38 38
45150 -218 218 218 253 253 253 253 253 253 253 253 253
45151 -253 253 253 253 253 253 253 253 253 253 253 253
45152 -253 253 253 253 253 253 253 253 253 253 253 253
45153 -253 253 253 253 253 253 253 253 253 253 253 253
45154 -250 250 250 206 206 206 198 198 198 202 162 69
45155 -226 170 11 236 178 12 224 166 10 210 150 10
45156 -200 144 11 197 138 11 192 133 9 197 138 11
45157 -210 150 10 226 170 11 242 186 14 246 190 14
45158 -246 190 14 246 186 14 225 175 15 124 112 88
45159 - 62 62 62 30 30 30 14 14 14 6 6 6
45160 - 0 0 0 0 0 0 0 0 0 0 0 0
45161 - 0 0 0 0 0 0 0 0 0 0 0 0
45162 - 0 0 0 0 0 0 0 0 0 0 0 0
45163 - 0 0 0 0 0 0 0 0 0 10 10 10
45164 - 30 30 30 78 78 78 174 135 50 224 166 10
45165 -239 182 13 246 190 14 246 190 14 246 190 14
45166 -246 190 14 246 190 14 246 190 14 246 190 14
45167 -246 190 14 246 190 14 246 190 14 246 190 14
45168 -246 190 14 246 190 14 241 196 14 139 102 15
45169 - 2 2 6 2 2 6 2 2 6 2 2 6
45170 - 78 78 78 250 250 250 253 253 253 253 253 253
45171 -253 253 253 253 253 253 253 253 253 253 253 253
45172 -253 253 253 253 253 253 253 253 253 253 253 253
45173 -253 253 253 253 253 253 253 253 253 253 253 253
45174 -250 250 250 214 214 214 198 198 198 190 150 46
45175 -219 162 10 236 178 12 234 174 13 224 166 10
45176 -216 158 10 213 154 11 213 154 11 216 158 10
45177 -226 170 11 239 182 13 246 190 14 246 190 14
45178 -246 190 14 246 190 14 242 186 14 206 162 42
45179 -101 101 101 58 58 58 30 30 30 14 14 14
45180 - 6 6 6 0 0 0 0 0 0 0 0 0
45181 - 0 0 0 0 0 0 0 0 0 0 0 0
45182 - 0 0 0 0 0 0 0 0 0 0 0 0
45183 - 0 0 0 0 0 0 0 0 0 10 10 10
45184 - 30 30 30 74 74 74 174 135 50 216 158 10
45185 -236 178 12 246 190 14 246 190 14 246 190 14
45186 -246 190 14 246 190 14 246 190 14 246 190 14
45187 -246 190 14 246 190 14 246 190 14 246 190 14
45188 -246 190 14 246 190 14 241 196 14 226 184 13
45189 - 61 42 6 2 2 6 2 2 6 2 2 6
45190 - 22 22 22 238 238 238 253 253 253 253 253 253
45191 -253 253 253 253 253 253 253 253 253 253 253 253
45192 -253 253 253 253 253 253 253 253 253 253 253 253
45193 -253 253 253 253 253 253 253 253 253 253 253 253
45194 -253 253 253 226 226 226 187 187 187 180 133 36
45195 -216 158 10 236 178 12 239 182 13 236 178 12
45196 -230 174 11 226 170 11 226 170 11 230 174 11
45197 -236 178 12 242 186 14 246 190 14 246 190 14
45198 -246 190 14 246 190 14 246 186 14 239 182 13
45199 -206 162 42 106 106 106 66 66 66 34 34 34
45200 - 14 14 14 6 6 6 0 0 0 0 0 0
45201 - 0 0 0 0 0 0 0 0 0 0 0 0
45202 - 0 0 0 0 0 0 0 0 0 0 0 0
45203 - 0 0 0 0 0 0 0 0 0 6 6 6
45204 - 26 26 26 70 70 70 163 133 67 213 154 11
45205 -236 178 12 246 190 14 246 190 14 246 190 14
45206 -246 190 14 246 190 14 246 190 14 246 190 14
45207 -246 190 14 246 190 14 246 190 14 246 190 14
45208 -246 190 14 246 190 14 246 190 14 241 196 14
45209 -190 146 13 18 14 6 2 2 6 2 2 6
45210 - 46 46 46 246 246 246 253 253 253 253 253 253
45211 -253 253 253 253 253 253 253 253 253 253 253 253
45212 -253 253 253 253 253 253 253 253 253 253 253 253
45213 -253 253 253 253 253 253 253 253 253 253 253 253
45214 -253 253 253 221 221 221 86 86 86 156 107 11
45215 -216 158 10 236 178 12 242 186 14 246 186 14
45216 -242 186 14 239 182 13 239 182 13 242 186 14
45217 -242 186 14 246 186 14 246 190 14 246 190 14
45218 -246 190 14 246 190 14 246 190 14 246 190 14
45219 -242 186 14 225 175 15 142 122 72 66 66 66
45220 - 30 30 30 10 10 10 0 0 0 0 0 0
45221 - 0 0 0 0 0 0 0 0 0 0 0 0
45222 - 0 0 0 0 0 0 0 0 0 0 0 0
45223 - 0 0 0 0 0 0 0 0 0 6 6 6
45224 - 26 26 26 70 70 70 163 133 67 210 150 10
45225 -236 178 12 246 190 14 246 190 14 246 190 14
45226 -246 190 14 246 190 14 246 190 14 246 190 14
45227 -246 190 14 246 190 14 246 190 14 246 190 14
45228 -246 190 14 246 190 14 246 190 14 246 190 14
45229 -232 195 16 121 92 8 34 34 34 106 106 106
45230 -221 221 221 253 253 253 253 253 253 253 253 253
45231 -253 253 253 253 253 253 253 253 253 253 253 253
45232 -253 253 253 253 253 253 253 253 253 253 253 253
45233 -253 253 253 253 253 253 253 253 253 253 253 253
45234 -242 242 242 82 82 82 18 14 6 163 110 8
45235 -216 158 10 236 178 12 242 186 14 246 190 14
45236 -246 190 14 246 190 14 246 190 14 246 190 14
45237 -246 190 14 246 190 14 246 190 14 246 190 14
45238 -246 190 14 246 190 14 246 190 14 246 190 14
45239 -246 190 14 246 190 14 242 186 14 163 133 67
45240 - 46 46 46 18 18 18 6 6 6 0 0 0
45241 - 0 0 0 0 0 0 0 0 0 0 0 0
45242 - 0 0 0 0 0 0 0 0 0 0 0 0
45243 - 0 0 0 0 0 0 0 0 0 10 10 10
45244 - 30 30 30 78 78 78 163 133 67 210 150 10
45245 -236 178 12 246 186 14 246 190 14 246 190 14
45246 -246 190 14 246 190 14 246 190 14 246 190 14
45247 -246 190 14 246 190 14 246 190 14 246 190 14
45248 -246 190 14 246 190 14 246 190 14 246 190 14
45249 -241 196 14 215 174 15 190 178 144 253 253 253
45250 -253 253 253 253 253 253 253 253 253 253 253 253
45251 -253 253 253 253 253 253 253 253 253 253 253 253
45252 -253 253 253 253 253 253 253 253 253 253 253 253
45253 -253 253 253 253 253 253 253 253 253 218 218 218
45254 - 58 58 58 2 2 6 22 18 6 167 114 7
45255 -216 158 10 236 178 12 246 186 14 246 190 14
45256 -246 190 14 246 190 14 246 190 14 246 190 14
45257 -246 190 14 246 190 14 246 190 14 246 190 14
45258 -246 190 14 246 190 14 246 190 14 246 190 14
45259 -246 190 14 246 186 14 242 186 14 190 150 46
45260 - 54 54 54 22 22 22 6 6 6 0 0 0
45261 - 0 0 0 0 0 0 0 0 0 0 0 0
45262 - 0 0 0 0 0 0 0 0 0 0 0 0
45263 - 0 0 0 0 0 0 0 0 0 14 14 14
45264 - 38 38 38 86 86 86 180 133 36 213 154 11
45265 -236 178 12 246 186 14 246 190 14 246 190 14
45266 -246 190 14 246 190 14 246 190 14 246 190 14
45267 -246 190 14 246 190 14 246 190 14 246 190 14
45268 -246 190 14 246 190 14 246 190 14 246 190 14
45269 -246 190 14 232 195 16 190 146 13 214 214 214
45270 -253 253 253 253 253 253 253 253 253 253 253 253
45271 -253 253 253 253 253 253 253 253 253 253 253 253
45272 -253 253 253 253 253 253 253 253 253 253 253 253
45273 -253 253 253 250 250 250 170 170 170 26 26 26
45274 - 2 2 6 2 2 6 37 26 9 163 110 8
45275 -219 162 10 239 182 13 246 186 14 246 190 14
45276 -246 190 14 246 190 14 246 190 14 246 190 14
45277 -246 190 14 246 190 14 246 190 14 246 190 14
45278 -246 190 14 246 190 14 246 190 14 246 190 14
45279 -246 186 14 236 178 12 224 166 10 142 122 72
45280 - 46 46 46 18 18 18 6 6 6 0 0 0
45281 - 0 0 0 0 0 0 0 0 0 0 0 0
45282 - 0 0 0 0 0 0 0 0 0 0 0 0
45283 - 0 0 0 0 0 0 6 6 6 18 18 18
45284 - 50 50 50 109 106 95 192 133 9 224 166 10
45285 -242 186 14 246 190 14 246 190 14 246 190 14
45286 -246 190 14 246 190 14 246 190 14 246 190 14
45287 -246 190 14 246 190 14 246 190 14 246 190 14
45288 -246 190 14 246 190 14 246 190 14 246 190 14
45289 -242 186 14 226 184 13 210 162 10 142 110 46
45290 -226 226 226 253 253 253 253 253 253 253 253 253
45291 -253 253 253 253 253 253 253 253 253 253 253 253
45292 -253 253 253 253 253 253 253 253 253 253 253 253
45293 -198 198 198 66 66 66 2 2 6 2 2 6
45294 - 2 2 6 2 2 6 50 34 6 156 107 11
45295 -219 162 10 239 182 13 246 186 14 246 190 14
45296 -246 190 14 246 190 14 246 190 14 246 190 14
45297 -246 190 14 246 190 14 246 190 14 246 190 14
45298 -246 190 14 246 190 14 246 190 14 242 186 14
45299 -234 174 13 213 154 11 154 122 46 66 66 66
45300 - 30 30 30 10 10 10 0 0 0 0 0 0
45301 - 0 0 0 0 0 0 0 0 0 0 0 0
45302 - 0 0 0 0 0 0 0 0 0 0 0 0
45303 - 0 0 0 0 0 0 6 6 6 22 22 22
45304 - 58 58 58 154 121 60 206 145 10 234 174 13
45305 -242 186 14 246 186 14 246 190 14 246 190 14
45306 -246 190 14 246 190 14 246 190 14 246 190 14
45307 -246 190 14 246 190 14 246 190 14 246 190 14
45308 -246 190 14 246 190 14 246 190 14 246 190 14
45309 -246 186 14 236 178 12 210 162 10 163 110 8
45310 - 61 42 6 138 138 138 218 218 218 250 250 250
45311 -253 253 253 253 253 253 253 253 253 250 250 250
45312 -242 242 242 210 210 210 144 144 144 66 66 66
45313 - 6 6 6 2 2 6 2 2 6 2 2 6
45314 - 2 2 6 2 2 6 61 42 6 163 110 8
45315 -216 158 10 236 178 12 246 190 14 246 190 14
45316 -246 190 14 246 190 14 246 190 14 246 190 14
45317 -246 190 14 246 190 14 246 190 14 246 190 14
45318 -246 190 14 239 182 13 230 174 11 216 158 10
45319 -190 142 34 124 112 88 70 70 70 38 38 38
45320 - 18 18 18 6 6 6 0 0 0 0 0 0
45321 - 0 0 0 0 0 0 0 0 0 0 0 0
45322 - 0 0 0 0 0 0 0 0 0 0 0 0
45323 - 0 0 0 0 0 0 6 6 6 22 22 22
45324 - 62 62 62 168 124 44 206 145 10 224 166 10
45325 -236 178 12 239 182 13 242 186 14 242 186 14
45326 -246 186 14 246 190 14 246 190 14 246 190 14
45327 -246 190 14 246 190 14 246 190 14 246 190 14
45328 -246 190 14 246 190 14 246 190 14 246 190 14
45329 -246 190 14 236 178 12 216 158 10 175 118 6
45330 - 80 54 7 2 2 6 6 6 6 30 30 30
45331 - 54 54 54 62 62 62 50 50 50 38 38 38
45332 - 14 14 14 2 2 6 2 2 6 2 2 6
45333 - 2 2 6 2 2 6 2 2 6 2 2 6
45334 - 2 2 6 6 6 6 80 54 7 167 114 7
45335 -213 154 11 236 178 12 246 190 14 246 190 14
45336 -246 190 14 246 190 14 246 190 14 246 190 14
45337 -246 190 14 242 186 14 239 182 13 239 182 13
45338 -230 174 11 210 150 10 174 135 50 124 112 88
45339 - 82 82 82 54 54 54 34 34 34 18 18 18
45340 - 6 6 6 0 0 0 0 0 0 0 0 0
45341 - 0 0 0 0 0 0 0 0 0 0 0 0
45342 - 0 0 0 0 0 0 0 0 0 0 0 0
45343 - 0 0 0 0 0 0 6 6 6 18 18 18
45344 - 50 50 50 158 118 36 192 133 9 200 144 11
45345 -216 158 10 219 162 10 224 166 10 226 170 11
45346 -230 174 11 236 178 12 239 182 13 239 182 13
45347 -242 186 14 246 186 14 246 190 14 246 190 14
45348 -246 190 14 246 190 14 246 190 14 246 190 14
45349 -246 186 14 230 174 11 210 150 10 163 110 8
45350 -104 69 6 10 10 10 2 2 6 2 2 6
45351 - 2 2 6 2 2 6 2 2 6 2 2 6
45352 - 2 2 6 2 2 6 2 2 6 2 2 6
45353 - 2 2 6 2 2 6 2 2 6 2 2 6
45354 - 2 2 6 6 6 6 91 60 6 167 114 7
45355 -206 145 10 230 174 11 242 186 14 246 190 14
45356 -246 190 14 246 190 14 246 186 14 242 186 14
45357 -239 182 13 230 174 11 224 166 10 213 154 11
45358 -180 133 36 124 112 88 86 86 86 58 58 58
45359 - 38 38 38 22 22 22 10 10 10 6 6 6
45360 - 0 0 0 0 0 0 0 0 0 0 0 0
45361 - 0 0 0 0 0 0 0 0 0 0 0 0
45362 - 0 0 0 0 0 0 0 0 0 0 0 0
45363 - 0 0 0 0 0 0 0 0 0 14 14 14
45364 - 34 34 34 70 70 70 138 110 50 158 118 36
45365 -167 114 7 180 123 7 192 133 9 197 138 11
45366 -200 144 11 206 145 10 213 154 11 219 162 10
45367 -224 166 10 230 174 11 239 182 13 242 186 14
45368 -246 186 14 246 186 14 246 186 14 246 186 14
45369 -239 182 13 216 158 10 185 133 11 152 99 6
45370 -104 69 6 18 14 6 2 2 6 2 2 6
45371 - 2 2 6 2 2 6 2 2 6 2 2 6
45372 - 2 2 6 2 2 6 2 2 6 2 2 6
45373 - 2 2 6 2 2 6 2 2 6 2 2 6
45374 - 2 2 6 6 6 6 80 54 7 152 99 6
45375 -192 133 9 219 162 10 236 178 12 239 182 13
45376 -246 186 14 242 186 14 239 182 13 236 178 12
45377 -224 166 10 206 145 10 192 133 9 154 121 60
45378 - 94 94 94 62 62 62 42 42 42 22 22 22
45379 - 14 14 14 6 6 6 0 0 0 0 0 0
45380 - 0 0 0 0 0 0 0 0 0 0 0 0
45381 - 0 0 0 0 0 0 0 0 0 0 0 0
45382 - 0 0 0 0 0 0 0 0 0 0 0 0
45383 - 0 0 0 0 0 0 0 0 0 6 6 6
45384 - 18 18 18 34 34 34 58 58 58 78 78 78
45385 -101 98 89 124 112 88 142 110 46 156 107 11
45386 -163 110 8 167 114 7 175 118 6 180 123 7
45387 -185 133 11 197 138 11 210 150 10 219 162 10
45388 -226 170 11 236 178 12 236 178 12 234 174 13
45389 -219 162 10 197 138 11 163 110 8 130 83 6
45390 - 91 60 6 10 10 10 2 2 6 2 2 6
45391 - 18 18 18 38 38 38 38 38 38 38 38 38
45392 - 38 38 38 38 38 38 38 38 38 38 38 38
45393 - 38 38 38 38 38 38 26 26 26 2 2 6
45394 - 2 2 6 6 6 6 70 47 6 137 92 6
45395 -175 118 6 200 144 11 219 162 10 230 174 11
45396 -234 174 13 230 174 11 219 162 10 210 150 10
45397 -192 133 9 163 110 8 124 112 88 82 82 82
45398 - 50 50 50 30 30 30 14 14 14 6 6 6
45399 - 0 0 0 0 0 0 0 0 0 0 0 0
45400 - 0 0 0 0 0 0 0 0 0 0 0 0
45401 - 0 0 0 0 0 0 0 0 0 0 0 0
45402 - 0 0 0 0 0 0 0 0 0 0 0 0
45403 - 0 0 0 0 0 0 0 0 0 0 0 0
45404 - 6 6 6 14 14 14 22 22 22 34 34 34
45405 - 42 42 42 58 58 58 74 74 74 86 86 86
45406 -101 98 89 122 102 70 130 98 46 121 87 25
45407 -137 92 6 152 99 6 163 110 8 180 123 7
45408 -185 133 11 197 138 11 206 145 10 200 144 11
45409 -180 123 7 156 107 11 130 83 6 104 69 6
45410 - 50 34 6 54 54 54 110 110 110 101 98 89
45411 - 86 86 86 82 82 82 78 78 78 78 78 78
45412 - 78 78 78 78 78 78 78 78 78 78 78 78
45413 - 78 78 78 82 82 82 86 86 86 94 94 94
45414 -106 106 106 101 101 101 86 66 34 124 80 6
45415 -156 107 11 180 123 7 192 133 9 200 144 11
45416 -206 145 10 200 144 11 192 133 9 175 118 6
45417 -139 102 15 109 106 95 70 70 70 42 42 42
45418 - 22 22 22 10 10 10 0 0 0 0 0 0
45419 - 0 0 0 0 0 0 0 0 0 0 0 0
45420 - 0 0 0 0 0 0 0 0 0 0 0 0
45421 - 0 0 0 0 0 0 0 0 0 0 0 0
45422 - 0 0 0 0 0 0 0 0 0 0 0 0
45423 - 0 0 0 0 0 0 0 0 0 0 0 0
45424 - 0 0 0 0 0 0 6 6 6 10 10 10
45425 - 14 14 14 22 22 22 30 30 30 38 38 38
45426 - 50 50 50 62 62 62 74 74 74 90 90 90
45427 -101 98 89 112 100 78 121 87 25 124 80 6
45428 -137 92 6 152 99 6 152 99 6 152 99 6
45429 -138 86 6 124 80 6 98 70 6 86 66 30
45430 -101 98 89 82 82 82 58 58 58 46 46 46
45431 - 38 38 38 34 34 34 34 34 34 34 34 34
45432 - 34 34 34 34 34 34 34 34 34 34 34 34
45433 - 34 34 34 34 34 34 38 38 38 42 42 42
45434 - 54 54 54 82 82 82 94 86 76 91 60 6
45435 -134 86 6 156 107 11 167 114 7 175 118 6
45436 -175 118 6 167 114 7 152 99 6 121 87 25
45437 -101 98 89 62 62 62 34 34 34 18 18 18
45438 - 6 6 6 0 0 0 0 0 0 0 0 0
45439 - 0 0 0 0 0 0 0 0 0 0 0 0
45440 - 0 0 0 0 0 0 0 0 0 0 0 0
45441 - 0 0 0 0 0 0 0 0 0 0 0 0
45442 - 0 0 0 0 0 0 0 0 0 0 0 0
45443 - 0 0 0 0 0 0 0 0 0 0 0 0
45444 - 0 0 0 0 0 0 0 0 0 0 0 0
45445 - 0 0 0 6 6 6 6 6 6 10 10 10
45446 - 18 18 18 22 22 22 30 30 30 42 42 42
45447 - 50 50 50 66 66 66 86 86 86 101 98 89
45448 -106 86 58 98 70 6 104 69 6 104 69 6
45449 -104 69 6 91 60 6 82 62 34 90 90 90
45450 - 62 62 62 38 38 38 22 22 22 14 14 14
45451 - 10 10 10 10 10 10 10 10 10 10 10 10
45452 - 10 10 10 10 10 10 6 6 6 10 10 10
45453 - 10 10 10 10 10 10 10 10 10 14 14 14
45454 - 22 22 22 42 42 42 70 70 70 89 81 66
45455 - 80 54 7 104 69 6 124 80 6 137 92 6
45456 -134 86 6 116 81 8 100 82 52 86 86 86
45457 - 58 58 58 30 30 30 14 14 14 6 6 6
45458 - 0 0 0 0 0 0 0 0 0 0 0 0
45459 - 0 0 0 0 0 0 0 0 0 0 0 0
45460 - 0 0 0 0 0 0 0 0 0 0 0 0
45461 - 0 0 0 0 0 0 0 0 0 0 0 0
45462 - 0 0 0 0 0 0 0 0 0 0 0 0
45463 - 0 0 0 0 0 0 0 0 0 0 0 0
45464 - 0 0 0 0 0 0 0 0 0 0 0 0
45465 - 0 0 0 0 0 0 0 0 0 0 0 0
45466 - 0 0 0 6 6 6 10 10 10 14 14 14
45467 - 18 18 18 26 26 26 38 38 38 54 54 54
45468 - 70 70 70 86 86 86 94 86 76 89 81 66
45469 - 89 81 66 86 86 86 74 74 74 50 50 50
45470 - 30 30 30 14 14 14 6 6 6 0 0 0
45471 - 0 0 0 0 0 0 0 0 0 0 0 0
45472 - 0 0 0 0 0 0 0 0 0 0 0 0
45473 - 0 0 0 0 0 0 0 0 0 0 0 0
45474 - 6 6 6 18 18 18 34 34 34 58 58 58
45475 - 82 82 82 89 81 66 89 81 66 89 81 66
45476 - 94 86 66 94 86 76 74 74 74 50 50 50
45477 - 26 26 26 14 14 14 6 6 6 0 0 0
45478 - 0 0 0 0 0 0 0 0 0 0 0 0
45479 - 0 0 0 0 0 0 0 0 0 0 0 0
45480 - 0 0 0 0 0 0 0 0 0 0 0 0
45481 - 0 0 0 0 0 0 0 0 0 0 0 0
45482 - 0 0 0 0 0 0 0 0 0 0 0 0
45483 - 0 0 0 0 0 0 0 0 0 0 0 0
45484 - 0 0 0 0 0 0 0 0 0 0 0 0
45485 - 0 0 0 0 0 0 0 0 0 0 0 0
45486 - 0 0 0 0 0 0 0 0 0 0 0 0
45487 - 6 6 6 6 6 6 14 14 14 18 18 18
45488 - 30 30 30 38 38 38 46 46 46 54 54 54
45489 - 50 50 50 42 42 42 30 30 30 18 18 18
45490 - 10 10 10 0 0 0 0 0 0 0 0 0
45491 - 0 0 0 0 0 0 0 0 0 0 0 0
45492 - 0 0 0 0 0 0 0 0 0 0 0 0
45493 - 0 0 0 0 0 0 0 0 0 0 0 0
45494 - 0 0 0 6 6 6 14 14 14 26 26 26
45495 - 38 38 38 50 50 50 58 58 58 58 58 58
45496 - 54 54 54 42 42 42 30 30 30 18 18 18
45497 - 10 10 10 0 0 0 0 0 0 0 0 0
45498 - 0 0 0 0 0 0 0 0 0 0 0 0
45499 - 0 0 0 0 0 0 0 0 0 0 0 0
45500 - 0 0 0 0 0 0 0 0 0 0 0 0
45501 - 0 0 0 0 0 0 0 0 0 0 0 0
45502 - 0 0 0 0 0 0 0 0 0 0 0 0
45503 - 0 0 0 0 0 0 0 0 0 0 0 0
45504 - 0 0 0 0 0 0 0 0 0 0 0 0
45505 - 0 0 0 0 0 0 0 0 0 0 0 0
45506 - 0 0 0 0 0 0 0 0 0 0 0 0
45507 - 0 0 0 0 0 0 0 0 0 6 6 6
45508 - 6 6 6 10 10 10 14 14 14 18 18 18
45509 - 18 18 18 14 14 14 10 10 10 6 6 6
45510 - 0 0 0 0 0 0 0 0 0 0 0 0
45511 - 0 0 0 0 0 0 0 0 0 0 0 0
45512 - 0 0 0 0 0 0 0 0 0 0 0 0
45513 - 0 0 0 0 0 0 0 0 0 0 0 0
45514 - 0 0 0 0 0 0 0 0 0 6 6 6
45515 - 14 14 14 18 18 18 22 22 22 22 22 22
45516 - 18 18 18 14 14 14 10 10 10 6 6 6
45517 - 0 0 0 0 0 0 0 0 0 0 0 0
45518 - 0 0 0 0 0 0 0 0 0 0 0 0
45519 - 0 0 0 0 0 0 0 0 0 0 0 0
45520 - 0 0 0 0 0 0 0 0 0 0 0 0
45521 - 0 0 0 0 0 0 0 0 0 0 0 0
45522 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45523 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45524 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45525 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45526 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45527 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45528 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45529 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45530 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45531 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45532 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45533 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45534 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45535 +4 4 4 4 4 4
45536 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45537 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45538 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45539 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45540 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45541 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45542 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45543 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45544 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45545 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45546 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45547 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45548 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45549 +4 4 4 4 4 4
45550 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45551 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45552 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45553 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45554 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45555 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45556 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45557 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45558 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45559 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45560 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45561 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45562 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45563 +4 4 4 4 4 4
45564 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45565 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45566 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45567 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45568 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45569 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45570 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45571 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45572 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45573 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45574 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45575 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45576 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45577 +4 4 4 4 4 4
45578 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45579 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45580 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45581 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45582 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45583 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45584 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45585 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45586 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45587 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45588 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45589 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45590 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45591 +4 4 4 4 4 4
45592 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45593 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45594 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45595 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45596 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45597 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45598 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45599 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45600 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45601 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45602 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45603 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45604 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45605 +4 4 4 4 4 4
45606 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45607 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45608 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45609 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45610 +4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
45611 +0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
45612 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45613 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45614 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45615 +4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
45616 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
45617 +4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
45618 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45619 +4 4 4 4 4 4
45620 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45621 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45622 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45623 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45624 +4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
45625 +37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
45626 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45627 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45628 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45629 +4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
45630 +2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
45631 +4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
45632 +1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45633 +4 4 4 4 4 4
45634 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45635 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45636 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45637 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45638 +2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
45639 +153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
45640 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
45641 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45642 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45643 +4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
45644 +60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
45645 +4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
45646 +2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
45647 +4 4 4 4 4 4
45648 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45649 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45650 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45651 +4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
45652 +4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
45653 +165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
45654 +1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
45655 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45656 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
45657 +3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
45658 +163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
45659 +0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
45660 +37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
45661 +4 4 4 4 4 4
45662 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45663 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45664 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45665 +4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
45666 +37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
45667 +156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
45668 +125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
45669 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45670 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
45671 +0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
45672 +174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
45673 +0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
45674 +64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
45675 +4 4 4 4 4 4
45676 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45677 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45678 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
45679 +5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
45680 +156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
45681 +156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
45682 +174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
45683 +1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
45684 +4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
45685 +13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
45686 +174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
45687 +22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
45688 +90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
45689 +4 4 4 4 4 4
45690 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45691 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45692 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
45693 +0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
45694 +174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
45695 +156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
45696 +163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
45697 +4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
45698 +5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
45699 +131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
45700 +190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
45701 +90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
45702 +31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
45703 +4 4 4 4 4 4
45704 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45705 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45706 +4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
45707 +4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
45708 +155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
45709 +167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
45710 +153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
45711 +41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
45712 +1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
45713 +177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
45714 +125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
45715 +136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
45716 +7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
45717 +4 4 4 4 4 4
45718 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45719 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45720 +4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
45721 +125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
45722 +156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
45723 +137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
45724 +156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
45725 +167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
45726 +0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
45727 +166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
45728 +6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
45729 +90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
45730 +1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
45731 +4 4 4 4 4 4
45732 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45733 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45734 +1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
45735 +167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
45736 +157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
45737 +26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
45738 +158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
45739 +165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
45740 +60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
45741 +137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
45742 +52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
45743 +13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
45744 +4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
45745 +4 4 4 4 4 4
45746 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45747 +4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
45748 +0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
45749 +158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
45750 +167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
45751 +4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
45752 +174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
45753 +155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
45754 +137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
45755 +16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
45756 +136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
45757 +2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
45758 +4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
45759 +4 4 4 4 4 4
45760 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45761 +4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
45762 +37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
45763 +157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
45764 +153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
45765 +4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
45766 +125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
45767 +156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
45768 +174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
45769 +4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
45770 +136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
45771 +1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
45772 +2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
45773 +0 0 0 4 4 4
45774 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
45775 +4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
45776 +158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
45777 +153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
45778 +37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
45779 +4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
45780 +4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
45781 +154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
45782 +174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
45783 +32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
45784 +28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
45785 +50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
45786 +0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
45787 +2 0 0 0 0 0
45788 +4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
45789 +0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
45790 +174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
45791 +165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
45792 +4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
45793 +4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
45794 +4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
45795 +174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
45796 +60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
45797 +136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
45798 +22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
45799 +136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
45800 +26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
45801 +37 38 37 0 0 0
45802 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
45803 +13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
45804 +153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
45805 +177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
45806 +4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
45807 +5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
45808 +6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
45809 +166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
45810 +4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
45811 +146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
45812 +71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
45813 +90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
45814 +125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
45815 +85 115 134 4 0 0
45816 +4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
45817 +125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
45818 +155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
45819 +125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
45820 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
45821 +0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
45822 +5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
45823 +37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
45824 +4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
45825 +90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
45826 +2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
45827 +13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
45828 +166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
45829 +60 73 81 4 0 0
45830 +4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
45831 +174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
45832 +156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
45833 +4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
45834 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
45835 +10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
45836 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
45837 +4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
45838 +80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
45839 +28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
45840 +50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
45841 +1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
45842 +167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
45843 +16 19 21 4 0 0
45844 +4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
45845 +158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
45846 +167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
45847 +4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
45848 +4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
45849 +80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
45850 +4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
45851 +3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
45852 +146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
45853 +68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
45854 +136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
45855 +24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
45856 +163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
45857 +4 0 0 4 3 3
45858 +3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
45859 +156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
45860 +155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
45861 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
45862 +2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
45863 +136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
45864 +0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
45865 +0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
45866 +136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
45867 +28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
45868 +22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
45869 +137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
45870 +60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
45871 +3 2 2 4 4 4
45872 +3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
45873 +157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
45874 +37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
45875 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
45876 +0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
45877 +101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
45878 +14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
45879 +22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
45880 +136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
45881 +17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
45882 +2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
45883 +166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
45884 +13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
45885 +4 4 4 4 4 4
45886 +1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
45887 +163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
45888 +4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
45889 +4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
45890 +40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
45891 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
45892 +101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
45893 +136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
45894 +136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
45895 +136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
45896 +3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
45897 +174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
45898 +4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
45899 +4 4 4 4 4 4
45900 +4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
45901 +155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
45902 +4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
45903 +4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
45904 +101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
45905 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
45906 +136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
45907 +136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
45908 +136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
45909 +90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
45910 +85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
45911 +167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
45912 +6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
45913 +5 5 5 5 5 5
45914 +1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
45915 +131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
45916 +6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
45917 +0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
45918 +101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
45919 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45920 +101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
45921 +136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
45922 +101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
45923 +7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
45924 +174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
45925 +24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
45926 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
45927 +5 5 5 4 4 4
45928 +4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
45929 +131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
45930 +6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
45931 +13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
45932 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
45933 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
45934 +101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
45935 +136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
45936 +136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
45937 +2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
45938 +174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
45939 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
45940 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45941 +4 4 4 4 4 4
45942 +1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
45943 +137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
45944 +4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
45945 +64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
45946 +90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
45947 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45948 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45949 +136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
45950 +101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
45951 +37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
45952 +167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
45953 +3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
45954 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45955 +4 4 4 4 4 4
45956 +4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
45957 +153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
45958 +4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
45959 +90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
45960 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
45961 +90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
45962 +101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
45963 +101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
45964 +35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
45965 +154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
45966 +60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
45967 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45968 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45969 +4 4 4 4 4 4
45970 +1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
45971 +153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
45972 +4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
45973 +64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
45974 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
45975 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45976 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45977 +136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
45978 +13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
45979 +174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
45980 +6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
45981 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45982 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45983 +4 4 4 4 4 4
45984 +4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
45985 +156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
45986 +4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
45987 +90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
45988 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
45989 +90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
45990 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
45991 +101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
45992 +2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
45993 +174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
45994 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45995 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45996 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45997 +4 4 4 4 4 4
45998 +3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
45999 +158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
46000 +4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
46001 +37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
46002 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
46003 +90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
46004 +101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
46005 +90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
46006 +5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
46007 +167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
46008 +6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
46009 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46010 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46011 +4 4 4 4 4 4
46012 +4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
46013 +163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
46014 +4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
46015 +18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
46016 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
46017 +90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
46018 +101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
46019 +13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
46020 +3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
46021 +174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
46022 +4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
46023 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46024 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46025 +4 4 4 4 4 4
46026 +1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
46027 +167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
46028 +4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
46029 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
46030 +26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
46031 +90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
46032 +101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
46033 +7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
46034 +4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
46035 +174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
46036 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46037 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46038 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46039 +4 4 4 4 4 4
46040 +4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
46041 +174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
46042 +5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
46043 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
46044 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
46045 +90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
46046 +101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
46047 +2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
46048 +3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
46049 +153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
46050 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46051 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46052 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46053 +4 4 4 4 4 4
46054 +1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
46055 +174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
46056 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
46057 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
46058 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
46059 +26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
46060 +35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
46061 +2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
46062 +3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
46063 +131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
46064 +4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46065 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46066 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46067 +4 4 4 4 4 4
46068 +3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
46069 +174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
46070 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
46071 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
46072 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
46073 +26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
46074 +7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
46075 +4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
46076 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
46077 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
46078 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46079 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46080 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46081 +4 4 4 4 4 4
46082 +1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
46083 +174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
46084 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
46085 +18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
46086 +18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
46087 +26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
46088 +28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
46089 +3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
46090 +4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
46091 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
46092 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46093 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46094 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46095 +4 4 4 4 4 4
46096 +4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
46097 +174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
46098 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
46099 +10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
46100 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
46101 +18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
46102 +90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
46103 +3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
46104 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
46105 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
46106 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46107 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46108 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46109 +4 4 4 4 4 4
46110 +1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
46111 +177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
46112 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
46113 +10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
46114 +26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
46115 +6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
46116 +10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
46117 +2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
46118 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
46119 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
46120 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46121 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46122 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46123 +4 4 4 4 4 4
46124 +4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
46125 +177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
46126 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
46127 +10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
46128 +26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
46129 +7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
46130 +3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
46131 +21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
46132 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
46133 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
46134 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46135 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46136 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46137 +4 4 4 4 4 4
46138 +3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
46139 +190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
46140 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
46141 +10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
46142 +24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
46143 +18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
46144 +28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
46145 +26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
46146 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
46147 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
46148 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46149 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46150 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46151 +4 4 4 4 4 4
46152 +4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
46153 +190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
46154 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
46155 +10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
46156 +0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
46157 +26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
46158 +37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
46159 +90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
46160 +4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
46161 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
46162 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46163 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46164 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46165 +4 4 4 4 4 4
46166 +4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
46167 +193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
46168 +5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
46169 +10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
46170 +1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
46171 +26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
46172 +22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
46173 +26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
46174 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
46175 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
46176 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46177 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46178 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46179 +4 4 4 4 4 4
46180 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
46181 +190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
46182 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
46183 +10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
46184 +2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
46185 +26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
46186 +10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
46187 +26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
46188 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
46189 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
46190 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46191 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46192 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46193 +4 4 4 4 4 4
46194 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
46195 +193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
46196 +5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
46197 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
46198 +13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
46199 +10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
46200 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
46201 +26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
46202 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
46203 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
46204 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46205 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46206 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46207 +4 4 4 4 4 4
46208 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
46209 +190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
46210 +5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
46211 +28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
46212 +10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
46213 +28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
46214 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
46215 +26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
46216 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
46217 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
46218 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46219 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46220 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46221 +4 4 4 4 4 4
46222 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
46223 +193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
46224 +5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
46225 +4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
46226 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
46227 +10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
46228 +18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
46229 +22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
46230 +4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
46231 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
46232 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46233 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46234 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46235 +4 4 4 4 4 4
46236 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
46237 +190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
46238 +6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
46239 +1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
46240 +18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
46241 +10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
46242 +26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
46243 +1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
46244 +5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
46245 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
46246 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46247 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46248 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46249 +4 4 4 4 4 4
46250 +4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
46251 +193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
46252 +2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
46253 +4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
46254 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
46255 +10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
46256 +26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
46257 +2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
46258 +3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
46259 +131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
46260 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46261 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46262 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46263 +4 4 4 4 4 4
46264 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
46265 +193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
46266 +0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
46267 +4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
46268 +13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
46269 +10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
46270 +28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
46271 +4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
46272 +0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
46273 +125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
46274 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46275 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46276 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46277 +4 4 4 4 4 4
46278 +4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
46279 +193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
46280 +120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
46281 +4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
46282 +4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
46283 +10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
46284 +4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
46285 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
46286 +24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
46287 +125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
46288 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46289 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46290 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46291 +4 4 4 4 4 4
46292 +4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
46293 +174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
46294 +220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
46295 +3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
46296 +4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
46297 +10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
46298 +1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
46299 +5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
46300 +137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
46301 +125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
46302 +0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46303 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46304 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46305 +4 4 4 4 4 4
46306 +5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
46307 +193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
46308 +220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
46309 +4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
46310 +4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
46311 +22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
46312 +4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46313 +1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
46314 +166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
46315 +125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
46316 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
46317 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46318 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46319 +4 4 4 4 4 4
46320 +4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
46321 +220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
46322 +205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
46323 +24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
46324 +4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
46325 +4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
46326 +4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
46327 +2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
46328 +156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
46329 +137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
46330 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46331 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46332 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46333 +4 4 4 4 4 4
46334 +5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
46335 +125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
46336 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
46337 +193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
46338 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
46339 +1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
46340 +5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
46341 +60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
46342 +153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
46343 +125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
46344 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46345 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46346 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46347 +4 4 4 4 4 4
46348 +4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
46349 +6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
46350 +193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
46351 +244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
46352 +0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
46353 +4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
46354 +3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
46355 +220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
46356 +153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
46357 +13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
46358 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46359 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46360 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46361 +4 4 4 4 4 4
46362 +5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
46363 +6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
46364 +244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
46365 +220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
46366 +3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
46367 +4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
46368 +0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
46369 +177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
46370 +158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
46371 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
46372 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46373 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46374 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46375 +4 4 4 4 4 4
46376 +5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
46377 +6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
46378 +177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
46379 +220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
46380 +125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
46381 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
46382 +37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
46383 +174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
46384 +158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
46385 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
46386 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46387 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46388 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46389 +4 4 4 4 4 4
46390 +4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
46391 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
46392 +26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
46393 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
46394 +244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
46395 +0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
46396 +177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
46397 +174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
46398 +60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
46399 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46400 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46401 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46402 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46403 +4 4 4 4 4 4
46404 +5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
46405 +6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
46406 +6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
46407 +220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
46408 +220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
46409 +0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
46410 +220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
46411 +174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
46412 +4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
46413 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46414 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46415 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46416 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46417 +4 4 4 4 4 4
46418 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
46419 +6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
46420 +4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
46421 +220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
46422 +205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
46423 +60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
46424 +177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
46425 +190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
46426 +4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46427 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46428 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46429 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46430 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46431 +4 4 4 4 4 4
46432 +4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
46433 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
46434 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
46435 +125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
46436 +205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
46437 +193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
46438 +190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
46439 +153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
46440 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46441 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46442 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46443 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46444 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46445 +4 4 4 4 4 4
46446 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
46447 +6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
46448 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
46449 +4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
46450 +205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
46451 +220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
46452 +174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
46453 +6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
46454 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46455 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46456 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46457 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46458 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46459 +4 4 4 4 4 4
46460 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
46461 +5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
46462 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
46463 +4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
46464 +220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
46465 +190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
46466 +193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
46467 +4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
46468 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46469 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46470 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46471 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46472 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46473 +4 4 4 4 4 4
46474 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46475 +4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
46476 +4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
46477 +6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
46478 +174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
46479 +193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
46480 +193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
46481 +6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
46482 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46483 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46484 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46485 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46486 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46487 +4 4 4 4 4 4
46488 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46489 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
46490 +5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
46491 +5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
46492 +6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
46493 +193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
46494 +60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
46495 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
46496 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46497 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46498 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46499 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46500 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46501 +4 4 4 4 4 4
46502 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46503 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46504 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
46505 +5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
46506 +4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
46507 +193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
46508 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
46509 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
46510 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46511 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46512 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46513 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46514 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46515 +4 4 4 4 4 4
46516 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46517 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46518 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
46519 +4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
46520 +6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
46521 +153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
46522 +6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
46523 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46524 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46525 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46526 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46527 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46528 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46529 +4 4 4 4 4 4
46530 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46531 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46532 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46533 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
46534 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
46535 +24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
46536 +6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
46537 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46538 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46539 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46540 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46541 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46542 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46543 +4 4 4 4 4 4
46544 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46545 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46546 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46547 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
46548 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
46549 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
46550 +4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
46551 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46552 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46553 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46554 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46555 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46556 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46557 +4 4 4 4 4 4
46558 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46559 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46560 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46561 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
46562 +5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
46563 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
46564 +6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
46565 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46566 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46567 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46568 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46569 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46570 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46571 +4 4 4 4 4 4
46572 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46573 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46574 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46575 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
46576 +4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
46577 +4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
46578 +6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46579 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46580 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46581 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46582 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46583 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46584 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46585 +4 4 4 4 4 4
46586 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46587 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46588 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46589 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46590 +4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
46591 +6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
46592 +4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
46593 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46594 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46595 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46596 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46597 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46598 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46599 +4 4 4 4 4 4
46600 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46601 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46602 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46603 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46604 +4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
46605 +4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
46606 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46607 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46608 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46609 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46610 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46611 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46612 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46613 +4 4 4 4 4 4
46614 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46615 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46616 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46617 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46618 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
46619 +5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
46620 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46621 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46622 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46623 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46624 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46625 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46626 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46627 +4 4 4 4 4 4
46628 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46629 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46630 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46631 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46632 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
46633 +5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
46634 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46635 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46636 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46637 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46638 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46639 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46640 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46641 +4 4 4 4 4 4
46642 diff --git a/drivers/video/nvidia/nv_backlight.c b/drivers/video/nvidia/nv_backlight.c
46643 index 443e3c8..c443d6a 100644
46644 --- a/drivers/video/nvidia/nv_backlight.c
46645 +++ b/drivers/video/nvidia/nv_backlight.c
46646 @@ -87,7 +87,7 @@ static int nvidia_bl_get_brightness(struct backlight_device *bd)
46647 return bd->props.brightness;
46648 }
46649
46650 -static struct backlight_ops nvidia_bl_ops = {
46651 +static const struct backlight_ops nvidia_bl_ops = {
46652 .get_brightness = nvidia_bl_get_brightness,
46653 .update_status = nvidia_bl_update_status,
46654 };
46655 diff --git a/drivers/video/riva/fbdev.c b/drivers/video/riva/fbdev.c
46656 index d94c57f..912984c 100644
46657 --- a/drivers/video/riva/fbdev.c
46658 +++ b/drivers/video/riva/fbdev.c
46659 @@ -331,7 +331,7 @@ static int riva_bl_get_brightness(struct backlight_device *bd)
46660 return bd->props.brightness;
46661 }
46662
46663 -static struct backlight_ops riva_bl_ops = {
46664 +static const struct backlight_ops riva_bl_ops = {
46665 .get_brightness = riva_bl_get_brightness,
46666 .update_status = riva_bl_update_status,
46667 };
46668 diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
46669 index 54fbb29..2c108fc 100644
46670 --- a/drivers/video/uvesafb.c
46671 +++ b/drivers/video/uvesafb.c
46672 @@ -18,6 +18,7 @@
46673 #include <linux/fb.h>
46674 #include <linux/io.h>
46675 #include <linux/mutex.h>
46676 +#include <linux/moduleloader.h>
46677 #include <video/edid.h>
46678 #include <video/uvesafb.h>
46679 #ifdef CONFIG_X86
46680 @@ -120,7 +121,7 @@ static int uvesafb_helper_start(void)
46681 NULL,
46682 };
46683
46684 - return call_usermodehelper(v86d_path, argv, envp, 1);
46685 + return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
46686 }
46687
46688 /*
46689 @@ -568,10 +569,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
46690 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
46691 par->pmi_setpal = par->ypan = 0;
46692 } else {
46693 +
46694 +#ifdef CONFIG_PAX_KERNEXEC
46695 +#ifdef CONFIG_MODULES
46696 + par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
46697 +#endif
46698 + if (!par->pmi_code) {
46699 + par->pmi_setpal = par->ypan = 0;
46700 + return 0;
46701 + }
46702 +#endif
46703 +
46704 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
46705 + task->t.regs.edi);
46706 +
46707 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46708 + pax_open_kernel();
46709 + memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
46710 + pax_close_kernel();
46711 +
46712 + par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
46713 + par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
46714 +#else
46715 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
46716 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
46717 +#endif
46718 +
46719 printk(KERN_INFO "uvesafb: protected mode interface info at "
46720 "%04x:%04x\n",
46721 (u16)task->t.regs.es, (u16)task->t.regs.edi);
46722 @@ -1799,6 +1822,11 @@ out:
46723 if (par->vbe_modes)
46724 kfree(par->vbe_modes);
46725
46726 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46727 + if (par->pmi_code)
46728 + module_free_exec(NULL, par->pmi_code);
46729 +#endif
46730 +
46731 framebuffer_release(info);
46732 return err;
46733 }
46734 @@ -1825,6 +1853,12 @@ static int uvesafb_remove(struct platform_device *dev)
46735 kfree(par->vbe_state_orig);
46736 if (par->vbe_state_saved)
46737 kfree(par->vbe_state_saved);
46738 +
46739 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46740 + if (par->pmi_code)
46741 + module_free_exec(NULL, par->pmi_code);
46742 +#endif
46743 +
46744 }
46745
46746 framebuffer_release(info);
46747 diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
46748 index bd37ee1..cb827e8 100644
46749 --- a/drivers/video/vesafb.c
46750 +++ b/drivers/video/vesafb.c
46751 @@ -9,6 +9,7 @@
46752 */
46753
46754 #include <linux/module.h>
46755 +#include <linux/moduleloader.h>
46756 #include <linux/kernel.h>
46757 #include <linux/errno.h>
46758 #include <linux/string.h>
46759 @@ -53,8 +54,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
46760 static int vram_total __initdata; /* Set total amount of memory */
46761 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
46762 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
46763 -static void (*pmi_start)(void) __read_mostly;
46764 -static void (*pmi_pal) (void) __read_mostly;
46765 +static void (*pmi_start)(void) __read_only;
46766 +static void (*pmi_pal) (void) __read_only;
46767 static int depth __read_mostly;
46768 static int vga_compat __read_mostly;
46769 /* --------------------------------------------------------------------- */
46770 @@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
46771 unsigned int size_vmode;
46772 unsigned int size_remap;
46773 unsigned int size_total;
46774 + void *pmi_code = NULL;
46775
46776 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
46777 return -ENODEV;
46778 @@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
46779 size_remap = size_total;
46780 vesafb_fix.smem_len = size_remap;
46781
46782 -#ifndef __i386__
46783 - screen_info.vesapm_seg = 0;
46784 -#endif
46785 -
46786 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
46787 printk(KERN_WARNING
46788 "vesafb: cannot reserve video memory at 0x%lx\n",
46789 @@ -315,9 +313,21 @@ static int __init vesafb_probe(struct platform_device *dev)
46790 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
46791 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
46792
46793 +#ifdef __i386__
46794 +
46795 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46796 + pmi_code = module_alloc_exec(screen_info.vesapm_size);
46797 + if (!pmi_code)
46798 +#elif !defined(CONFIG_PAX_KERNEXEC)
46799 + if (0)
46800 +#endif
46801 +
46802 +#endif
46803 + screen_info.vesapm_seg = 0;
46804 +
46805 if (screen_info.vesapm_seg) {
46806 - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
46807 - screen_info.vesapm_seg,screen_info.vesapm_off);
46808 + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
46809 + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
46810 }
46811
46812 if (screen_info.vesapm_seg < 0xc000)
46813 @@ -325,9 +335,25 @@ static int __init vesafb_probe(struct platform_device *dev)
46814
46815 if (ypan || pmi_setpal) {
46816 unsigned short *pmi_base;
46817 +
46818 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
46819 - pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
46820 - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
46821 +
46822 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46823 + pax_open_kernel();
46824 + memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
46825 +#else
46826 + pmi_code = pmi_base;
46827 +#endif
46828 +
46829 + pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
46830 + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
46831 +
46832 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46833 + pmi_start = ktva_ktla(pmi_start);
46834 + pmi_pal = ktva_ktla(pmi_pal);
46835 + pax_close_kernel();
46836 +#endif
46837 +
46838 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
46839 if (pmi_base[3]) {
46840 printk(KERN_INFO "vesafb: pmi: ports = ");
46841 @@ -469,6 +495,11 @@ static int __init vesafb_probe(struct platform_device *dev)
46842 info->node, info->fix.id);
46843 return 0;
46844 err:
46845 +
46846 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46847 + module_free_exec(NULL, pmi_code);
46848 +#endif
46849 +
46850 if (info->screen_base)
46851 iounmap(info->screen_base);
46852 framebuffer_release(info);
46853 diff --git a/drivers/xen/sys-hypervisor.c b/drivers/xen/sys-hypervisor.c
46854 index 88a60e0..6783cc2 100644
46855 --- a/drivers/xen/sys-hypervisor.c
46856 +++ b/drivers/xen/sys-hypervisor.c
46857 @@ -425,7 +425,7 @@ static ssize_t hyp_sysfs_store(struct kobject *kobj,
46858 return 0;
46859 }
46860
46861 -static struct sysfs_ops hyp_sysfs_ops = {
46862 +static const struct sysfs_ops hyp_sysfs_ops = {
46863 .show = hyp_sysfs_show,
46864 .store = hyp_sysfs_store,
46865 };
46866 diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
46867 index 18f74ec..3227009 100644
46868 --- a/fs/9p/vfs_inode.c
46869 +++ b/fs/9p/vfs_inode.c
46870 @@ -1079,7 +1079,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
46871 static void
46872 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
46873 {
46874 - char *s = nd_get_link(nd);
46875 + const char *s = nd_get_link(nd);
46876
46877 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
46878 IS_ERR(s) ? "<error>" : s);
46879 diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
46880 index bb4cc5b..df5eaa0 100644
46881 --- a/fs/Kconfig.binfmt
46882 +++ b/fs/Kconfig.binfmt
46883 @@ -86,7 +86,7 @@ config HAVE_AOUT
46884
46885 config BINFMT_AOUT
46886 tristate "Kernel support for a.out and ECOFF binaries"
46887 - depends on HAVE_AOUT
46888 + depends on HAVE_AOUT && BROKEN
46889 ---help---
46890 A.out (Assembler.OUTput) is a set of formats for libraries and
46891 executables used in the earliest versions of UNIX. Linux used
46892 diff --git a/fs/aio.c b/fs/aio.c
46893 index 22a19ad..d484e5b 100644
46894 --- a/fs/aio.c
46895 +++ b/fs/aio.c
46896 @@ -115,7 +115,7 @@ static int aio_setup_ring(struct kioctx *ctx)
46897 size += sizeof(struct io_event) * nr_events;
46898 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
46899
46900 - if (nr_pages < 0)
46901 + if (nr_pages <= 0)
46902 return -EINVAL;
46903
46904 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
46905 @@ -1089,6 +1089,8 @@ static int read_events(struct kioctx *ctx,
46906 struct aio_timeout to;
46907 int retry = 0;
46908
46909 + pax_track_stack();
46910 +
46911 /* needed to zero any padding within an entry (there shouldn't be
46912 * any, but C is fun!
46913 */
46914 @@ -1382,13 +1384,18 @@ static ssize_t aio_fsync(struct kiocb *iocb)
46915 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb)
46916 {
46917 ssize_t ret;
46918 + struct iovec iovstack;
46919
46920 ret = rw_copy_check_uvector(type, (struct iovec __user *)kiocb->ki_buf,
46921 kiocb->ki_nbytes, 1,
46922 - &kiocb->ki_inline_vec, &kiocb->ki_iovec);
46923 + &iovstack, &kiocb->ki_iovec);
46924 if (ret < 0)
46925 goto out;
46926
46927 + if (kiocb->ki_iovec == &iovstack) {
46928 + kiocb->ki_inline_vec = iovstack;
46929 + kiocb->ki_iovec = &kiocb->ki_inline_vec;
46930 + }
46931 kiocb->ki_nr_segs = kiocb->ki_nbytes;
46932 kiocb->ki_cur_seg = 0;
46933 /* ki_nbytes/left now reflect bytes instead of segs */
46934 diff --git a/fs/attr.c b/fs/attr.c
46935 index 96d394b..33cf5b4 100644
46936 --- a/fs/attr.c
46937 +++ b/fs/attr.c
46938 @@ -83,6 +83,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
46939 unsigned long limit;
46940
46941 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
46942 + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
46943 if (limit != RLIM_INFINITY && offset > limit)
46944 goto out_sig;
46945 if (offset > inode->i_sb->s_maxbytes)
46946 diff --git a/fs/autofs/root.c b/fs/autofs/root.c
46947 index 4a1401c..05eb5ca 100644
46948 --- a/fs/autofs/root.c
46949 +++ b/fs/autofs/root.c
46950 @@ -299,7 +299,8 @@ static int autofs_root_symlink(struct inode *dir, struct dentry *dentry, const c
46951 set_bit(n,sbi->symlink_bitmap);
46952 sl = &sbi->symlink[n];
46953 sl->len = strlen(symname);
46954 - sl->data = kmalloc(slsize = sl->len+1, GFP_KERNEL);
46955 + slsize = sl->len+1;
46956 + sl->data = kmalloc(slsize, GFP_KERNEL);
46957 if (!sl->data) {
46958 clear_bit(n,sbi->symlink_bitmap);
46959 unlock_kernel();
46960 diff --git a/fs/autofs4/symlink.c b/fs/autofs4/symlink.c
46961 index b4ea829..e63ef18 100644
46962 --- a/fs/autofs4/symlink.c
46963 +++ b/fs/autofs4/symlink.c
46964 @@ -15,7 +15,7 @@
46965 static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
46966 {
46967 struct autofs_info *ino = autofs4_dentry_ino(dentry);
46968 - nd_set_link(nd, (char *)ino->u.symlink);
46969 + nd_set_link(nd, ino->u.symlink);
46970 return NULL;
46971 }
46972
46973 diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
46974 index 136a0d6..a287331 100644
46975 --- a/fs/autofs4/waitq.c
46976 +++ b/fs/autofs4/waitq.c
46977 @@ -60,7 +60,7 @@ static int autofs4_write(struct file *file, const void *addr, int bytes)
46978 {
46979 unsigned long sigpipe, flags;
46980 mm_segment_t fs;
46981 - const char *data = (const char *)addr;
46982 + const char __user *data = (const char __force_user *)addr;
46983 ssize_t wr = 0;
46984
46985 /** WARNING: this is not safe for writing more than PIPE_BUF bytes! **/
46986 diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
46987 index 9158c07..3f06659 100644
46988 --- a/fs/befs/linuxvfs.c
46989 +++ b/fs/befs/linuxvfs.c
46990 @@ -498,7 +498,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
46991 {
46992 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
46993 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
46994 - char *link = nd_get_link(nd);
46995 + const char *link = nd_get_link(nd);
46996 if (!IS_ERR(link))
46997 kfree(link);
46998 }
46999 diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
47000 index 0133b5a..3710d09 100644
47001 --- a/fs/binfmt_aout.c
47002 +++ b/fs/binfmt_aout.c
47003 @@ -16,6 +16,7 @@
47004 #include <linux/string.h>
47005 #include <linux/fs.h>
47006 #include <linux/file.h>
47007 +#include <linux/security.h>
47008 #include <linux/stat.h>
47009 #include <linux/fcntl.h>
47010 #include <linux/ptrace.h>
47011 @@ -102,6 +103,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
47012 #endif
47013 # define START_STACK(u) (u.start_stack)
47014
47015 + memset(&dump, 0, sizeof(dump));
47016 +
47017 fs = get_fs();
47018 set_fs(KERNEL_DS);
47019 has_dumped = 1;
47020 @@ -113,10 +116,12 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
47021
47022 /* If the size of the dump file exceeds the rlimit, then see what would happen
47023 if we wrote the stack, but not the data area. */
47024 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
47025 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > limit)
47026 dump.u_dsize = 0;
47027
47028 /* Make sure we have enough room to write the stack and data areas. */
47029 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
47030 if ((dump.u_ssize + 1) * PAGE_SIZE > limit)
47031 dump.u_ssize = 0;
47032
47033 @@ -146,9 +151,7 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
47034 dump_size = dump.u_ssize << PAGE_SHIFT;
47035 DUMP_WRITE(dump_start,dump_size);
47036 }
47037 -/* Finally dump the task struct. Not be used by gdb, but could be useful */
47038 - set_fs(KERNEL_DS);
47039 - DUMP_WRITE(current,sizeof(*current));
47040 +/* Finally, let's not dump the task struct. Not be used by gdb, but could be useful to an attacker */
47041 end_coredump:
47042 set_fs(fs);
47043 return has_dumped;
47044 @@ -249,6 +252,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
47045 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
47046 if (rlim >= RLIM_INFINITY)
47047 rlim = ~0;
47048 +
47049 + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
47050 if (ex.a_data + ex.a_bss > rlim)
47051 return -ENOMEM;
47052
47053 @@ -274,9 +279,37 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
47054 current->mm->free_area_cache = current->mm->mmap_base;
47055 current->mm->cached_hole_size = 0;
47056
47057 + retval = setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT);
47058 + if (retval < 0) {
47059 + /* Someone check-me: is this error path enough? */
47060 + send_sig(SIGKILL, current, 0);
47061 + return retval;
47062 + }
47063 +
47064 install_exec_creds(bprm);
47065 current->flags &= ~PF_FORKNOEXEC;
47066
47067 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
47068 + current->mm->pax_flags = 0UL;
47069 +#endif
47070 +
47071 +#ifdef CONFIG_PAX_PAGEEXEC
47072 + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
47073 + current->mm->pax_flags |= MF_PAX_PAGEEXEC;
47074 +
47075 +#ifdef CONFIG_PAX_EMUTRAMP
47076 + if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
47077 + current->mm->pax_flags |= MF_PAX_EMUTRAMP;
47078 +#endif
47079 +
47080 +#ifdef CONFIG_PAX_MPROTECT
47081 + if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
47082 + current->mm->pax_flags |= MF_PAX_MPROTECT;
47083 +#endif
47084 +
47085 + }
47086 +#endif
47087 +
47088 if (N_MAGIC(ex) == OMAGIC) {
47089 unsigned long text_addr, map_size;
47090 loff_t pos;
47091 @@ -349,7 +382,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
47092
47093 down_write(&current->mm->mmap_sem);
47094 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
47095 - PROT_READ | PROT_WRITE | PROT_EXEC,
47096 + PROT_READ | PROT_WRITE,
47097 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
47098 fd_offset + ex.a_text);
47099 up_write(&current->mm->mmap_sem);
47100 @@ -367,13 +400,6 @@ beyond_if:
47101 return retval;
47102 }
47103
47104 - retval = setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT);
47105 - if (retval < 0) {
47106 - /* Someone check-me: is this error path enough? */
47107 - send_sig(SIGKILL, current, 0);
47108 - return retval;
47109 - }
47110 -
47111 current->mm->start_stack =
47112 (unsigned long) create_aout_tables((char __user *) bprm->p, bprm);
47113 #ifdef __alpha__
47114 diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
47115 index 1ed37ba..66794b9 100644
47116 --- a/fs/binfmt_elf.c
47117 +++ b/fs/binfmt_elf.c
47118 @@ -31,6 +31,7 @@
47119 #include <linux/random.h>
47120 #include <linux/elf.h>
47121 #include <linux/utsname.h>
47122 +#include <linux/xattr.h>
47123 #include <asm/uaccess.h>
47124 #include <asm/param.h>
47125 #include <asm/page.h>
47126 @@ -50,6 +51,10 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
47127 #define elf_core_dump NULL
47128 #endif
47129
47130 +#ifdef CONFIG_PAX_MPROTECT
47131 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
47132 +#endif
47133 +
47134 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
47135 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
47136 #else
47137 @@ -69,6 +74,11 @@ static struct linux_binfmt elf_format = {
47138 .load_binary = load_elf_binary,
47139 .load_shlib = load_elf_library,
47140 .core_dump = elf_core_dump,
47141 +
47142 +#ifdef CONFIG_PAX_MPROTECT
47143 + .handle_mprotect= elf_handle_mprotect,
47144 +#endif
47145 +
47146 .min_coredump = ELF_EXEC_PAGESIZE,
47147 .hasvdso = 1
47148 };
47149 @@ -77,6 +87,8 @@ static struct linux_binfmt elf_format = {
47150
47151 static int set_brk(unsigned long start, unsigned long end)
47152 {
47153 + unsigned long e = end;
47154 +
47155 start = ELF_PAGEALIGN(start);
47156 end = ELF_PAGEALIGN(end);
47157 if (end > start) {
47158 @@ -87,7 +99,7 @@ static int set_brk(unsigned long start, unsigned long end)
47159 if (BAD_ADDR(addr))
47160 return addr;
47161 }
47162 - current->mm->start_brk = current->mm->brk = end;
47163 + current->mm->start_brk = current->mm->brk = e;
47164 return 0;
47165 }
47166
47167 @@ -148,12 +160,15 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
47168 elf_addr_t __user *u_rand_bytes;
47169 const char *k_platform = ELF_PLATFORM;
47170 const char *k_base_platform = ELF_BASE_PLATFORM;
47171 - unsigned char k_rand_bytes[16];
47172 + u32 k_rand_bytes[4];
47173 int items;
47174 elf_addr_t *elf_info;
47175 int ei_index = 0;
47176 const struct cred *cred = current_cred();
47177 struct vm_area_struct *vma;
47178 + unsigned long saved_auxv[AT_VECTOR_SIZE];
47179 +
47180 + pax_track_stack();
47181
47182 /*
47183 * In some cases (e.g. Hyper-Threading), we want to avoid L1
47184 @@ -195,8 +210,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
47185 * Generate 16 random bytes for userspace PRNG seeding.
47186 */
47187 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
47188 - u_rand_bytes = (elf_addr_t __user *)
47189 - STACK_ALLOC(p, sizeof(k_rand_bytes));
47190 + srandom32(k_rand_bytes[0] ^ random32());
47191 + srandom32(k_rand_bytes[1] ^ random32());
47192 + srandom32(k_rand_bytes[2] ^ random32());
47193 + srandom32(k_rand_bytes[3] ^ random32());
47194 + p = STACK_ROUND(p, sizeof(k_rand_bytes));
47195 + u_rand_bytes = (elf_addr_t __user *) p;
47196 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
47197 return -EFAULT;
47198
47199 @@ -308,9 +327,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
47200 return -EFAULT;
47201 current->mm->env_end = p;
47202
47203 + memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
47204 +
47205 /* Put the elf_info on the stack in the right place. */
47206 sp = (elf_addr_t __user *)envp + 1;
47207 - if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
47208 + if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
47209 return -EFAULT;
47210 return 0;
47211 }
47212 @@ -385,10 +406,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
47213 {
47214 struct elf_phdr *elf_phdata;
47215 struct elf_phdr *eppnt;
47216 - unsigned long load_addr = 0;
47217 + unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
47218 int load_addr_set = 0;
47219 unsigned long last_bss = 0, elf_bss = 0;
47220 - unsigned long error = ~0UL;
47221 + unsigned long error = -EINVAL;
47222 unsigned long total_size;
47223 int retval, i, size;
47224
47225 @@ -434,6 +455,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
47226 goto out_close;
47227 }
47228
47229 +#ifdef CONFIG_PAX_SEGMEXEC
47230 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
47231 + pax_task_size = SEGMEXEC_TASK_SIZE;
47232 +#endif
47233 +
47234 eppnt = elf_phdata;
47235 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
47236 if (eppnt->p_type == PT_LOAD) {
47237 @@ -477,8 +503,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
47238 k = load_addr + eppnt->p_vaddr;
47239 if (BAD_ADDR(k) ||
47240 eppnt->p_filesz > eppnt->p_memsz ||
47241 - eppnt->p_memsz > TASK_SIZE ||
47242 - TASK_SIZE - eppnt->p_memsz < k) {
47243 + eppnt->p_memsz > pax_task_size ||
47244 + pax_task_size - eppnt->p_memsz < k) {
47245 error = -ENOMEM;
47246 goto out_close;
47247 }
47248 @@ -532,6 +558,351 @@ out:
47249 return error;
47250 }
47251
47252 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
47253 +static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
47254 +{
47255 + unsigned long pax_flags = 0UL;
47256 +
47257 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
47258 +
47259 +#ifdef CONFIG_PAX_PAGEEXEC
47260 + if (elf_phdata->p_flags & PF_PAGEEXEC)
47261 + pax_flags |= MF_PAX_PAGEEXEC;
47262 +#endif
47263 +
47264 +#ifdef CONFIG_PAX_SEGMEXEC
47265 + if (elf_phdata->p_flags & PF_SEGMEXEC)
47266 + pax_flags |= MF_PAX_SEGMEXEC;
47267 +#endif
47268 +
47269 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
47270 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
47271 + if (nx_enabled)
47272 + pax_flags &= ~MF_PAX_SEGMEXEC;
47273 + else
47274 + pax_flags &= ~MF_PAX_PAGEEXEC;
47275 + }
47276 +#endif
47277 +
47278 +#ifdef CONFIG_PAX_EMUTRAMP
47279 + if (elf_phdata->p_flags & PF_EMUTRAMP)
47280 + pax_flags |= MF_PAX_EMUTRAMP;
47281 +#endif
47282 +
47283 +#ifdef CONFIG_PAX_MPROTECT
47284 + if (elf_phdata->p_flags & PF_MPROTECT)
47285 + pax_flags |= MF_PAX_MPROTECT;
47286 +#endif
47287 +
47288 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
47289 + if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
47290 + pax_flags |= MF_PAX_RANDMMAP;
47291 +#endif
47292 +
47293 +#endif
47294 +
47295 + return pax_flags;
47296 +}
47297 +
47298 +static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
47299 +{
47300 + unsigned long pax_flags = 0UL;
47301 +
47302 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
47303 +
47304 +#ifdef CONFIG_PAX_PAGEEXEC
47305 + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
47306 + pax_flags |= MF_PAX_PAGEEXEC;
47307 +#endif
47308 +
47309 +#ifdef CONFIG_PAX_SEGMEXEC
47310 + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
47311 + pax_flags |= MF_PAX_SEGMEXEC;
47312 +#endif
47313 +
47314 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
47315 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
47316 + if (nx_enabled)
47317 + pax_flags &= ~MF_PAX_SEGMEXEC;
47318 + else
47319 + pax_flags &= ~MF_PAX_PAGEEXEC;
47320 + }
47321 +#endif
47322 +
47323 +#ifdef CONFIG_PAX_EMUTRAMP
47324 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
47325 + pax_flags |= MF_PAX_EMUTRAMP;
47326 +#endif
47327 +
47328 +#ifdef CONFIG_PAX_MPROTECT
47329 + if (!(elf_phdata->p_flags & PF_NOMPROTECT))
47330 + pax_flags |= MF_PAX_MPROTECT;
47331 +#endif
47332 +
47333 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
47334 + if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
47335 + pax_flags |= MF_PAX_RANDMMAP;
47336 +#endif
47337 +
47338 +#endif
47339 +
47340 + return pax_flags;
47341 +}
47342 +
47343 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
47344 +{
47345 + unsigned long pax_flags = 0UL;
47346 +
47347 +#ifdef CONFIG_PAX_EI_PAX
47348 +
47349 +#ifdef CONFIG_PAX_PAGEEXEC
47350 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
47351 + pax_flags |= MF_PAX_PAGEEXEC;
47352 +#endif
47353 +
47354 +#ifdef CONFIG_PAX_SEGMEXEC
47355 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
47356 + pax_flags |= MF_PAX_SEGMEXEC;
47357 +#endif
47358 +
47359 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
47360 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
47361 + if (nx_enabled)
47362 + pax_flags &= ~MF_PAX_SEGMEXEC;
47363 + else
47364 + pax_flags &= ~MF_PAX_PAGEEXEC;
47365 + }
47366 +#endif
47367 +
47368 +#ifdef CONFIG_PAX_EMUTRAMP
47369 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
47370 + pax_flags |= MF_PAX_EMUTRAMP;
47371 +#endif
47372 +
47373 +#ifdef CONFIG_PAX_MPROTECT
47374 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
47375 + pax_flags |= MF_PAX_MPROTECT;
47376 +#endif
47377 +
47378 +#ifdef CONFIG_PAX_ASLR
47379 + if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
47380 + pax_flags |= MF_PAX_RANDMMAP;
47381 +#endif
47382 +
47383 +#else
47384 +
47385 +#ifdef CONFIG_PAX_PAGEEXEC
47386 + pax_flags |= MF_PAX_PAGEEXEC;
47387 +#endif
47388 +
47389 +#ifdef CONFIG_PAX_MPROTECT
47390 + pax_flags |= MF_PAX_MPROTECT;
47391 +#endif
47392 +
47393 +#ifdef CONFIG_PAX_RANDMMAP
47394 + pax_flags |= MF_PAX_RANDMMAP;
47395 +#endif
47396 +
47397 +#ifdef CONFIG_PAX_SEGMEXEC
47398 + if (!(pax_flags & MF_PAX_PAGEEXEC) || !(__supported_pte_mask & _PAGE_NX)) {
47399 + pax_flags &= ~MF_PAX_PAGEEXEC;
47400 + pax_flags |= MF_PAX_SEGMEXEC;
47401 + }
47402 +#endif
47403 +
47404 +#endif
47405 +
47406 + return pax_flags;
47407 +}
47408 +
47409 +static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
47410 +{
47411 +
47412 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
47413 + unsigned long i;
47414 +
47415 + for (i = 0UL; i < elf_ex->e_phnum; i++)
47416 + if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
47417 + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
47418 + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
47419 + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
47420 + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
47421 + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
47422 + return ~0UL;
47423 +
47424 +#ifdef CONFIG_PAX_SOFTMODE
47425 + if (pax_softmode)
47426 + return pax_parse_pt_pax_softmode(&elf_phdata[i]);
47427 + else
47428 +#endif
47429 +
47430 + return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
47431 + break;
47432 + }
47433 +#endif
47434 +
47435 + return ~0UL;
47436 +}
47437 +
47438 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
47439 +static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
47440 +{
47441 + unsigned long pax_flags = 0UL;
47442 +
47443 +#ifdef CONFIG_PAX_PAGEEXEC
47444 + if (pax_flags_softmode & MF_PAX_PAGEEXEC)
47445 + pax_flags |= MF_PAX_PAGEEXEC;
47446 +#endif
47447 +
47448 +#ifdef CONFIG_PAX_SEGMEXEC
47449 + if (pax_flags_softmode & MF_PAX_SEGMEXEC)
47450 + pax_flags |= MF_PAX_SEGMEXEC;
47451 +#endif
47452 +
47453 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
47454 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
47455 + if ((__supported_pte_mask & _PAGE_NX))
47456 + pax_flags &= ~MF_PAX_SEGMEXEC;
47457 + else
47458 + pax_flags &= ~MF_PAX_PAGEEXEC;
47459 + }
47460 +#endif
47461 +
47462 +#ifdef CONFIG_PAX_EMUTRAMP
47463 + if (pax_flags_softmode & MF_PAX_EMUTRAMP)
47464 + pax_flags |= MF_PAX_EMUTRAMP;
47465 +#endif
47466 +
47467 +#ifdef CONFIG_PAX_MPROTECT
47468 + if (pax_flags_softmode & MF_PAX_MPROTECT)
47469 + pax_flags |= MF_PAX_MPROTECT;
47470 +#endif
47471 +
47472 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
47473 + if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
47474 + pax_flags |= MF_PAX_RANDMMAP;
47475 +#endif
47476 +
47477 + return pax_flags;
47478 +}
47479 +
47480 +static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
47481 +{
47482 + unsigned long pax_flags = 0UL;
47483 +
47484 +#ifdef CONFIG_PAX_PAGEEXEC
47485 + if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
47486 + pax_flags |= MF_PAX_PAGEEXEC;
47487 +#endif
47488 +
47489 +#ifdef CONFIG_PAX_SEGMEXEC
47490 + if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
47491 + pax_flags |= MF_PAX_SEGMEXEC;
47492 +#endif
47493 +
47494 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
47495 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
47496 + if ((__supported_pte_mask & _PAGE_NX))
47497 + pax_flags &= ~MF_PAX_SEGMEXEC;
47498 + else
47499 + pax_flags &= ~MF_PAX_PAGEEXEC;
47500 + }
47501 +#endif
47502 +
47503 +#ifdef CONFIG_PAX_EMUTRAMP
47504 + if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
47505 + pax_flags |= MF_PAX_EMUTRAMP;
47506 +#endif
47507 +
47508 +#ifdef CONFIG_PAX_MPROTECT
47509 + if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
47510 + pax_flags |= MF_PAX_MPROTECT;
47511 +#endif
47512 +
47513 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
47514 + if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
47515 + pax_flags |= MF_PAX_RANDMMAP;
47516 +#endif
47517 +
47518 + return pax_flags;
47519 +}
47520 +#endif
47521 +
47522 +static unsigned long pax_parse_xattr_pax(struct file * const file)
47523 +{
47524 +
47525 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
47526 + ssize_t xattr_size, i;
47527 + unsigned char xattr_value[5];
47528 + unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
47529 +
47530 + xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
47531 + if (xattr_size <= 0)
47532 + return ~0UL;
47533 +
47534 + for (i = 0; i < xattr_size; i++)
47535 + switch (xattr_value[i]) {
47536 + default:
47537 + return ~0UL;
47538 +
47539 +#define parse_flag(option1, option2, flag) \
47540 + case option1: \
47541 + pax_flags_hardmode |= MF_PAX_##flag; \
47542 + break; \
47543 + case option2: \
47544 + pax_flags_softmode |= MF_PAX_##flag; \
47545 + break;
47546 +
47547 + parse_flag('p', 'P', PAGEEXEC);
47548 + parse_flag('e', 'E', EMUTRAMP);
47549 + parse_flag('m', 'M', MPROTECT);
47550 + parse_flag('r', 'R', RANDMMAP);
47551 + parse_flag('s', 'S', SEGMEXEC);
47552 +
47553 +#undef parse_flag
47554 + }
47555 +
47556 + if (pax_flags_hardmode & pax_flags_softmode)
47557 + return ~0UL;
47558 +
47559 +#ifdef CONFIG_PAX_SOFTMODE
47560 + if (pax_softmode)
47561 + return pax_parse_xattr_pax_softmode(pax_flags_softmode);
47562 + else
47563 +#endif
47564 +
47565 + return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
47566 +#else
47567 + return ~0UL;
47568 +#endif
47569 +
47570 +}
47571 +
47572 +static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
47573 +{
47574 + unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
47575 +
47576 + pax_flags = pax_parse_ei_pax(elf_ex);
47577 + pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
47578 + xattr_pax_flags = pax_parse_xattr_pax(file);
47579 +
47580 + if (pt_pax_flags == ~0UL)
47581 + pt_pax_flags = xattr_pax_flags;
47582 + else if (xattr_pax_flags == ~0UL)
47583 + xattr_pax_flags = pt_pax_flags;
47584 + if (pt_pax_flags != xattr_pax_flags)
47585 + return -EINVAL;
47586 + if (pt_pax_flags != ~0UL)
47587 + pax_flags = pt_pax_flags;
47588 +
47589 + if (0 > pax_check_flags(&pax_flags))
47590 + return -EINVAL;
47591 +
47592 + current->mm->pax_flags = pax_flags;
47593 + return 0;
47594 +}
47595 +#endif
47596 +
47597 /*
47598 * These are the functions used to load ELF style executables and shared
47599 * libraries. There is no binary dependent code anywhere else.
47600 @@ -548,6 +919,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
47601 {
47602 unsigned int random_variable = 0;
47603
47604 +#ifdef CONFIG_PAX_RANDUSTACK
47605 + if (randomize_va_space)
47606 + return stack_top - current->mm->delta_stack;
47607 +#endif
47608 +
47609 if ((current->flags & PF_RANDOMIZE) &&
47610 !(current->personality & ADDR_NO_RANDOMIZE)) {
47611 random_variable = get_random_int() & STACK_RND_MASK;
47612 @@ -566,7 +942,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
47613 unsigned long load_addr = 0, load_bias = 0;
47614 int load_addr_set = 0;
47615 char * elf_interpreter = NULL;
47616 - unsigned long error;
47617 + unsigned long error = 0;
47618 struct elf_phdr *elf_ppnt, *elf_phdata;
47619 unsigned long elf_bss, elf_brk;
47620 int retval, i;
47621 @@ -576,11 +952,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
47622 unsigned long start_code, end_code, start_data, end_data;
47623 unsigned long reloc_func_desc = 0;
47624 int executable_stack = EXSTACK_DEFAULT;
47625 - unsigned long def_flags = 0;
47626 struct {
47627 struct elfhdr elf_ex;
47628 struct elfhdr interp_elf_ex;
47629 } *loc;
47630 + unsigned long pax_task_size = TASK_SIZE;
47631
47632 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
47633 if (!loc) {
47634 @@ -718,11 +1094,80 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
47635
47636 /* OK, This is the point of no return */
47637 current->flags &= ~PF_FORKNOEXEC;
47638 - current->mm->def_flags = def_flags;
47639 +
47640 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
47641 + current->mm->pax_flags = 0UL;
47642 +#endif
47643 +
47644 +#ifdef CONFIG_PAX_DLRESOLVE
47645 + current->mm->call_dl_resolve = 0UL;
47646 +#endif
47647 +
47648 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
47649 + current->mm->call_syscall = 0UL;
47650 +#endif
47651 +
47652 +#ifdef CONFIG_PAX_ASLR
47653 + current->mm->delta_mmap = 0UL;
47654 + current->mm->delta_stack = 0UL;
47655 +#endif
47656 +
47657 + current->mm->def_flags = 0;
47658 +
47659 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
47660 + if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
47661 + send_sig(SIGKILL, current, 0);
47662 + goto out_free_dentry;
47663 + }
47664 +#endif
47665 +
47666 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
47667 + pax_set_initial_flags(bprm);
47668 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
47669 + if (pax_set_initial_flags_func)
47670 + (pax_set_initial_flags_func)(bprm);
47671 +#endif
47672 +
47673 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
47674 + if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !nx_enabled) {
47675 + current->mm->context.user_cs_limit = PAGE_SIZE;
47676 + current->mm->def_flags |= VM_PAGEEXEC;
47677 + }
47678 +#endif
47679 +
47680 +#ifdef CONFIG_PAX_SEGMEXEC
47681 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
47682 + current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
47683 + current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
47684 + pax_task_size = SEGMEXEC_TASK_SIZE;
47685 + }
47686 +#endif
47687 +
47688 +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
47689 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
47690 + set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
47691 + put_cpu();
47692 + }
47693 +#endif
47694
47695 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
47696 may depend on the personality. */
47697 SET_PERSONALITY(loc->elf_ex);
47698 +
47699 +#ifdef CONFIG_PAX_ASLR
47700 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
47701 + current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
47702 + current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
47703 + }
47704 +#endif
47705 +
47706 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
47707 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
47708 + executable_stack = EXSTACK_DISABLE_X;
47709 + current->personality &= ~READ_IMPLIES_EXEC;
47710 + } else
47711 +#endif
47712 +
47713 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
47714 current->personality |= READ_IMPLIES_EXEC;
47715
47716 @@ -800,10 +1245,27 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
47717 * might try to exec. This is because the brk will
47718 * follow the loader, and is not movable. */
47719 #ifdef CONFIG_X86
47720 - load_bias = 0;
47721 + if (current->flags & PF_RANDOMIZE)
47722 + load_bias = 0;
47723 + else
47724 + load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
47725 #else
47726 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
47727 #endif
47728 +
47729 +#ifdef CONFIG_PAX_RANDMMAP
47730 + /* PaX: randomize base address at the default exe base if requested */
47731 + if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
47732 +#ifdef CONFIG_SPARC64
47733 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
47734 +#else
47735 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
47736 +#endif
47737 + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
47738 + elf_flags |= MAP_FIXED;
47739 + }
47740 +#endif
47741 +
47742 }
47743
47744 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
47745 @@ -836,9 +1298,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
47746 * allowed task size. Note that p_filesz must always be
47747 * <= p_memsz so it is only necessary to check p_memsz.
47748 */
47749 - if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
47750 - elf_ppnt->p_memsz > TASK_SIZE ||
47751 - TASK_SIZE - elf_ppnt->p_memsz < k) {
47752 + if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
47753 + elf_ppnt->p_memsz > pax_task_size ||
47754 + pax_task_size - elf_ppnt->p_memsz < k) {
47755 /* set_brk can never work. Avoid overflows. */
47756 send_sig(SIGKILL, current, 0);
47757 retval = -EINVAL;
47758 @@ -866,6 +1328,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
47759 start_data += load_bias;
47760 end_data += load_bias;
47761
47762 +#ifdef CONFIG_PAX_RANDMMAP
47763 + if (current->mm->pax_flags & MF_PAX_RANDMMAP)
47764 + elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
47765 +#endif
47766 +
47767 /* Calling set_brk effectively mmaps the pages that we need
47768 * for the bss and break sections. We must do this before
47769 * mapping in the interpreter, to make sure it doesn't wind
47770 @@ -877,9 +1344,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
47771 goto out_free_dentry;
47772 }
47773 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
47774 - send_sig(SIGSEGV, current, 0);
47775 - retval = -EFAULT; /* Nobody gets to see this, but.. */
47776 - goto out_free_dentry;
47777 + /*
47778 + * This bss-zeroing can fail if the ELF
47779 + * file specifies odd protections. So
47780 + * we don't check the return value
47781 + */
47782 }
47783
47784 if (elf_interpreter) {
47785 @@ -1112,8 +1581,10 @@ static int dump_seek(struct file *file, loff_t off)
47786 unsigned long n = off;
47787 if (n > PAGE_SIZE)
47788 n = PAGE_SIZE;
47789 - if (!dump_write(file, buf, n))
47790 + if (!dump_write(file, buf, n)) {
47791 + free_page((unsigned long)buf);
47792 return 0;
47793 + }
47794 off -= n;
47795 }
47796 free_page((unsigned long)buf);
47797 @@ -1125,7 +1596,7 @@ static int dump_seek(struct file *file, loff_t off)
47798 * Decide what to dump of a segment, part, all or none.
47799 */
47800 static unsigned long vma_dump_size(struct vm_area_struct *vma,
47801 - unsigned long mm_flags)
47802 + unsigned long mm_flags, long signr)
47803 {
47804 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
47805
47806 @@ -1159,7 +1630,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
47807 if (vma->vm_file == NULL)
47808 return 0;
47809
47810 - if (FILTER(MAPPED_PRIVATE))
47811 + if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
47812 goto whole;
47813
47814 /*
47815 @@ -1255,8 +1726,11 @@ static int writenote(struct memelfnote *men, struct file *file,
47816 #undef DUMP_WRITE
47817
47818 #define DUMP_WRITE(addr, nr) \
47819 + do { \
47820 + gr_learn_resource(current, RLIMIT_CORE, size + (nr), 1); \
47821 if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
47822 - goto end_coredump;
47823 + goto end_coredump; \
47824 + } while (0);
47825
47826 static void fill_elf_header(struct elfhdr *elf, int segs,
47827 u16 machine, u32 flags, u8 osabi)
47828 @@ -1385,9 +1859,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
47829 {
47830 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
47831 int i = 0;
47832 - do
47833 + do {
47834 i += 2;
47835 - while (auxv[i - 2] != AT_NULL);
47836 + } while (auxv[i - 2] != AT_NULL);
47837 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
47838 }
47839
47840 @@ -1452,7 +1926,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
47841 for (i = 1; i < view->n; ++i) {
47842 const struct user_regset *regset = &view->regsets[i];
47843 do_thread_regset_writeback(t->task, regset);
47844 - if (regset->core_note_type &&
47845 + if (regset->core_note_type && regset->get &&
47846 (!regset->active || regset->active(t->task, regset))) {
47847 int ret;
47848 size_t size = regset->n * regset->size;
47849 @@ -1973,7 +2447,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
47850 phdr.p_offset = offset;
47851 phdr.p_vaddr = vma->vm_start;
47852 phdr.p_paddr = 0;
47853 - phdr.p_filesz = vma_dump_size(vma, mm_flags);
47854 + phdr.p_filesz = vma_dump_size(vma, mm_flags, signr);
47855 phdr.p_memsz = vma->vm_end - vma->vm_start;
47856 offset += phdr.p_filesz;
47857 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
47858 @@ -2006,7 +2480,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
47859 unsigned long addr;
47860 unsigned long end;
47861
47862 - end = vma->vm_start + vma_dump_size(vma, mm_flags);
47863 + end = vma->vm_start + vma_dump_size(vma, mm_flags, signr);
47864
47865 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
47866 struct page *page;
47867 @@ -2015,6 +2489,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
47868 page = get_dump_page(addr);
47869 if (page) {
47870 void *kaddr = kmap(page);
47871 + gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
47872 stop = ((size += PAGE_SIZE) > limit) ||
47873 !dump_write(file, kaddr, PAGE_SIZE);
47874 kunmap(page);
47875 @@ -2042,6 +2517,97 @@ out:
47876
47877 #endif /* USE_ELF_CORE_DUMP */
47878
47879 +#ifdef CONFIG_PAX_MPROTECT
47880 +/* PaX: non-PIC ELF libraries need relocations on their executable segments
47881 + * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
47882 + * we'll remove VM_MAYWRITE for good on RELRO segments.
47883 + *
47884 + * The checks favour ld-linux.so behaviour which operates on a per ELF segment
47885 + * basis because we want to allow the common case and not the special ones.
47886 + */
47887 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
47888 +{
47889 + struct elfhdr elf_h;
47890 + struct elf_phdr elf_p;
47891 + unsigned long i;
47892 + unsigned long oldflags;
47893 + bool is_textrel_rw, is_textrel_rx, is_relro;
47894 +
47895 + if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
47896 + return;
47897 +
47898 + oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
47899 + newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
47900 +
47901 +#ifdef CONFIG_PAX_ELFRELOCS
47902 + /* possible TEXTREL */
47903 + is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
47904 + is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
47905 +#else
47906 + is_textrel_rw = false;
47907 + is_textrel_rx = false;
47908 +#endif
47909 +
47910 + /* possible RELRO */
47911 + is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
47912 +
47913 + if (!is_textrel_rw && !is_textrel_rx && !is_relro)
47914 + return;
47915 +
47916 + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
47917 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
47918 +
47919 +#ifdef CONFIG_PAX_ETEXECRELOCS
47920 + ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
47921 +#else
47922 + ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
47923 +#endif
47924 +
47925 + (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
47926 + !elf_check_arch(&elf_h) ||
47927 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
47928 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
47929 + return;
47930 +
47931 + for (i = 0UL; i < elf_h.e_phnum; i++) {
47932 + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
47933 + return;
47934 + switch (elf_p.p_type) {
47935 + case PT_DYNAMIC:
47936 + if (!is_textrel_rw && !is_textrel_rx)
47937 + continue;
47938 + i = 0UL;
47939 + while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
47940 + elf_dyn dyn;
47941 +
47942 + if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
47943 + return;
47944 + if (dyn.d_tag == DT_NULL)
47945 + return;
47946 + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
47947 + gr_log_textrel(vma);
47948 + if (is_textrel_rw)
47949 + vma->vm_flags |= VM_MAYWRITE;
47950 + else
47951 + /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
47952 + vma->vm_flags &= ~VM_MAYWRITE;
47953 + return;
47954 + }
47955 + i++;
47956 + }
47957 + return;
47958 +
47959 + case PT_GNU_RELRO:
47960 + if (!is_relro)
47961 + continue;
47962 + if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
47963 + vma->vm_flags &= ~VM_MAYWRITE;
47964 + return;
47965 + }
47966 + }
47967 +}
47968 +#endif
47969 +
47970 static int __init init_elf_binfmt(void)
47971 {
47972 return register_binfmt(&elf_format);
47973 diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
47974 index ca88c46..f155a60 100644
47975 --- a/fs/binfmt_flat.c
47976 +++ b/fs/binfmt_flat.c
47977 @@ -564,7 +564,9 @@ static int load_flat_file(struct linux_binprm * bprm,
47978 realdatastart = (unsigned long) -ENOMEM;
47979 printk("Unable to allocate RAM for process data, errno %d\n",
47980 (int)-realdatastart);
47981 + down_write(&current->mm->mmap_sem);
47982 do_munmap(current->mm, textpos, text_len);
47983 + up_write(&current->mm->mmap_sem);
47984 ret = realdatastart;
47985 goto err;
47986 }
47987 @@ -588,8 +590,10 @@ static int load_flat_file(struct linux_binprm * bprm,
47988 }
47989 if (IS_ERR_VALUE(result)) {
47990 printk("Unable to read data+bss, errno %d\n", (int)-result);
47991 + down_write(&current->mm->mmap_sem);
47992 do_munmap(current->mm, textpos, text_len);
47993 do_munmap(current->mm, realdatastart, data_len + extra);
47994 + up_write(&current->mm->mmap_sem);
47995 ret = result;
47996 goto err;
47997 }
47998 @@ -658,8 +662,10 @@ static int load_flat_file(struct linux_binprm * bprm,
47999 }
48000 if (IS_ERR_VALUE(result)) {
48001 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
48002 + down_write(&current->mm->mmap_sem);
48003 do_munmap(current->mm, textpos, text_len + data_len + extra +
48004 MAX_SHARED_LIBS * sizeof(unsigned long));
48005 + up_write(&current->mm->mmap_sem);
48006 ret = result;
48007 goto err;
48008 }
48009 diff --git a/fs/bio.c b/fs/bio.c
48010 index e696713..83de133 100644
48011 --- a/fs/bio.c
48012 +++ b/fs/bio.c
48013 @@ -78,7 +78,7 @@ static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
48014
48015 i = 0;
48016 while (i < bio_slab_nr) {
48017 - struct bio_slab *bslab = &bio_slabs[i];
48018 + bslab = &bio_slabs[i];
48019
48020 if (!bslab->slab && entry == -1)
48021 entry = i;
48022 @@ -1236,7 +1236,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
48023 const int read = bio_data_dir(bio) == READ;
48024 struct bio_map_data *bmd = bio->bi_private;
48025 int i;
48026 - char *p = bmd->sgvecs[0].iov_base;
48027 + char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
48028
48029 __bio_for_each_segment(bvec, bio, i, 0) {
48030 char *addr = page_address(bvec->bv_page);
48031 diff --git a/fs/block_dev.c b/fs/block_dev.c
48032 index e65efa2..04fae57 100644
48033 --- a/fs/block_dev.c
48034 +++ b/fs/block_dev.c
48035 @@ -664,7 +664,7 @@ int bd_claim(struct block_device *bdev, void *holder)
48036 else if (bdev->bd_contains == bdev)
48037 res = 0; /* is a whole device which isn't held */
48038
48039 - else if (bdev->bd_contains->bd_holder == bd_claim)
48040 + else if (bdev->bd_contains->bd_holder == (void *)bd_claim)
48041 res = 0; /* is a partition of a device that is being partitioned */
48042 else if (bdev->bd_contains->bd_holder != NULL)
48043 res = -EBUSY; /* is a partition of a held device */
48044 diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
48045 index c4bc570..42acd8d 100644
48046 --- a/fs/btrfs/ctree.c
48047 +++ b/fs/btrfs/ctree.c
48048 @@ -461,9 +461,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
48049 free_extent_buffer(buf);
48050 add_root_to_dirty_list(root);
48051 } else {
48052 - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
48053 - parent_start = parent->start;
48054 - else
48055 + if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
48056 + if (parent)
48057 + parent_start = parent->start;
48058 + else
48059 + parent_start = 0;
48060 + } else
48061 parent_start = 0;
48062
48063 WARN_ON(trans->transid != btrfs_header_generation(parent));
48064 @@ -3645,7 +3648,6 @@ setup_items_for_insert(struct btrfs_trans_handle *trans,
48065
48066 ret = 0;
48067 if (slot == 0) {
48068 - struct btrfs_disk_key disk_key;
48069 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
48070 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
48071 }
48072 diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
48073 index f447188..59c17c5 100644
48074 --- a/fs/btrfs/disk-io.c
48075 +++ b/fs/btrfs/disk-io.c
48076 @@ -39,7 +39,7 @@
48077 #include "tree-log.h"
48078 #include "free-space-cache.h"
48079
48080 -static struct extent_io_ops btree_extent_io_ops;
48081 +static const struct extent_io_ops btree_extent_io_ops;
48082 static void end_workqueue_fn(struct btrfs_work *work);
48083 static void free_fs_root(struct btrfs_root *root);
48084
48085 @@ -2607,7 +2607,7 @@ out:
48086 return 0;
48087 }
48088
48089 -static struct extent_io_ops btree_extent_io_ops = {
48090 +static const struct extent_io_ops btree_extent_io_ops = {
48091 .write_cache_pages_lock_hook = btree_lock_page_hook,
48092 .readpage_end_io_hook = btree_readpage_end_io_hook,
48093 .submit_bio_hook = btree_submit_bio_hook,
48094 diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
48095 index 559f724..a026171 100644
48096 --- a/fs/btrfs/extent-tree.c
48097 +++ b/fs/btrfs/extent-tree.c
48098 @@ -7141,6 +7141,10 @@ static noinline int relocate_one_extent(struct btrfs_root *extent_root,
48099 u64 group_start = group->key.objectid;
48100 new_extents = kmalloc(sizeof(*new_extents),
48101 GFP_NOFS);
48102 + if (!new_extents) {
48103 + ret = -ENOMEM;
48104 + goto out;
48105 + }
48106 nr_extents = 1;
48107 ret = get_new_locations(reloc_inode,
48108 extent_key,
48109 diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
48110 index 36de250..7ec75c7 100644
48111 --- a/fs/btrfs/extent_io.h
48112 +++ b/fs/btrfs/extent_io.h
48113 @@ -49,36 +49,36 @@ typedef int (extent_submit_bio_hook_t)(struct inode *inode, int rw,
48114 struct bio *bio, int mirror_num,
48115 unsigned long bio_flags);
48116 struct extent_io_ops {
48117 - int (*fill_delalloc)(struct inode *inode, struct page *locked_page,
48118 + int (* const fill_delalloc)(struct inode *inode, struct page *locked_page,
48119 u64 start, u64 end, int *page_started,
48120 unsigned long *nr_written);
48121 - int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
48122 - int (*writepage_io_hook)(struct page *page, u64 start, u64 end);
48123 + int (* const writepage_start_hook)(struct page *page, u64 start, u64 end);
48124 + int (* const writepage_io_hook)(struct page *page, u64 start, u64 end);
48125 extent_submit_bio_hook_t *submit_bio_hook;
48126 - int (*merge_bio_hook)(struct page *page, unsigned long offset,
48127 + int (* const merge_bio_hook)(struct page *page, unsigned long offset,
48128 size_t size, struct bio *bio,
48129 unsigned long bio_flags);
48130 - int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
48131 - int (*readpage_io_failed_hook)(struct bio *bio, struct page *page,
48132 + int (* const readpage_io_hook)(struct page *page, u64 start, u64 end);
48133 + int (* const readpage_io_failed_hook)(struct bio *bio, struct page *page,
48134 u64 start, u64 end,
48135 struct extent_state *state);
48136 - int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
48137 + int (* const writepage_io_failed_hook)(struct bio *bio, struct page *page,
48138 u64 start, u64 end,
48139 struct extent_state *state);
48140 - int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end,
48141 + int (* const readpage_end_io_hook)(struct page *page, u64 start, u64 end,
48142 struct extent_state *state);
48143 - int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
48144 + int (* const writepage_end_io_hook)(struct page *page, u64 start, u64 end,
48145 struct extent_state *state, int uptodate);
48146 - int (*set_bit_hook)(struct inode *inode, u64 start, u64 end,
48147 + int (* const set_bit_hook)(struct inode *inode, u64 start, u64 end,
48148 unsigned long old, unsigned long bits);
48149 - int (*clear_bit_hook)(struct inode *inode, struct extent_state *state,
48150 + int (* const clear_bit_hook)(struct inode *inode, struct extent_state *state,
48151 unsigned long bits);
48152 - int (*merge_extent_hook)(struct inode *inode,
48153 + int (* const merge_extent_hook)(struct inode *inode,
48154 struct extent_state *new,
48155 struct extent_state *other);
48156 - int (*split_extent_hook)(struct inode *inode,
48157 + int (* const split_extent_hook)(struct inode *inode,
48158 struct extent_state *orig, u64 split);
48159 - int (*write_cache_pages_lock_hook)(struct page *page);
48160 + int (* const write_cache_pages_lock_hook)(struct page *page);
48161 };
48162
48163 struct extent_io_tree {
48164 @@ -88,7 +88,7 @@ struct extent_io_tree {
48165 u64 dirty_bytes;
48166 spinlock_t lock;
48167 spinlock_t buffer_lock;
48168 - struct extent_io_ops *ops;
48169 + const struct extent_io_ops *ops;
48170 };
48171
48172 struct extent_state {
48173 diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
48174 index cb2849f..3718fb4 100644
48175 --- a/fs/btrfs/free-space-cache.c
48176 +++ b/fs/btrfs/free-space-cache.c
48177 @@ -1074,8 +1074,6 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
48178
48179 while(1) {
48180 if (entry->bytes < bytes || entry->offset < min_start) {
48181 - struct rb_node *node;
48182 -
48183 node = rb_next(&entry->offset_index);
48184 if (!node)
48185 break;
48186 @@ -1226,7 +1224,7 @@ again:
48187 */
48188 while (entry->bitmap || found_bitmap ||
48189 (!entry->bitmap && entry->bytes < min_bytes)) {
48190 - struct rb_node *node = rb_next(&entry->offset_index);
48191 + node = rb_next(&entry->offset_index);
48192
48193 if (entry->bitmap && entry->bytes > bytes + empty_size) {
48194 ret = btrfs_bitmap_cluster(block_group, entry, cluster,
48195 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
48196 index e03a836..323837e 100644
48197 --- a/fs/btrfs/inode.c
48198 +++ b/fs/btrfs/inode.c
48199 @@ -63,7 +63,7 @@ static const struct inode_operations btrfs_file_inode_operations;
48200 static const struct address_space_operations btrfs_aops;
48201 static const struct address_space_operations btrfs_symlink_aops;
48202 static const struct file_operations btrfs_dir_file_operations;
48203 -static struct extent_io_ops btrfs_extent_io_ops;
48204 +static const struct extent_io_ops btrfs_extent_io_ops;
48205
48206 static struct kmem_cache *btrfs_inode_cachep;
48207 struct kmem_cache *btrfs_trans_handle_cachep;
48208 @@ -925,6 +925,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
48209 1, 0, NULL, GFP_NOFS);
48210 while (start < end) {
48211 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
48212 + BUG_ON(!async_cow);
48213 async_cow->inode = inode;
48214 async_cow->root = root;
48215 async_cow->locked_page = locked_page;
48216 @@ -4591,6 +4592,8 @@ static noinline int uncompress_inline(struct btrfs_path *path,
48217 inline_size = btrfs_file_extent_inline_item_len(leaf,
48218 btrfs_item_nr(leaf, path->slots[0]));
48219 tmp = kmalloc(inline_size, GFP_NOFS);
48220 + if (!tmp)
48221 + return -ENOMEM;
48222 ptr = btrfs_file_extent_inline_start(item);
48223
48224 read_extent_buffer(leaf, tmp, ptr, inline_size);
48225 @@ -5410,7 +5413,7 @@ fail:
48226 return -ENOMEM;
48227 }
48228
48229 -static int btrfs_getattr(struct vfsmount *mnt,
48230 +int btrfs_getattr(struct vfsmount *mnt,
48231 struct dentry *dentry, struct kstat *stat)
48232 {
48233 struct inode *inode = dentry->d_inode;
48234 @@ -5422,6 +5425,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
48235 return 0;
48236 }
48237
48238 +EXPORT_SYMBOL(btrfs_getattr);
48239 +
48240 +dev_t get_btrfs_dev_from_inode(struct inode *inode)
48241 +{
48242 + return BTRFS_I(inode)->root->anon_super.s_dev;
48243 +}
48244 +EXPORT_SYMBOL(get_btrfs_dev_from_inode);
48245 +
48246 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
48247 struct inode *new_dir, struct dentry *new_dentry)
48248 {
48249 @@ -5972,7 +5983,7 @@ static const struct file_operations btrfs_dir_file_operations = {
48250 .fsync = btrfs_sync_file,
48251 };
48252
48253 -static struct extent_io_ops btrfs_extent_io_ops = {
48254 +static const struct extent_io_ops btrfs_extent_io_ops = {
48255 .fill_delalloc = run_delalloc_range,
48256 .submit_bio_hook = btrfs_submit_bio_hook,
48257 .merge_bio_hook = btrfs_merge_bio_hook,
48258 diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
48259 index ab7ab53..94e0781 100644
48260 --- a/fs/btrfs/relocation.c
48261 +++ b/fs/btrfs/relocation.c
48262 @@ -884,7 +884,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
48263 }
48264 spin_unlock(&rc->reloc_root_tree.lock);
48265
48266 - BUG_ON((struct btrfs_root *)node->data != root);
48267 + BUG_ON(!node || (struct btrfs_root *)node->data != root);
48268
48269 if (!del) {
48270 spin_lock(&rc->reloc_root_tree.lock);
48271 diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
48272 index a240b6f..4ce16ef 100644
48273 --- a/fs/btrfs/sysfs.c
48274 +++ b/fs/btrfs/sysfs.c
48275 @@ -164,12 +164,12 @@ static void btrfs_root_release(struct kobject *kobj)
48276 complete(&root->kobj_unregister);
48277 }
48278
48279 -static struct sysfs_ops btrfs_super_attr_ops = {
48280 +static const struct sysfs_ops btrfs_super_attr_ops = {
48281 .show = btrfs_super_attr_show,
48282 .store = btrfs_super_attr_store,
48283 };
48284
48285 -static struct sysfs_ops btrfs_root_attr_ops = {
48286 +static const struct sysfs_ops btrfs_root_attr_ops = {
48287 .show = btrfs_root_attr_show,
48288 .store = btrfs_root_attr_store,
48289 };
48290 diff --git a/fs/buffer.c b/fs/buffer.c
48291 index 6fa5302..395d9f6 100644
48292 --- a/fs/buffer.c
48293 +++ b/fs/buffer.c
48294 @@ -25,6 +25,7 @@
48295 #include <linux/percpu.h>
48296 #include <linux/slab.h>
48297 #include <linux/capability.h>
48298 +#include <linux/security.h>
48299 #include <linux/blkdev.h>
48300 #include <linux/file.h>
48301 #include <linux/quotaops.h>
48302 diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
48303 index 3797e00..ce776f6 100644
48304 --- a/fs/cachefiles/bind.c
48305 +++ b/fs/cachefiles/bind.c
48306 @@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
48307 args);
48308
48309 /* start by checking things over */
48310 - ASSERT(cache->fstop_percent >= 0 &&
48311 - cache->fstop_percent < cache->fcull_percent &&
48312 + ASSERT(cache->fstop_percent < cache->fcull_percent &&
48313 cache->fcull_percent < cache->frun_percent &&
48314 cache->frun_percent < 100);
48315
48316 - ASSERT(cache->bstop_percent >= 0 &&
48317 - cache->bstop_percent < cache->bcull_percent &&
48318 + ASSERT(cache->bstop_percent < cache->bcull_percent &&
48319 cache->bcull_percent < cache->brun_percent &&
48320 cache->brun_percent < 100);
48321
48322 diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
48323 index 4618516..bb30d01 100644
48324 --- a/fs/cachefiles/daemon.c
48325 +++ b/fs/cachefiles/daemon.c
48326 @@ -220,7 +220,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
48327 if (test_bit(CACHEFILES_DEAD, &cache->flags))
48328 return -EIO;
48329
48330 - if (datalen < 0 || datalen > PAGE_SIZE - 1)
48331 + if (datalen > PAGE_SIZE - 1)
48332 return -EOPNOTSUPP;
48333
48334 /* drag the command string into the kernel so we can parse it */
48335 @@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
48336 if (args[0] != '%' || args[1] != '\0')
48337 return -EINVAL;
48338
48339 - if (fstop < 0 || fstop >= cache->fcull_percent)
48340 + if (fstop >= cache->fcull_percent)
48341 return cachefiles_daemon_range_error(cache, args);
48342
48343 cache->fstop_percent = fstop;
48344 @@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
48345 if (args[0] != '%' || args[1] != '\0')
48346 return -EINVAL;
48347
48348 - if (bstop < 0 || bstop >= cache->bcull_percent)
48349 + if (bstop >= cache->bcull_percent)
48350 return cachefiles_daemon_range_error(cache, args);
48351
48352 cache->bstop_percent = bstop;
48353 diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
48354 index f7c255f..fcd61de 100644
48355 --- a/fs/cachefiles/internal.h
48356 +++ b/fs/cachefiles/internal.h
48357 @@ -56,7 +56,7 @@ struct cachefiles_cache {
48358 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
48359 struct rb_root active_nodes; /* active nodes (can't be culled) */
48360 rwlock_t active_lock; /* lock for active_nodes */
48361 - atomic_t gravecounter; /* graveyard uniquifier */
48362 + atomic_unchecked_t gravecounter; /* graveyard uniquifier */
48363 unsigned frun_percent; /* when to stop culling (% files) */
48364 unsigned fcull_percent; /* when to start culling (% files) */
48365 unsigned fstop_percent; /* when to stop allocating (% files) */
48366 @@ -168,19 +168,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
48367 * proc.c
48368 */
48369 #ifdef CONFIG_CACHEFILES_HISTOGRAM
48370 -extern atomic_t cachefiles_lookup_histogram[HZ];
48371 -extern atomic_t cachefiles_mkdir_histogram[HZ];
48372 -extern atomic_t cachefiles_create_histogram[HZ];
48373 +extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
48374 +extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
48375 +extern atomic_unchecked_t cachefiles_create_histogram[HZ];
48376
48377 extern int __init cachefiles_proc_init(void);
48378 extern void cachefiles_proc_cleanup(void);
48379 static inline
48380 -void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
48381 +void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
48382 {
48383 unsigned long jif = jiffies - start_jif;
48384 if (jif >= HZ)
48385 jif = HZ - 1;
48386 - atomic_inc(&histogram[jif]);
48387 + atomic_inc_unchecked(&histogram[jif]);
48388 }
48389
48390 #else
48391 diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
48392 index 14ac480..a62766c 100644
48393 --- a/fs/cachefiles/namei.c
48394 +++ b/fs/cachefiles/namei.c
48395 @@ -250,7 +250,7 @@ try_again:
48396 /* first step is to make up a grave dentry in the graveyard */
48397 sprintf(nbuffer, "%08x%08x",
48398 (uint32_t) get_seconds(),
48399 - (uint32_t) atomic_inc_return(&cache->gravecounter));
48400 + (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
48401
48402 /* do the multiway lock magic */
48403 trap = lock_rename(cache->graveyard, dir);
48404 diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
48405 index eccd339..4c1d995 100644
48406 --- a/fs/cachefiles/proc.c
48407 +++ b/fs/cachefiles/proc.c
48408 @@ -14,9 +14,9 @@
48409 #include <linux/seq_file.h>
48410 #include "internal.h"
48411
48412 -atomic_t cachefiles_lookup_histogram[HZ];
48413 -atomic_t cachefiles_mkdir_histogram[HZ];
48414 -atomic_t cachefiles_create_histogram[HZ];
48415 +atomic_unchecked_t cachefiles_lookup_histogram[HZ];
48416 +atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
48417 +atomic_unchecked_t cachefiles_create_histogram[HZ];
48418
48419 /*
48420 * display the latency histogram
48421 @@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
48422 return 0;
48423 default:
48424 index = (unsigned long) v - 3;
48425 - x = atomic_read(&cachefiles_lookup_histogram[index]);
48426 - y = atomic_read(&cachefiles_mkdir_histogram[index]);
48427 - z = atomic_read(&cachefiles_create_histogram[index]);
48428 + x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
48429 + y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
48430 + z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
48431 if (x == 0 && y == 0 && z == 0)
48432 return 0;
48433
48434 diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
48435 index a6c8c6f..5cf8517 100644
48436 --- a/fs/cachefiles/rdwr.c
48437 +++ b/fs/cachefiles/rdwr.c
48438 @@ -946,7 +946,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
48439 old_fs = get_fs();
48440 set_fs(KERNEL_DS);
48441 ret = file->f_op->write(
48442 - file, (const void __user *) data, len, &pos);
48443 + file, (const void __force_user *) data, len, &pos);
48444 set_fs(old_fs);
48445 kunmap(page);
48446 if (ret != len)
48447 diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
48448 index 42cec2a..2aba466 100644
48449 --- a/fs/cifs/cifs_debug.c
48450 +++ b/fs/cifs/cifs_debug.c
48451 @@ -256,25 +256,25 @@ static ssize_t cifs_stats_proc_write(struct file *file,
48452 tcon = list_entry(tmp3,
48453 struct cifsTconInfo,
48454 tcon_list);
48455 - atomic_set(&tcon->num_smbs_sent, 0);
48456 - atomic_set(&tcon->num_writes, 0);
48457 - atomic_set(&tcon->num_reads, 0);
48458 - atomic_set(&tcon->num_oplock_brks, 0);
48459 - atomic_set(&tcon->num_opens, 0);
48460 - atomic_set(&tcon->num_posixopens, 0);
48461 - atomic_set(&tcon->num_posixmkdirs, 0);
48462 - atomic_set(&tcon->num_closes, 0);
48463 - atomic_set(&tcon->num_deletes, 0);
48464 - atomic_set(&tcon->num_mkdirs, 0);
48465 - atomic_set(&tcon->num_rmdirs, 0);
48466 - atomic_set(&tcon->num_renames, 0);
48467 - atomic_set(&tcon->num_t2renames, 0);
48468 - atomic_set(&tcon->num_ffirst, 0);
48469 - atomic_set(&tcon->num_fnext, 0);
48470 - atomic_set(&tcon->num_fclose, 0);
48471 - atomic_set(&tcon->num_hardlinks, 0);
48472 - atomic_set(&tcon->num_symlinks, 0);
48473 - atomic_set(&tcon->num_locks, 0);
48474 + atomic_set_unchecked(&tcon->num_smbs_sent, 0);
48475 + atomic_set_unchecked(&tcon->num_writes, 0);
48476 + atomic_set_unchecked(&tcon->num_reads, 0);
48477 + atomic_set_unchecked(&tcon->num_oplock_brks, 0);
48478 + atomic_set_unchecked(&tcon->num_opens, 0);
48479 + atomic_set_unchecked(&tcon->num_posixopens, 0);
48480 + atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
48481 + atomic_set_unchecked(&tcon->num_closes, 0);
48482 + atomic_set_unchecked(&tcon->num_deletes, 0);
48483 + atomic_set_unchecked(&tcon->num_mkdirs, 0);
48484 + atomic_set_unchecked(&tcon->num_rmdirs, 0);
48485 + atomic_set_unchecked(&tcon->num_renames, 0);
48486 + atomic_set_unchecked(&tcon->num_t2renames, 0);
48487 + atomic_set_unchecked(&tcon->num_ffirst, 0);
48488 + atomic_set_unchecked(&tcon->num_fnext, 0);
48489 + atomic_set_unchecked(&tcon->num_fclose, 0);
48490 + atomic_set_unchecked(&tcon->num_hardlinks, 0);
48491 + atomic_set_unchecked(&tcon->num_symlinks, 0);
48492 + atomic_set_unchecked(&tcon->num_locks, 0);
48493 }
48494 }
48495 }
48496 @@ -334,41 +334,41 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
48497 if (tcon->need_reconnect)
48498 seq_puts(m, "\tDISCONNECTED ");
48499 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
48500 - atomic_read(&tcon->num_smbs_sent),
48501 - atomic_read(&tcon->num_oplock_brks));
48502 + atomic_read_unchecked(&tcon->num_smbs_sent),
48503 + atomic_read_unchecked(&tcon->num_oplock_brks));
48504 seq_printf(m, "\nReads: %d Bytes: %lld",
48505 - atomic_read(&tcon->num_reads),
48506 + atomic_read_unchecked(&tcon->num_reads),
48507 (long long)(tcon->bytes_read));
48508 seq_printf(m, "\nWrites: %d Bytes: %lld",
48509 - atomic_read(&tcon->num_writes),
48510 + atomic_read_unchecked(&tcon->num_writes),
48511 (long long)(tcon->bytes_written));
48512 seq_printf(m, "\nFlushes: %d",
48513 - atomic_read(&tcon->num_flushes));
48514 + atomic_read_unchecked(&tcon->num_flushes));
48515 seq_printf(m, "\nLocks: %d HardLinks: %d "
48516 "Symlinks: %d",
48517 - atomic_read(&tcon->num_locks),
48518 - atomic_read(&tcon->num_hardlinks),
48519 - atomic_read(&tcon->num_symlinks));
48520 + atomic_read_unchecked(&tcon->num_locks),
48521 + atomic_read_unchecked(&tcon->num_hardlinks),
48522 + atomic_read_unchecked(&tcon->num_symlinks));
48523 seq_printf(m, "\nOpens: %d Closes: %d "
48524 "Deletes: %d",
48525 - atomic_read(&tcon->num_opens),
48526 - atomic_read(&tcon->num_closes),
48527 - atomic_read(&tcon->num_deletes));
48528 + atomic_read_unchecked(&tcon->num_opens),
48529 + atomic_read_unchecked(&tcon->num_closes),
48530 + atomic_read_unchecked(&tcon->num_deletes));
48531 seq_printf(m, "\nPosix Opens: %d "
48532 "Posix Mkdirs: %d",
48533 - atomic_read(&tcon->num_posixopens),
48534 - atomic_read(&tcon->num_posixmkdirs));
48535 + atomic_read_unchecked(&tcon->num_posixopens),
48536 + atomic_read_unchecked(&tcon->num_posixmkdirs));
48537 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
48538 - atomic_read(&tcon->num_mkdirs),
48539 - atomic_read(&tcon->num_rmdirs));
48540 + atomic_read_unchecked(&tcon->num_mkdirs),
48541 + atomic_read_unchecked(&tcon->num_rmdirs));
48542 seq_printf(m, "\nRenames: %d T2 Renames %d",
48543 - atomic_read(&tcon->num_renames),
48544 - atomic_read(&tcon->num_t2renames));
48545 + atomic_read_unchecked(&tcon->num_renames),
48546 + atomic_read_unchecked(&tcon->num_t2renames));
48547 seq_printf(m, "\nFindFirst: %d FNext %d "
48548 "FClose %d",
48549 - atomic_read(&tcon->num_ffirst),
48550 - atomic_read(&tcon->num_fnext),
48551 - atomic_read(&tcon->num_fclose));
48552 + atomic_read_unchecked(&tcon->num_ffirst),
48553 + atomic_read_unchecked(&tcon->num_fnext),
48554 + atomic_read_unchecked(&tcon->num_fclose));
48555 }
48556 }
48557 }
48558 diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
48559 index 1445407..68cb0dc 100644
48560 --- a/fs/cifs/cifsfs.c
48561 +++ b/fs/cifs/cifsfs.c
48562 @@ -869,7 +869,7 @@ cifs_init_request_bufs(void)
48563 cifs_req_cachep = kmem_cache_create("cifs_request",
48564 CIFSMaxBufSize +
48565 MAX_CIFS_HDR_SIZE, 0,
48566 - SLAB_HWCACHE_ALIGN, NULL);
48567 + SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
48568 if (cifs_req_cachep == NULL)
48569 return -ENOMEM;
48570
48571 @@ -896,7 +896,7 @@ cifs_init_request_bufs(void)
48572 efficient to alloc 1 per page off the slab compared to 17K (5page)
48573 alloc of large cifs buffers even when page debugging is on */
48574 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
48575 - MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
48576 + MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
48577 NULL);
48578 if (cifs_sm_req_cachep == NULL) {
48579 mempool_destroy(cifs_req_poolp);
48580 @@ -991,8 +991,8 @@ init_cifs(void)
48581 atomic_set(&bufAllocCount, 0);
48582 atomic_set(&smBufAllocCount, 0);
48583 #ifdef CONFIG_CIFS_STATS2
48584 - atomic_set(&totBufAllocCount, 0);
48585 - atomic_set(&totSmBufAllocCount, 0);
48586 + atomic_set_unchecked(&totBufAllocCount, 0);
48587 + atomic_set_unchecked(&totSmBufAllocCount, 0);
48588 #endif /* CONFIG_CIFS_STATS2 */
48589
48590 atomic_set(&midCount, 0);
48591 diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
48592 index e29581e..1c22bab 100644
48593 --- a/fs/cifs/cifsglob.h
48594 +++ b/fs/cifs/cifsglob.h
48595 @@ -252,28 +252,28 @@ struct cifsTconInfo {
48596 __u16 Flags; /* optional support bits */
48597 enum statusEnum tidStatus;
48598 #ifdef CONFIG_CIFS_STATS
48599 - atomic_t num_smbs_sent;
48600 - atomic_t num_writes;
48601 - atomic_t num_reads;
48602 - atomic_t num_flushes;
48603 - atomic_t num_oplock_brks;
48604 - atomic_t num_opens;
48605 - atomic_t num_closes;
48606 - atomic_t num_deletes;
48607 - atomic_t num_mkdirs;
48608 - atomic_t num_posixopens;
48609 - atomic_t num_posixmkdirs;
48610 - atomic_t num_rmdirs;
48611 - atomic_t num_renames;
48612 - atomic_t num_t2renames;
48613 - atomic_t num_ffirst;
48614 - atomic_t num_fnext;
48615 - atomic_t num_fclose;
48616 - atomic_t num_hardlinks;
48617 - atomic_t num_symlinks;
48618 - atomic_t num_locks;
48619 - atomic_t num_acl_get;
48620 - atomic_t num_acl_set;
48621 + atomic_unchecked_t num_smbs_sent;
48622 + atomic_unchecked_t num_writes;
48623 + atomic_unchecked_t num_reads;
48624 + atomic_unchecked_t num_flushes;
48625 + atomic_unchecked_t num_oplock_brks;
48626 + atomic_unchecked_t num_opens;
48627 + atomic_unchecked_t num_closes;
48628 + atomic_unchecked_t num_deletes;
48629 + atomic_unchecked_t num_mkdirs;
48630 + atomic_unchecked_t num_posixopens;
48631 + atomic_unchecked_t num_posixmkdirs;
48632 + atomic_unchecked_t num_rmdirs;
48633 + atomic_unchecked_t num_renames;
48634 + atomic_unchecked_t num_t2renames;
48635 + atomic_unchecked_t num_ffirst;
48636 + atomic_unchecked_t num_fnext;
48637 + atomic_unchecked_t num_fclose;
48638 + atomic_unchecked_t num_hardlinks;
48639 + atomic_unchecked_t num_symlinks;
48640 + atomic_unchecked_t num_locks;
48641 + atomic_unchecked_t num_acl_get;
48642 + atomic_unchecked_t num_acl_set;
48643 #ifdef CONFIG_CIFS_STATS2
48644 unsigned long long time_writes;
48645 unsigned long long time_reads;
48646 @@ -414,7 +414,7 @@ static inline char CIFS_DIR_SEP(const struct cifs_sb_info *cifs_sb)
48647 }
48648
48649 #ifdef CONFIG_CIFS_STATS
48650 -#define cifs_stats_inc atomic_inc
48651 +#define cifs_stats_inc atomic_inc_unchecked
48652
48653 static inline void cifs_stats_bytes_written(struct cifsTconInfo *tcon,
48654 unsigned int bytes)
48655 @@ -701,8 +701,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
48656 /* Various Debug counters */
48657 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
48658 #ifdef CONFIG_CIFS_STATS2
48659 -GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
48660 -GLOBAL_EXTERN atomic_t totSmBufAllocCount;
48661 +GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
48662 +GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
48663 #endif
48664 GLOBAL_EXTERN atomic_t smBufAllocCount;
48665 GLOBAL_EXTERN atomic_t midCount;
48666 diff --git a/fs/cifs/link.c b/fs/cifs/link.c
48667 index fc1e048..28b3441 100644
48668 --- a/fs/cifs/link.c
48669 +++ b/fs/cifs/link.c
48670 @@ -215,7 +215,7 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname)
48671
48672 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
48673 {
48674 - char *p = nd_get_link(nd);
48675 + const char *p = nd_get_link(nd);
48676 if (!IS_ERR(p))
48677 kfree(p);
48678 }
48679 diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
48680 index 95b82e8..12a538d 100644
48681 --- a/fs/cifs/misc.c
48682 +++ b/fs/cifs/misc.c
48683 @@ -155,7 +155,7 @@ cifs_buf_get(void)
48684 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
48685 atomic_inc(&bufAllocCount);
48686 #ifdef CONFIG_CIFS_STATS2
48687 - atomic_inc(&totBufAllocCount);
48688 + atomic_inc_unchecked(&totBufAllocCount);
48689 #endif /* CONFIG_CIFS_STATS2 */
48690 }
48691
48692 @@ -190,7 +190,7 @@ cifs_small_buf_get(void)
48693 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
48694 atomic_inc(&smBufAllocCount);
48695 #ifdef CONFIG_CIFS_STATS2
48696 - atomic_inc(&totSmBufAllocCount);
48697 + atomic_inc_unchecked(&totSmBufAllocCount);
48698 #endif /* CONFIG_CIFS_STATS2 */
48699
48700 }
48701 diff --git a/fs/coda/cache.c b/fs/coda/cache.c
48702 index a5bf577..6d19845 100644
48703 --- a/fs/coda/cache.c
48704 +++ b/fs/coda/cache.c
48705 @@ -24,14 +24,14 @@
48706 #include <linux/coda_fs_i.h>
48707 #include <linux/coda_cache.h>
48708
48709 -static atomic_t permission_epoch = ATOMIC_INIT(0);
48710 +static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
48711
48712 /* replace or extend an acl cache hit */
48713 void coda_cache_enter(struct inode *inode, int mask)
48714 {
48715 struct coda_inode_info *cii = ITOC(inode);
48716
48717 - cii->c_cached_epoch = atomic_read(&permission_epoch);
48718 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
48719 if (cii->c_uid != current_fsuid()) {
48720 cii->c_uid = current_fsuid();
48721 cii->c_cached_perm = mask;
48722 @@ -43,13 +43,13 @@ void coda_cache_enter(struct inode *inode, int mask)
48723 void coda_cache_clear_inode(struct inode *inode)
48724 {
48725 struct coda_inode_info *cii = ITOC(inode);
48726 - cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
48727 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
48728 }
48729
48730 /* remove all acl caches */
48731 void coda_cache_clear_all(struct super_block *sb)
48732 {
48733 - atomic_inc(&permission_epoch);
48734 + atomic_inc_unchecked(&permission_epoch);
48735 }
48736
48737
48738 @@ -61,7 +61,7 @@ int coda_cache_check(struct inode *inode, int mask)
48739
48740 hit = (mask & cii->c_cached_perm) == mask &&
48741 cii->c_uid == current_fsuid() &&
48742 - cii->c_cached_epoch == atomic_read(&permission_epoch);
48743 + cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
48744
48745 return hit;
48746 }
48747 diff --git a/fs/compat.c b/fs/compat.c
48748 index d1e2411..9a958d2 100644
48749 --- a/fs/compat.c
48750 +++ b/fs/compat.c
48751 @@ -133,8 +133,8 @@ asmlinkage long compat_sys_utimes(char __user *filename, struct compat_timeval _
48752 static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
48753 {
48754 compat_ino_t ino = stat->ino;
48755 - typeof(ubuf->st_uid) uid = 0;
48756 - typeof(ubuf->st_gid) gid = 0;
48757 + typeof(((struct compat_stat *)0)->st_uid) uid = 0;
48758 + typeof(((struct compat_stat *)0)->st_gid) gid = 0;
48759 int err;
48760
48761 SET_UID(uid, stat->uid);
48762 @@ -533,7 +533,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
48763
48764 set_fs(KERNEL_DS);
48765 /* The __user pointer cast is valid because of the set_fs() */
48766 - ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
48767 + ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
48768 set_fs(oldfs);
48769 /* truncating is ok because it's a user address */
48770 if (!ret)
48771 @@ -830,6 +830,7 @@ struct compat_old_linux_dirent {
48772
48773 struct compat_readdir_callback {
48774 struct compat_old_linux_dirent __user *dirent;
48775 + struct file * file;
48776 int result;
48777 };
48778
48779 @@ -847,6 +848,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
48780 buf->result = -EOVERFLOW;
48781 return -EOVERFLOW;
48782 }
48783 +
48784 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48785 + return 0;
48786 +
48787 buf->result++;
48788 dirent = buf->dirent;
48789 if (!access_ok(VERIFY_WRITE, dirent,
48790 @@ -879,6 +884,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
48791
48792 buf.result = 0;
48793 buf.dirent = dirent;
48794 + buf.file = file;
48795
48796 error = vfs_readdir(file, compat_fillonedir, &buf);
48797 if (buf.result)
48798 @@ -899,6 +905,7 @@ struct compat_linux_dirent {
48799 struct compat_getdents_callback {
48800 struct compat_linux_dirent __user *current_dir;
48801 struct compat_linux_dirent __user *previous;
48802 + struct file * file;
48803 int count;
48804 int error;
48805 };
48806 @@ -919,6 +926,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
48807 buf->error = -EOVERFLOW;
48808 return -EOVERFLOW;
48809 }
48810 +
48811 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48812 + return 0;
48813 +
48814 dirent = buf->previous;
48815 if (dirent) {
48816 if (__put_user(offset, &dirent->d_off))
48817 @@ -966,6 +977,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
48818 buf.previous = NULL;
48819 buf.count = count;
48820 buf.error = 0;
48821 + buf.file = file;
48822
48823 error = vfs_readdir(file, compat_filldir, &buf);
48824 if (error >= 0)
48825 @@ -987,6 +999,7 @@ out:
48826 struct compat_getdents_callback64 {
48827 struct linux_dirent64 __user *current_dir;
48828 struct linux_dirent64 __user *previous;
48829 + struct file * file;
48830 int count;
48831 int error;
48832 };
48833 @@ -1003,6 +1016,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
48834 buf->error = -EINVAL; /* only used if we fail.. */
48835 if (reclen > buf->count)
48836 return -EINVAL;
48837 +
48838 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48839 + return 0;
48840 +
48841 dirent = buf->previous;
48842
48843 if (dirent) {
48844 @@ -1054,13 +1071,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
48845 buf.previous = NULL;
48846 buf.count = count;
48847 buf.error = 0;
48848 + buf.file = file;
48849
48850 error = vfs_readdir(file, compat_filldir64, &buf);
48851 if (error >= 0)
48852 error = buf.error;
48853 lastdirent = buf.previous;
48854 if (lastdirent) {
48855 - typeof(lastdirent->d_off) d_off = file->f_pos;
48856 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
48857 if (__put_user_unaligned(d_off, &lastdirent->d_off))
48858 error = -EFAULT;
48859 else
48860 @@ -1098,7 +1116,7 @@ static ssize_t compat_do_readv_writev(int type, struct file *file,
48861 * verify all the pointers
48862 */
48863 ret = -EINVAL;
48864 - if ((nr_segs > UIO_MAXIOV) || (nr_segs <= 0))
48865 + if (nr_segs > UIO_MAXIOV)
48866 goto out;
48867 if (!file->f_op)
48868 goto out;
48869 @@ -1463,11 +1481,35 @@ int compat_do_execve(char * filename,
48870 compat_uptr_t __user *envp,
48871 struct pt_regs * regs)
48872 {
48873 +#ifdef CONFIG_GRKERNSEC
48874 + struct file *old_exec_file;
48875 + struct acl_subject_label *old_acl;
48876 + struct rlimit old_rlim[RLIM_NLIMITS];
48877 +#endif
48878 struct linux_binprm *bprm;
48879 struct file *file;
48880 struct files_struct *displaced;
48881 bool clear_in_exec;
48882 int retval;
48883 + const struct cred *cred = current_cred();
48884 +
48885 + /*
48886 + * We move the actual failure in case of RLIMIT_NPROC excess from
48887 + * set*uid() to execve() because too many poorly written programs
48888 + * don't check setuid() return code. Here we additionally recheck
48889 + * whether NPROC limit is still exceeded.
48890 + */
48891 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
48892 +
48893 + if ((current->flags & PF_NPROC_EXCEEDED) &&
48894 + atomic_read(&cred->user->processes) > current->signal->rlim[RLIMIT_NPROC].rlim_cur) {
48895 + retval = -EAGAIN;
48896 + goto out_ret;
48897 + }
48898 +
48899 + /* We're below the limit (still or again), so we don't want to make
48900 + * further execve() calls fail. */
48901 + current->flags &= ~PF_NPROC_EXCEEDED;
48902
48903 retval = unshare_files(&displaced);
48904 if (retval)
48905 @@ -1493,12 +1535,26 @@ int compat_do_execve(char * filename,
48906 if (IS_ERR(file))
48907 goto out_unmark;
48908
48909 + if (gr_ptrace_readexec(file, bprm->unsafe)) {
48910 + retval = -EPERM;
48911 + goto out_file;
48912 + }
48913 +
48914 sched_exec();
48915
48916 bprm->file = file;
48917 bprm->filename = filename;
48918 bprm->interp = filename;
48919
48920 + if (gr_process_user_ban()) {
48921 + retval = -EPERM;
48922 + goto out_file;
48923 + }
48924 +
48925 + retval = -EACCES;
48926 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt))
48927 + goto out_file;
48928 +
48929 retval = bprm_mm_init(bprm);
48930 if (retval)
48931 goto out_file;
48932 @@ -1515,24 +1571,63 @@ int compat_do_execve(char * filename,
48933 if (retval < 0)
48934 goto out;
48935
48936 +#ifdef CONFIG_GRKERNSEC
48937 + old_acl = current->acl;
48938 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
48939 + old_exec_file = current->exec_file;
48940 + get_file(file);
48941 + current->exec_file = file;
48942 +#endif
48943 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48944 + /* limit suid stack to 8MB
48945 + we saved the old limits above and will restore them if this exec fails
48946 + */
48947 + if ((bprm->cred->euid != current_euid()) || (bprm->cred->egid != current_egid()))
48948 + current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
48949 +#endif
48950 +
48951 + if (!gr_tpe_allow(file)) {
48952 + retval = -EACCES;
48953 + goto out_fail;
48954 + }
48955 +
48956 + if (gr_check_crash_exec(file)) {
48957 + retval = -EACCES;
48958 + goto out_fail;
48959 + }
48960 +
48961 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
48962 + bprm->unsafe);
48963 + if (retval < 0)
48964 + goto out_fail;
48965 +
48966 retval = copy_strings_kernel(1, &bprm->filename, bprm);
48967 if (retval < 0)
48968 - goto out;
48969 + goto out_fail;
48970
48971 bprm->exec = bprm->p;
48972 retval = compat_copy_strings(bprm->envc, envp, bprm);
48973 if (retval < 0)
48974 - goto out;
48975 + goto out_fail;
48976
48977 retval = compat_copy_strings(bprm->argc, argv, bprm);
48978 if (retval < 0)
48979 - goto out;
48980 + goto out_fail;
48981 +
48982 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
48983 +
48984 + gr_handle_exec_args_compat(bprm, argv);
48985
48986 retval = search_binary_handler(bprm, regs);
48987 if (retval < 0)
48988 - goto out;
48989 + goto out_fail;
48990 +#ifdef CONFIG_GRKERNSEC
48991 + if (old_exec_file)
48992 + fput(old_exec_file);
48993 +#endif
48994
48995 /* execve succeeded */
48996 + increment_exec_counter();
48997 current->fs->in_exec = 0;
48998 current->in_execve = 0;
48999 acct_update_integrals(current);
49000 @@ -1541,6 +1636,14 @@ int compat_do_execve(char * filename,
49001 put_files_struct(displaced);
49002 return retval;
49003
49004 +out_fail:
49005 +#ifdef CONFIG_GRKERNSEC
49006 + current->acl = old_acl;
49007 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
49008 + fput(current->exec_file);
49009 + current->exec_file = old_exec_file;
49010 +#endif
49011 +
49012 out:
49013 if (bprm->mm) {
49014 acct_arg_size(bprm, 0);
49015 @@ -1711,6 +1814,8 @@ int compat_core_sys_select(int n, compat_ulong_t __user *inp,
49016 struct fdtable *fdt;
49017 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
49018
49019 + pax_track_stack();
49020 +
49021 if (n < 0)
49022 goto out_nofds;
49023
49024 @@ -2151,7 +2256,7 @@ asmlinkage long compat_sys_nfsservctl(int cmd,
49025 oldfs = get_fs();
49026 set_fs(KERNEL_DS);
49027 /* The __user pointer casts are valid because of the set_fs() */
49028 - err = sys_nfsservctl(cmd, (void __user *) karg, (void __user *) kres);
49029 + err = sys_nfsservctl(cmd, (void __force_user *) karg, (void __force_user *) kres);
49030 set_fs(oldfs);
49031
49032 if (err)
49033 diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
49034 index 0adced2..bbb1b0d 100644
49035 --- a/fs/compat_binfmt_elf.c
49036 +++ b/fs/compat_binfmt_elf.c
49037 @@ -29,10 +29,12 @@
49038 #undef elfhdr
49039 #undef elf_phdr
49040 #undef elf_note
49041 +#undef elf_dyn
49042 #undef elf_addr_t
49043 #define elfhdr elf32_hdr
49044 #define elf_phdr elf32_phdr
49045 #define elf_note elf32_note
49046 +#define elf_dyn Elf32_Dyn
49047 #define elf_addr_t Elf32_Addr
49048
49049 /*
49050 diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
49051 index d84e705..d8c364c 100644
49052 --- a/fs/compat_ioctl.c
49053 +++ b/fs/compat_ioctl.c
49054 @@ -234,6 +234,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd, unsigned
49055 up = (struct compat_video_spu_palette __user *) arg;
49056 err = get_user(palp, &up->palette);
49057 err |= get_user(length, &up->length);
49058 + if (err)
49059 + return -EFAULT;
49060
49061 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
49062 err = put_user(compat_ptr(palp), &up_native->palette);
49063 @@ -1513,7 +1515,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd, unsigned long arg)
49064 return -EFAULT;
49065 if (__get_user(udata, &ss32->iomem_base))
49066 return -EFAULT;
49067 - ss.iomem_base = compat_ptr(udata);
49068 + ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
49069 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
49070 __get_user(ss.port_high, &ss32->port_high))
49071 return -EFAULT;
49072 @@ -1809,7 +1811,7 @@ static int compat_ioctl_preallocate(struct file *file, unsigned long arg)
49073 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
49074 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
49075 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
49076 - copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
49077 + copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
49078 return -EFAULT;
49079
49080 return ioctl_preallocate(file, p);
49081 diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
49082 index 8e48b52..f01ed91 100644
49083 --- a/fs/configfs/dir.c
49084 +++ b/fs/configfs/dir.c
49085 @@ -1572,7 +1572,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
49086 }
49087 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
49088 struct configfs_dirent *next;
49089 - const char * name;
49090 + const unsigned char * name;
49091 + char d_name[sizeof(next->s_dentry->d_iname)];
49092 int len;
49093
49094 next = list_entry(p, struct configfs_dirent,
49095 @@ -1581,7 +1582,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
49096 continue;
49097
49098 name = configfs_get_name(next);
49099 - len = strlen(name);
49100 + if (next->s_dentry && name == next->s_dentry->d_iname) {
49101 + len = next->s_dentry->d_name.len;
49102 + memcpy(d_name, name, len);
49103 + name = d_name;
49104 + } else
49105 + len = strlen(name);
49106 if (next->s_dentry)
49107 ino = next->s_dentry->d_inode->i_ino;
49108 else
49109 diff --git a/fs/dcache.c b/fs/dcache.c
49110 index 44c0aea..2529092 100644
49111 --- a/fs/dcache.c
49112 +++ b/fs/dcache.c
49113 @@ -45,8 +45,6 @@ EXPORT_SYMBOL(dcache_lock);
49114
49115 static struct kmem_cache *dentry_cache __read_mostly;
49116
49117 -#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
49118 -
49119 /*
49120 * This is the single most critical data structure when it comes
49121 * to the dcache: the hashtable for lookups. Somebody should try
49122 @@ -2319,7 +2317,7 @@ void __init vfs_caches_init(unsigned long mempages)
49123 mempages -= reserve;
49124
49125 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
49126 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
49127 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
49128
49129 dcache_init();
49130 inode_init();
49131 diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
49132 index 39c6ee8..dcee0f1 100644
49133 --- a/fs/debugfs/inode.c
49134 +++ b/fs/debugfs/inode.c
49135 @@ -269,7 +269,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
49136 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
49137 {
49138 return debugfs_create_file(name,
49139 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
49140 + S_IFDIR | S_IRWXU,
49141 +#else
49142 S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
49143 +#endif
49144 parent, NULL, NULL);
49145 }
49146 EXPORT_SYMBOL_GPL(debugfs_create_dir);
49147 diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
49148 index c010ecf..a8d8c59 100644
49149 --- a/fs/dlm/lockspace.c
49150 +++ b/fs/dlm/lockspace.c
49151 @@ -148,7 +148,7 @@ static void lockspace_kobj_release(struct kobject *k)
49152 kfree(ls);
49153 }
49154
49155 -static struct sysfs_ops dlm_attr_ops = {
49156 +static const struct sysfs_ops dlm_attr_ops = {
49157 .show = dlm_attr_show,
49158 .store = dlm_attr_store,
49159 };
49160 diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
49161 index 7a5f1ac..62fa913 100644
49162 --- a/fs/ecryptfs/crypto.c
49163 +++ b/fs/ecryptfs/crypto.c
49164 @@ -418,17 +418,6 @@ static int ecryptfs_encrypt_extent(struct page *enc_extent_page,
49165 rc);
49166 goto out;
49167 }
49168 - if (unlikely(ecryptfs_verbosity > 0)) {
49169 - ecryptfs_printk(KERN_DEBUG, "Encrypting extent "
49170 - "with iv:\n");
49171 - ecryptfs_dump_hex(extent_iv, crypt_stat->iv_bytes);
49172 - ecryptfs_printk(KERN_DEBUG, "First 8 bytes before "
49173 - "encryption:\n");
49174 - ecryptfs_dump_hex((char *)
49175 - (page_address(page)
49176 - + (extent_offset * crypt_stat->extent_size)),
49177 - 8);
49178 - }
49179 rc = ecryptfs_encrypt_page_offset(crypt_stat, enc_extent_page, 0,
49180 page, (extent_offset
49181 * crypt_stat->extent_size),
49182 @@ -441,14 +430,6 @@ static int ecryptfs_encrypt_extent(struct page *enc_extent_page,
49183 goto out;
49184 }
49185 rc = 0;
49186 - if (unlikely(ecryptfs_verbosity > 0)) {
49187 - ecryptfs_printk(KERN_DEBUG, "Encrypt extent [0x%.16x]; "
49188 - "rc = [%d]\n", (extent_base + extent_offset),
49189 - rc);
49190 - ecryptfs_printk(KERN_DEBUG, "First 8 bytes after "
49191 - "encryption:\n");
49192 - ecryptfs_dump_hex((char *)(page_address(enc_extent_page)), 8);
49193 - }
49194 out:
49195 return rc;
49196 }
49197 @@ -545,17 +526,6 @@ static int ecryptfs_decrypt_extent(struct page *page,
49198 rc);
49199 goto out;
49200 }
49201 - if (unlikely(ecryptfs_verbosity > 0)) {
49202 - ecryptfs_printk(KERN_DEBUG, "Decrypting extent "
49203 - "with iv:\n");
49204 - ecryptfs_dump_hex(extent_iv, crypt_stat->iv_bytes);
49205 - ecryptfs_printk(KERN_DEBUG, "First 8 bytes before "
49206 - "decryption:\n");
49207 - ecryptfs_dump_hex((char *)
49208 - (page_address(enc_extent_page)
49209 - + (extent_offset * crypt_stat->extent_size)),
49210 - 8);
49211 - }
49212 rc = ecryptfs_decrypt_page_offset(crypt_stat, page,
49213 (extent_offset
49214 * crypt_stat->extent_size),
49215 @@ -569,16 +539,6 @@ static int ecryptfs_decrypt_extent(struct page *page,
49216 goto out;
49217 }
49218 rc = 0;
49219 - if (unlikely(ecryptfs_verbosity > 0)) {
49220 - ecryptfs_printk(KERN_DEBUG, "Decrypt extent [0x%.16x]; "
49221 - "rc = [%d]\n", (extent_base + extent_offset),
49222 - rc);
49223 - ecryptfs_printk(KERN_DEBUG, "First 8 bytes after "
49224 - "decryption:\n");
49225 - ecryptfs_dump_hex((char *)(page_address(page)
49226 - + (extent_offset
49227 - * crypt_stat->extent_size)), 8);
49228 - }
49229 out:
49230 return rc;
49231 }
49232 @@ -1455,6 +1415,25 @@ static void set_default_header_data(struct ecryptfs_crypt_stat *crypt_stat)
49233 ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE;
49234 }
49235
49236 +void ecryptfs_i_size_init(const char *page_virt, struct inode *inode)
49237 +{
49238 + struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
49239 + struct ecryptfs_crypt_stat *crypt_stat;
49240 + u64 file_size;
49241 +
49242 + crypt_stat = &ecryptfs_inode_to_private(inode)->crypt_stat;
49243 + mount_crypt_stat =
49244 + &ecryptfs_superblock_to_private(inode->i_sb)->mount_crypt_stat;
49245 + if (mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED) {
49246 + file_size = i_size_read(ecryptfs_inode_to_lower(inode));
49247 + if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR)
49248 + file_size += crypt_stat->num_header_bytes_at_front;
49249 + } else
49250 + file_size = get_unaligned_be64(page_virt);
49251 + i_size_write(inode, (loff_t)file_size);
49252 + crypt_stat->flags |= ECRYPTFS_I_SIZE_INITIALIZED;
49253 +}
49254 +
49255 /**
49256 * ecryptfs_read_headers_virt
49257 * @page_virt: The virtual address into which to read the headers
49258 @@ -1485,6 +1464,8 @@ static int ecryptfs_read_headers_virt(char *page_virt,
49259 rc = -EINVAL;
49260 goto out;
49261 }
49262 + if (!(crypt_stat->flags & ECRYPTFS_I_SIZE_INITIALIZED))
49263 + ecryptfs_i_size_init(page_virt, ecryptfs_dentry->d_inode);
49264 offset += MAGIC_ECRYPTFS_MARKER_SIZE_BYTES;
49265 rc = ecryptfs_process_flags(crypt_stat, (page_virt + offset),
49266 &bytes_read);
49267 diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
49268 index 542f625..9685315 100644
49269 --- a/fs/ecryptfs/ecryptfs_kernel.h
49270 +++ b/fs/ecryptfs/ecryptfs_kernel.h
49271 @@ -270,6 +270,7 @@ struct ecryptfs_crypt_stat {
49272 #define ECRYPTFS_ENCFN_USE_MOUNT_FNEK 0x00001000
49273 #define ECRYPTFS_ENCFN_USE_FEK 0x00002000
49274 #define ECRYPTFS_UNLINK_SIGS 0x00004000
49275 +#define ECRYPTFS_I_SIZE_INITIALIZED 0x00008000
49276 u32 flags;
49277 unsigned int file_version;
49278 size_t iv_bytes;
49279 @@ -619,6 +620,7 @@ struct ecryptfs_open_req {
49280 int ecryptfs_interpose(struct dentry *hidden_dentry,
49281 struct dentry *this_dentry, struct super_block *sb,
49282 u32 flags);
49283 +void ecryptfs_i_size_init(const char *page_virt, struct inode *inode);
49284 int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry,
49285 struct dentry *lower_dentry,
49286 struct inode *ecryptfs_dir_inode,
49287 diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
49288 index 3015389..49129f4 100644
49289 --- a/fs/ecryptfs/file.c
49290 +++ b/fs/ecryptfs/file.c
49291 @@ -237,7 +237,8 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
49292 goto out_free;
49293 }
49294 rc = 0;
49295 - crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED);
49296 + crypt_stat->flags &= ~(ECRYPTFS_I_SIZE_INITIALIZED
49297 + | ECRYPTFS_ENCRYPTED);
49298 mutex_unlock(&crypt_stat->cs_mutex);
49299 goto out;
49300 }
49301 @@ -347,7 +348,6 @@ const struct file_operations ecryptfs_main_fops = {
49302 #ifdef CONFIG_COMPAT
49303 .compat_ioctl = ecryptfs_compat_ioctl,
49304 #endif
49305 - .mmap = generic_file_mmap,
49306 .open = ecryptfs_open,
49307 .flush = ecryptfs_flush,
49308 .release = ecryptfs_release,
49309 diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
49310 index 4434e8f..fa05803 100644
49311 --- a/fs/ecryptfs/inode.c
49312 +++ b/fs/ecryptfs/inode.c
49313 @@ -256,10 +256,8 @@ int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry,
49314 struct dentry *lower_dir_dentry;
49315 struct vfsmount *lower_mnt;
49316 struct inode *lower_inode;
49317 - struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
49318 struct ecryptfs_crypt_stat *crypt_stat;
49319 char *page_virt = NULL;
49320 - u64 file_size;
49321 int rc = 0;
49322
49323 lower_dir_dentry = lower_dentry->d_parent;
49324 @@ -334,18 +332,7 @@ int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry,
49325 }
49326 crypt_stat->flags |= ECRYPTFS_METADATA_IN_XATTR;
49327 }
49328 - mount_crypt_stat = &ecryptfs_superblock_to_private(
49329 - ecryptfs_dentry->d_sb)->mount_crypt_stat;
49330 - if (mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED) {
49331 - if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR)
49332 - file_size = (crypt_stat->num_header_bytes_at_front
49333 - + i_size_read(lower_dentry->d_inode));
49334 - else
49335 - file_size = i_size_read(lower_dentry->d_inode);
49336 - } else {
49337 - file_size = get_unaligned_be64(page_virt);
49338 - }
49339 - i_size_write(ecryptfs_dentry->d_inode, (loff_t)file_size);
49340 + ecryptfs_i_size_init(page_virt, ecryptfs_dentry->d_inode);
49341 out_free_kmem:
49342 kmem_cache_free(ecryptfs_header_cache_2, page_virt);
49343 goto out;
49344 @@ -660,7 +647,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
49345 old_fs = get_fs();
49346 set_fs(get_ds());
49347 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
49348 - (char __user *)lower_buf,
49349 + (char __force_user *)lower_buf,
49350 lower_bufsiz);
49351 set_fs(old_fs);
49352 if (rc < 0)
49353 @@ -706,7 +693,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
49354 }
49355 old_fs = get_fs();
49356 set_fs(get_ds());
49357 - rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
49358 + rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len);
49359 set_fs(old_fs);
49360 if (rc < 0)
49361 goto out_free;
49362 @@ -964,7 +951,8 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia)
49363 goto out;
49364 }
49365 rc = 0;
49366 - crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED);
49367 + crypt_stat->flags &= ~(ECRYPTFS_I_SIZE_INITIALIZED
49368 + | ECRYPTFS_ENCRYPTED);
49369 }
49370 }
49371 mutex_unlock(&crypt_stat->cs_mutex);
49372 diff --git a/fs/exec.c b/fs/exec.c
49373 index 86fafc6..23b09e5 100644
49374 --- a/fs/exec.c
49375 +++ b/fs/exec.c
49376 @@ -56,12 +56,28 @@
49377 #include <linux/fsnotify.h>
49378 #include <linux/fs_struct.h>
49379 #include <linux/pipe_fs_i.h>
49380 +#include <linux/random.h>
49381 +#include <linux/seq_file.h>
49382 +
49383 +#ifdef CONFIG_PAX_REFCOUNT
49384 +#include <linux/kallsyms.h>
49385 +#include <linux/kdebug.h>
49386 +#endif
49387
49388 #include <asm/uaccess.h>
49389 #include <asm/mmu_context.h>
49390 #include <asm/tlb.h>
49391 #include "internal.h"
49392
49393 +#ifndef CONFIG_PAX_HAVE_ACL_FLAGS
49394 +void __weak pax_set_initial_flags(struct linux_binprm *bprm) {}
49395 +#endif
49396 +
49397 +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
49398 +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
49399 +EXPORT_SYMBOL(pax_set_initial_flags_func);
49400 +#endif
49401 +
49402 int core_uses_pid;
49403 char core_pattern[CORENAME_MAX_SIZE] = "core";
49404 unsigned int core_pipe_limit;
49405 @@ -178,18 +194,10 @@ struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
49406 int write)
49407 {
49408 struct page *page;
49409 - int ret;
49410
49411 -#ifdef CONFIG_STACK_GROWSUP
49412 - if (write) {
49413 - ret = expand_stack_downwards(bprm->vma, pos);
49414 - if (ret < 0)
49415 - return NULL;
49416 - }
49417 -#endif
49418 - ret = get_user_pages(current, bprm->mm, pos,
49419 - 1, write, 1, &page, NULL);
49420 - if (ret <= 0)
49421 + if (0 > expand_stack_downwards(bprm->vma, pos))
49422 + return NULL;
49423 + if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
49424 return NULL;
49425
49426 if (write) {
49427 @@ -205,6 +213,17 @@ struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
49428 if (size <= ARG_MAX)
49429 return page;
49430
49431 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49432 + // only allow 512KB for argv+env on suid/sgid binaries
49433 + // to prevent easy ASLR exhaustion
49434 + if (((bprm->cred->euid != current_euid()) ||
49435 + (bprm->cred->egid != current_egid())) &&
49436 + (size > (512 * 1024))) {
49437 + put_page(page);
49438 + return NULL;
49439 + }
49440 +#endif
49441 +
49442 /*
49443 * Limit to 1/4-th the stack size for the argv+env strings.
49444 * This ensures that:
49445 @@ -263,6 +282,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
49446 vma->vm_end = STACK_TOP_MAX;
49447 vma->vm_start = vma->vm_end - PAGE_SIZE;
49448 vma->vm_flags = VM_STACK_FLAGS;
49449 +
49450 +#ifdef CONFIG_PAX_SEGMEXEC
49451 + vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
49452 +#endif
49453 +
49454 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
49455
49456 err = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
49457 @@ -276,6 +300,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
49458 mm->stack_vm = mm->total_vm = 1;
49459 up_write(&mm->mmap_sem);
49460 bprm->p = vma->vm_end - sizeof(void *);
49461 +
49462 +#ifdef CONFIG_PAX_RANDUSTACK
49463 + if (randomize_va_space)
49464 + bprm->p ^= random32() & ~PAGE_MASK;
49465 +#endif
49466 +
49467 return 0;
49468 err:
49469 up_write(&mm->mmap_sem);
49470 @@ -510,7 +540,7 @@ int copy_strings_kernel(int argc,char ** argv, struct linux_binprm *bprm)
49471 int r;
49472 mm_segment_t oldfs = get_fs();
49473 set_fs(KERNEL_DS);
49474 - r = copy_strings(argc, (char __user * __user *)argv, bprm);
49475 + r = copy_strings(argc, (__force char __user * __user *)argv, bprm);
49476 set_fs(oldfs);
49477 return r;
49478 }
49479 @@ -540,7 +570,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
49480 unsigned long new_end = old_end - shift;
49481 struct mmu_gather *tlb;
49482
49483 - BUG_ON(new_start > new_end);
49484 + if (new_start >= new_end || new_start < mmap_min_addr)
49485 + return -ENOMEM;
49486
49487 /*
49488 * ensure there are no vmas between where we want to go
49489 @@ -549,6 +580,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
49490 if (vma != find_vma(mm, new_start))
49491 return -EFAULT;
49492
49493 +#ifdef CONFIG_PAX_SEGMEXEC
49494 + BUG_ON(pax_find_mirror_vma(vma));
49495 +#endif
49496 +
49497 /*
49498 * cover the whole range: [new_start, old_end)
49499 */
49500 @@ -630,10 +665,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
49501 stack_top = arch_align_stack(stack_top);
49502 stack_top = PAGE_ALIGN(stack_top);
49503
49504 - if (unlikely(stack_top < mmap_min_addr) ||
49505 - unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
49506 - return -ENOMEM;
49507 -
49508 stack_shift = vma->vm_end - stack_top;
49509
49510 bprm->p -= stack_shift;
49511 @@ -645,6 +676,14 @@ int setup_arg_pages(struct linux_binprm *bprm,
49512 bprm->exec -= stack_shift;
49513
49514 down_write(&mm->mmap_sem);
49515 +
49516 + /* Move stack pages down in memory. */
49517 + if (stack_shift) {
49518 + ret = shift_arg_pages(vma, stack_shift);
49519 + if (ret)
49520 + goto out_unlock;
49521 + }
49522 +
49523 vm_flags = VM_STACK_FLAGS;
49524
49525 /*
49526 @@ -658,19 +697,24 @@ int setup_arg_pages(struct linux_binprm *bprm,
49527 vm_flags &= ~VM_EXEC;
49528 vm_flags |= mm->def_flags;
49529
49530 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
49531 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
49532 + vm_flags &= ~VM_EXEC;
49533 +
49534 +#ifdef CONFIG_PAX_MPROTECT
49535 + if (mm->pax_flags & MF_PAX_MPROTECT)
49536 + vm_flags &= ~VM_MAYEXEC;
49537 +#endif
49538 +
49539 + }
49540 +#endif
49541 +
49542 ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
49543 vm_flags);
49544 if (ret)
49545 goto out_unlock;
49546 BUG_ON(prev != vma);
49547
49548 - /* Move stack pages down in memory. */
49549 - if (stack_shift) {
49550 - ret = shift_arg_pages(vma, stack_shift);
49551 - if (ret)
49552 - goto out_unlock;
49553 - }
49554 -
49555 stack_expand = EXTRA_STACK_VM_PAGES * PAGE_SIZE;
49556 stack_size = vma->vm_end - vma->vm_start;
49557 /*
49558 @@ -744,7 +788,7 @@ int kernel_read(struct file *file, loff_t offset,
49559 old_fs = get_fs();
49560 set_fs(get_ds());
49561 /* The cast to a user pointer is valid due to the set_fs() */
49562 - result = vfs_read(file, (void __user *)addr, count, &pos);
49563 + result = vfs_read(file, (void __force_user *)addr, count, &pos);
49564 set_fs(old_fs);
49565 return result;
49566 }
49567 @@ -985,6 +1029,21 @@ void set_task_comm(struct task_struct *tsk, char *buf)
49568 perf_event_comm(tsk);
49569 }
49570
49571 +static void filename_to_taskname(char *tcomm, const char *fn, unsigned int len)
49572 +{
49573 + int i, ch;
49574 +
49575 + /* Copies the binary name from after last slash */
49576 + for (i = 0; (ch = *(fn++)) != '\0';) {
49577 + if (ch == '/')
49578 + i = 0; /* overwrite what we wrote */
49579 + else
49580 + if (i < len - 1)
49581 + tcomm[i++] = ch;
49582 + }
49583 + tcomm[i] = '\0';
49584 +}
49585 +
49586 int flush_old_exec(struct linux_binprm * bprm)
49587 {
49588 int retval;
49589 @@ -999,6 +1058,7 @@ int flush_old_exec(struct linux_binprm * bprm)
49590
49591 set_mm_exe_file(bprm->mm, bprm->file);
49592
49593 + filename_to_taskname(bprm->tcomm, bprm->filename, sizeof(bprm->tcomm));
49594 /*
49595 * Release all of the old mmap stuff
49596 */
49597 @@ -1023,10 +1083,6 @@ EXPORT_SYMBOL(flush_old_exec);
49598
49599 void setup_new_exec(struct linux_binprm * bprm)
49600 {
49601 - int i, ch;
49602 - char * name;
49603 - char tcomm[sizeof(current->comm)];
49604 -
49605 arch_pick_mmap_layout(current->mm);
49606
49607 /* This is the point of no return */
49608 @@ -1037,18 +1093,7 @@ void setup_new_exec(struct linux_binprm * bprm)
49609 else
49610 set_dumpable(current->mm, suid_dumpable);
49611
49612 - name = bprm->filename;
49613 -
49614 - /* Copies the binary name from after last slash */
49615 - for (i=0; (ch = *(name++)) != '\0';) {
49616 - if (ch == '/')
49617 - i = 0; /* overwrite what we wrote */
49618 - else
49619 - if (i < (sizeof(tcomm) - 1))
49620 - tcomm[i++] = ch;
49621 - }
49622 - tcomm[i] = '\0';
49623 - set_task_comm(current, tcomm);
49624 + set_task_comm(current, bprm->tcomm);
49625
49626 /* Set the new mm task size. We have to do that late because it may
49627 * depend on TIF_32BIT which is only updated in flush_thread() on
49628 @@ -1152,7 +1197,7 @@ int check_unsafe_exec(struct linux_binprm *bprm)
49629 }
49630 rcu_read_unlock();
49631
49632 - if (p->fs->users > n_fs) {
49633 + if (atomic_read(&p->fs->users) > n_fs) {
49634 bprm->unsafe |= LSM_UNSAFE_SHARE;
49635 } else {
49636 res = -EAGAIN;
49637 @@ -1339,6 +1384,21 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
49638
49639 EXPORT_SYMBOL(search_binary_handler);
49640
49641 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49642 +DEFINE_PER_CPU(u64, exec_counter);
49643 +static int __init init_exec_counters(void)
49644 +{
49645 + unsigned int cpu;
49646 +
49647 + for_each_possible_cpu(cpu) {
49648 + per_cpu(exec_counter, cpu) = (u64)cpu;
49649 + }
49650 +
49651 + return 0;
49652 +}
49653 +early_initcall(init_exec_counters);
49654 +#endif
49655 +
49656 /*
49657 * sys_execve() executes a new program.
49658 */
49659 @@ -1347,11 +1407,35 @@ int do_execve(char * filename,
49660 char __user *__user *envp,
49661 struct pt_regs * regs)
49662 {
49663 +#ifdef CONFIG_GRKERNSEC
49664 + struct file *old_exec_file;
49665 + struct acl_subject_label *old_acl;
49666 + struct rlimit old_rlim[RLIM_NLIMITS];
49667 +#endif
49668 struct linux_binprm *bprm;
49669 struct file *file;
49670 struct files_struct *displaced;
49671 bool clear_in_exec;
49672 int retval;
49673 + const struct cred *cred = current_cred();
49674 +
49675 + /*
49676 + * We move the actual failure in case of RLIMIT_NPROC excess from
49677 + * set*uid() to execve() because too many poorly written programs
49678 + * don't check setuid() return code. Here we additionally recheck
49679 + * whether NPROC limit is still exceeded.
49680 + */
49681 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
49682 +
49683 + if ((current->flags & PF_NPROC_EXCEEDED) &&
49684 + atomic_read(&cred->user->processes) > current->signal->rlim[RLIMIT_NPROC].rlim_cur) {
49685 + retval = -EAGAIN;
49686 + goto out_ret;
49687 + }
49688 +
49689 + /* We're below the limit (still or again), so we don't want to make
49690 + * further execve() calls fail. */
49691 + current->flags &= ~PF_NPROC_EXCEEDED;
49692
49693 retval = unshare_files(&displaced);
49694 if (retval)
49695 @@ -1377,12 +1461,27 @@ int do_execve(char * filename,
49696 if (IS_ERR(file))
49697 goto out_unmark;
49698
49699 + if (gr_ptrace_readexec(file, bprm->unsafe)) {
49700 + retval = -EPERM;
49701 + goto out_file;
49702 + }
49703 +
49704 sched_exec();
49705
49706 bprm->file = file;
49707 bprm->filename = filename;
49708 bprm->interp = filename;
49709
49710 + if (gr_process_user_ban()) {
49711 + retval = -EPERM;
49712 + goto out_file;
49713 + }
49714 +
49715 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
49716 + retval = -EACCES;
49717 + goto out_file;
49718 + }
49719 +
49720 retval = bprm_mm_init(bprm);
49721 if (retval)
49722 goto out_file;
49723 @@ -1399,25 +1498,66 @@ int do_execve(char * filename,
49724 if (retval < 0)
49725 goto out;
49726
49727 +#ifdef CONFIG_GRKERNSEC
49728 + old_acl = current->acl;
49729 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
49730 + old_exec_file = current->exec_file;
49731 + get_file(file);
49732 + current->exec_file = file;
49733 +#endif
49734 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49735 + /* limit suid stack to 8MB
49736 + we saved the old limits above and will restore them if this exec fails
49737 + */
49738 + if (((bprm->cred->euid != current_euid()) || (bprm->cred->egid != current_egid())) &&
49739 + (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
49740 + current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
49741 +#endif
49742 +
49743 + if (!gr_tpe_allow(file)) {
49744 + retval = -EACCES;
49745 + goto out_fail;
49746 + }
49747 +
49748 + if (gr_check_crash_exec(file)) {
49749 + retval = -EACCES;
49750 + goto out_fail;
49751 + }
49752 +
49753 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
49754 + bprm->unsafe);
49755 + if (retval < 0)
49756 + goto out_fail;
49757 +
49758 retval = copy_strings_kernel(1, &bprm->filename, bprm);
49759 if (retval < 0)
49760 - goto out;
49761 + goto out_fail;
49762
49763 bprm->exec = bprm->p;
49764 retval = copy_strings(bprm->envc, envp, bprm);
49765 if (retval < 0)
49766 - goto out;
49767 + goto out_fail;
49768
49769 retval = copy_strings(bprm->argc, argv, bprm);
49770 if (retval < 0)
49771 - goto out;
49772 + goto out_fail;
49773 +
49774 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
49775 +
49776 + gr_handle_exec_args(bprm, (const char __user *const __user *)argv);
49777
49778 current->flags &= ~PF_KTHREAD;
49779 retval = search_binary_handler(bprm,regs);
49780 if (retval < 0)
49781 - goto out;
49782 + goto out_fail;
49783 +#ifdef CONFIG_GRKERNSEC
49784 + if (old_exec_file)
49785 + fput(old_exec_file);
49786 +#endif
49787
49788 /* execve succeeded */
49789 +
49790 + increment_exec_counter();
49791 current->fs->in_exec = 0;
49792 current->in_execve = 0;
49793 acct_update_integrals(current);
49794 @@ -1426,6 +1566,14 @@ int do_execve(char * filename,
49795 put_files_struct(displaced);
49796 return retval;
49797
49798 +out_fail:
49799 +#ifdef CONFIG_GRKERNSEC
49800 + current->acl = old_acl;
49801 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
49802 + fput(current->exec_file);
49803 + current->exec_file = old_exec_file;
49804 +#endif
49805 +
49806 out:
49807 if (bprm->mm) {
49808 acct_arg_size(bprm, 0);
49809 @@ -1591,6 +1739,220 @@ out:
49810 return ispipe;
49811 }
49812
49813 +int pax_check_flags(unsigned long *flags)
49814 +{
49815 + int retval = 0;
49816 +
49817 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
49818 + if (*flags & MF_PAX_SEGMEXEC)
49819 + {
49820 + *flags &= ~MF_PAX_SEGMEXEC;
49821 + retval = -EINVAL;
49822 + }
49823 +#endif
49824 +
49825 + if ((*flags & MF_PAX_PAGEEXEC)
49826 +
49827 +#ifdef CONFIG_PAX_PAGEEXEC
49828 + && (*flags & MF_PAX_SEGMEXEC)
49829 +#endif
49830 +
49831 + )
49832 + {
49833 + *flags &= ~MF_PAX_PAGEEXEC;
49834 + retval = -EINVAL;
49835 + }
49836 +
49837 + if ((*flags & MF_PAX_MPROTECT)
49838 +
49839 +#ifdef CONFIG_PAX_MPROTECT
49840 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
49841 +#endif
49842 +
49843 + )
49844 + {
49845 + *flags &= ~MF_PAX_MPROTECT;
49846 + retval = -EINVAL;
49847 + }
49848 +
49849 + if ((*flags & MF_PAX_EMUTRAMP)
49850 +
49851 +#ifdef CONFIG_PAX_EMUTRAMP
49852 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
49853 +#endif
49854 +
49855 + )
49856 + {
49857 + *flags &= ~MF_PAX_EMUTRAMP;
49858 + retval = -EINVAL;
49859 + }
49860 +
49861 + return retval;
49862 +}
49863 +
49864 +EXPORT_SYMBOL(pax_check_flags);
49865 +
49866 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
49867 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
49868 +{
49869 + struct task_struct *tsk = current;
49870 + struct mm_struct *mm = current->mm;
49871 + char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
49872 + char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
49873 + char *path_exec = NULL;
49874 + char *path_fault = NULL;
49875 + unsigned long start = 0UL, end = 0UL, offset = 0UL;
49876 +
49877 + if (buffer_exec && buffer_fault) {
49878 + struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
49879 +
49880 + down_read(&mm->mmap_sem);
49881 + vma = mm->mmap;
49882 + while (vma && (!vma_exec || !vma_fault)) {
49883 + if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
49884 + vma_exec = vma;
49885 + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
49886 + vma_fault = vma;
49887 + vma = vma->vm_next;
49888 + }
49889 + if (vma_exec) {
49890 + path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
49891 + if (IS_ERR(path_exec))
49892 + path_exec = "<path too long>";
49893 + else {
49894 + path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
49895 + if (path_exec) {
49896 + *path_exec = 0;
49897 + path_exec = buffer_exec;
49898 + } else
49899 + path_exec = "<path too long>";
49900 + }
49901 + }
49902 + if (vma_fault) {
49903 + start = vma_fault->vm_start;
49904 + end = vma_fault->vm_end;
49905 + offset = vma_fault->vm_pgoff << PAGE_SHIFT;
49906 + if (vma_fault->vm_file) {
49907 + path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
49908 + if (IS_ERR(path_fault))
49909 + path_fault = "<path too long>";
49910 + else {
49911 + path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
49912 + if (path_fault) {
49913 + *path_fault = 0;
49914 + path_fault = buffer_fault;
49915 + } else
49916 + path_fault = "<path too long>";
49917 + }
49918 + } else
49919 + path_fault = "<anonymous mapping>";
49920 + }
49921 + up_read(&mm->mmap_sem);
49922 + }
49923 + if (tsk->signal->curr_ip)
49924 + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
49925 + else
49926 + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
49927 + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
49928 + "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
49929 + task_uid(tsk), task_euid(tsk), pc, sp);
49930 + free_page((unsigned long)buffer_exec);
49931 + free_page((unsigned long)buffer_fault);
49932 + pax_report_insns(regs, pc, sp);
49933 + do_coredump(SIGKILL, SIGKILL, regs);
49934 +}
49935 +#endif
49936 +
49937 +#ifdef CONFIG_PAX_REFCOUNT
49938 +void pax_report_refcount_overflow(struct pt_regs *regs)
49939 +{
49940 + if (current->signal->curr_ip)
49941 + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
49942 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
49943 + else
49944 + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
49945 + current->comm, task_pid_nr(current), current_uid(), current_euid());
49946 + print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
49947 + show_regs(regs);
49948 + force_sig_specific(SIGKILL, current);
49949 +}
49950 +#endif
49951 +
49952 +#ifdef CONFIG_PAX_USERCOPY
49953 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
49954 +int object_is_on_stack(const void *obj, unsigned long len)
49955 +{
49956 + const void * const stack = task_stack_page(current);
49957 + const void * const stackend = stack + THREAD_SIZE;
49958 +
49959 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
49960 + const void *frame = NULL;
49961 + const void *oldframe;
49962 +#endif
49963 +
49964 + if (obj + len < obj)
49965 + return -1;
49966 +
49967 + if (obj + len <= stack || stackend <= obj)
49968 + return 0;
49969 +
49970 + if (obj < stack || stackend < obj + len)
49971 + return -1;
49972 +
49973 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
49974 + oldframe = __builtin_frame_address(1);
49975 + if (oldframe)
49976 + frame = __builtin_frame_address(2);
49977 + /*
49978 + low ----------------------------------------------> high
49979 + [saved bp][saved ip][args][local vars][saved bp][saved ip]
49980 + ^----------------^
49981 + allow copies only within here
49982 + */
49983 + while (stack <= frame && frame < stackend) {
49984 + /* if obj + len extends past the last frame, this
49985 + check won't pass and the next frame will be 0,
49986 + causing us to bail out and correctly report
49987 + the copy as invalid
49988 + */
49989 + if (obj + len <= frame)
49990 + return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
49991 + oldframe = frame;
49992 + frame = *(const void * const *)frame;
49993 + }
49994 + return -1;
49995 +#else
49996 + return 1;
49997 +#endif
49998 +}
49999 +
50000 +
50001 +NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
50002 +{
50003 + if (current->signal->curr_ip)
50004 + printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
50005 + &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
50006 + else
50007 + printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
50008 + to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
50009 +
50010 + dump_stack();
50011 + gr_handle_kernel_exploit();
50012 + do_group_exit(SIGKILL);
50013 +}
50014 +#endif
50015 +
50016 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
50017 +void pax_track_stack(void)
50018 +{
50019 + unsigned long sp = (unsigned long)&sp;
50020 + if (sp < current_thread_info()->lowest_stack &&
50021 + sp > (unsigned long)task_stack_page(current))
50022 + current_thread_info()->lowest_stack = sp;
50023 +}
50024 +EXPORT_SYMBOL(pax_track_stack);
50025 +#endif
50026 +
50027 static int zap_process(struct task_struct *start)
50028 {
50029 struct task_struct *t;
50030 @@ -1793,17 +2155,17 @@ static void wait_for_dump_helpers(struct file *file)
50031 pipe = file->f_path.dentry->d_inode->i_pipe;
50032
50033 pipe_lock(pipe);
50034 - pipe->readers++;
50035 - pipe->writers--;
50036 + atomic_inc(&pipe->readers);
50037 + atomic_dec(&pipe->writers);
50038
50039 - while ((pipe->readers > 1) && (!signal_pending(current))) {
50040 + while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
50041 wake_up_interruptible_sync(&pipe->wait);
50042 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
50043 pipe_wait(pipe);
50044 }
50045
50046 - pipe->readers--;
50047 - pipe->writers++;
50048 + atomic_dec(&pipe->readers);
50049 + atomic_inc(&pipe->writers);
50050 pipe_unlock(pipe);
50051
50052 }
50053 @@ -1826,10 +2188,13 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
50054 char **helper_argv = NULL;
50055 int helper_argc = 0;
50056 int dump_count = 0;
50057 - static atomic_t core_dump_count = ATOMIC_INIT(0);
50058 + static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
50059
50060 audit_core_dumps(signr);
50061
50062 + if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
50063 + gr_handle_brute_attach(current, mm->flags);
50064 +
50065 binfmt = mm->binfmt;
50066 if (!binfmt || !binfmt->core_dump)
50067 goto fail;
50068 @@ -1874,6 +2239,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
50069 */
50070 clear_thread_flag(TIF_SIGPENDING);
50071
50072 + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
50073 +
50074 /*
50075 * lock_kernel() because format_corename() is controlled by sysctl, which
50076 * uses lock_kernel()
50077 @@ -1908,7 +2275,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
50078 goto fail_unlock;
50079 }
50080
50081 - dump_count = atomic_inc_return(&core_dump_count);
50082 + dump_count = atomic_inc_return_unchecked(&core_dump_count);
50083 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
50084 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
50085 task_tgid_vnr(current), current->comm);
50086 @@ -1972,7 +2339,7 @@ close_fail:
50087 filp_close(file, NULL);
50088 fail_dropcount:
50089 if (dump_count)
50090 - atomic_dec(&core_dump_count);
50091 + atomic_dec_unchecked(&core_dump_count);
50092 fail_unlock:
50093 if (helper_argv)
50094 argv_free(helper_argv);
50095 diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
50096 index 7f8d2e5..a1abdbb 100644
50097 --- a/fs/ext2/balloc.c
50098 +++ b/fs/ext2/balloc.c
50099 @@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
50100
50101 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
50102 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
50103 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
50104 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
50105 sbi->s_resuid != current_fsuid() &&
50106 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
50107 return 0;
50108 diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
50109 index 27967f9..9f2a5fb 100644
50110 --- a/fs/ext3/balloc.c
50111 +++ b/fs/ext3/balloc.c
50112 @@ -1421,7 +1421,7 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi)
50113
50114 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
50115 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
50116 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
50117 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
50118 sbi->s_resuid != current_fsuid() &&
50119 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
50120 return 0;
50121 diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
50122 index e85b63c..80398e6 100644
50123 --- a/fs/ext4/balloc.c
50124 +++ b/fs/ext4/balloc.c
50125 @@ -570,7 +570,7 @@ int ext4_has_free_blocks(struct ext4_sb_info *sbi, s64 nblocks)
50126 /* Hm, nope. Are (enough) root reserved blocks available? */
50127 if (sbi->s_resuid == current_fsuid() ||
50128 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
50129 - capable(CAP_SYS_RESOURCE)) {
50130 + capable_nolog(CAP_SYS_RESOURCE)) {
50131 if (free_blocks >= (nblocks + dirty_blocks))
50132 return 1;
50133 }
50134 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
50135 index 67c46ed..1f237e5 100644
50136 --- a/fs/ext4/ext4.h
50137 +++ b/fs/ext4/ext4.h
50138 @@ -1077,19 +1077,19 @@ struct ext4_sb_info {
50139
50140 /* stats for buddy allocator */
50141 spinlock_t s_mb_pa_lock;
50142 - atomic_t s_bal_reqs; /* number of reqs with len > 1 */
50143 - atomic_t s_bal_success; /* we found long enough chunks */
50144 - atomic_t s_bal_allocated; /* in blocks */
50145 - atomic_t s_bal_ex_scanned; /* total extents scanned */
50146 - atomic_t s_bal_goals; /* goal hits */
50147 - atomic_t s_bal_breaks; /* too long searches */
50148 - atomic_t s_bal_2orders; /* 2^order hits */
50149 + atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
50150 + atomic_unchecked_t s_bal_success; /* we found long enough chunks */
50151 + atomic_unchecked_t s_bal_allocated; /* in blocks */
50152 + atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
50153 + atomic_unchecked_t s_bal_goals; /* goal hits */
50154 + atomic_unchecked_t s_bal_breaks; /* too long searches */
50155 + atomic_unchecked_t s_bal_2orders; /* 2^order hits */
50156 spinlock_t s_bal_lock;
50157 unsigned long s_mb_buddies_generated;
50158 unsigned long long s_mb_generation_time;
50159 - atomic_t s_mb_lost_chunks;
50160 - atomic_t s_mb_preallocated;
50161 - atomic_t s_mb_discarded;
50162 + atomic_unchecked_t s_mb_lost_chunks;
50163 + atomic_unchecked_t s_mb_preallocated;
50164 + atomic_unchecked_t s_mb_discarded;
50165 atomic_t s_lock_busy;
50166
50167 /* locality groups */
50168 diff --git a/fs/ext4/file.c b/fs/ext4/file.c
50169 index 2a60541..7439d61 100644
50170 --- a/fs/ext4/file.c
50171 +++ b/fs/ext4/file.c
50172 @@ -122,8 +122,8 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
50173 cp = d_path(&path, buf, sizeof(buf));
50174 path_put(&path);
50175 if (!IS_ERR(cp)) {
50176 - memcpy(sbi->s_es->s_last_mounted, cp,
50177 - sizeof(sbi->s_es->s_last_mounted));
50178 + strlcpy(sbi->s_es->s_last_mounted, cp,
50179 + sizeof(sbi->s_es->s_last_mounted));
50180 sb->s_dirt = 1;
50181 }
50182 }
50183 diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
50184 index 42bac1b..0aab9d8 100644
50185 --- a/fs/ext4/mballoc.c
50186 +++ b/fs/ext4/mballoc.c
50187 @@ -1755,7 +1755,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
50188 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
50189
50190 if (EXT4_SB(sb)->s_mb_stats)
50191 - atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
50192 + atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
50193
50194 break;
50195 }
50196 @@ -2131,7 +2131,7 @@ repeat:
50197 ac->ac_status = AC_STATUS_CONTINUE;
50198 ac->ac_flags |= EXT4_MB_HINT_FIRST;
50199 cr = 3;
50200 - atomic_inc(&sbi->s_mb_lost_chunks);
50201 + atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
50202 goto repeat;
50203 }
50204 }
50205 @@ -2174,6 +2174,8 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
50206 ext4_grpblk_t counters[16];
50207 } sg;
50208
50209 + pax_track_stack();
50210 +
50211 group--;
50212 if (group == 0)
50213 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
50214 @@ -2534,25 +2536,25 @@ int ext4_mb_release(struct super_block *sb)
50215 if (sbi->s_mb_stats) {
50216 printk(KERN_INFO
50217 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
50218 - atomic_read(&sbi->s_bal_allocated),
50219 - atomic_read(&sbi->s_bal_reqs),
50220 - atomic_read(&sbi->s_bal_success));
50221 + atomic_read_unchecked(&sbi->s_bal_allocated),
50222 + atomic_read_unchecked(&sbi->s_bal_reqs),
50223 + atomic_read_unchecked(&sbi->s_bal_success));
50224 printk(KERN_INFO
50225 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
50226 "%u 2^N hits, %u breaks, %u lost\n",
50227 - atomic_read(&sbi->s_bal_ex_scanned),
50228 - atomic_read(&sbi->s_bal_goals),
50229 - atomic_read(&sbi->s_bal_2orders),
50230 - atomic_read(&sbi->s_bal_breaks),
50231 - atomic_read(&sbi->s_mb_lost_chunks));
50232 + atomic_read_unchecked(&sbi->s_bal_ex_scanned),
50233 + atomic_read_unchecked(&sbi->s_bal_goals),
50234 + atomic_read_unchecked(&sbi->s_bal_2orders),
50235 + atomic_read_unchecked(&sbi->s_bal_breaks),
50236 + atomic_read_unchecked(&sbi->s_mb_lost_chunks));
50237 printk(KERN_INFO
50238 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
50239 sbi->s_mb_buddies_generated++,
50240 sbi->s_mb_generation_time);
50241 printk(KERN_INFO
50242 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
50243 - atomic_read(&sbi->s_mb_preallocated),
50244 - atomic_read(&sbi->s_mb_discarded));
50245 + atomic_read_unchecked(&sbi->s_mb_preallocated),
50246 + atomic_read_unchecked(&sbi->s_mb_discarded));
50247 }
50248
50249 free_percpu(sbi->s_locality_groups);
50250 @@ -3034,16 +3036,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
50251 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
50252
50253 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
50254 - atomic_inc(&sbi->s_bal_reqs);
50255 - atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
50256 + atomic_inc_unchecked(&sbi->s_bal_reqs);
50257 + atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
50258 if (ac->ac_o_ex.fe_len >= ac->ac_g_ex.fe_len)
50259 - atomic_inc(&sbi->s_bal_success);
50260 - atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
50261 + atomic_inc_unchecked(&sbi->s_bal_success);
50262 + atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
50263 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
50264 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
50265 - atomic_inc(&sbi->s_bal_goals);
50266 + atomic_inc_unchecked(&sbi->s_bal_goals);
50267 if (ac->ac_found > sbi->s_mb_max_to_scan)
50268 - atomic_inc(&sbi->s_bal_breaks);
50269 + atomic_inc_unchecked(&sbi->s_bal_breaks);
50270 }
50271
50272 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
50273 @@ -3443,7 +3445,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
50274 trace_ext4_mb_new_inode_pa(ac, pa);
50275
50276 ext4_mb_use_inode_pa(ac, pa);
50277 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
50278 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
50279
50280 ei = EXT4_I(ac->ac_inode);
50281 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
50282 @@ -3503,7 +3505,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
50283 trace_ext4_mb_new_group_pa(ac, pa);
50284
50285 ext4_mb_use_group_pa(ac, pa);
50286 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
50287 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
50288
50289 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
50290 lg = ac->ac_lg;
50291 @@ -3607,7 +3609,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
50292 * from the bitmap and continue.
50293 */
50294 }
50295 - atomic_add(free, &sbi->s_mb_discarded);
50296 + atomic_add_unchecked(free, &sbi->s_mb_discarded);
50297
50298 return err;
50299 }
50300 @@ -3626,7 +3628,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
50301 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
50302 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
50303 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
50304 - atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
50305 + atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
50306
50307 if (ac) {
50308 ac->ac_sb = sb;
50309 diff --git a/fs/ext4/super.c b/fs/ext4/super.c
50310 index f1e7077..edd86b2 100644
50311 --- a/fs/ext4/super.c
50312 +++ b/fs/ext4/super.c
50313 @@ -2286,7 +2286,7 @@ static void ext4_sb_release(struct kobject *kobj)
50314 }
50315
50316
50317 -static struct sysfs_ops ext4_attr_ops = {
50318 +static const struct sysfs_ops ext4_attr_ops = {
50319 .show = ext4_attr_show,
50320 .store = ext4_attr_store,
50321 };
50322 diff --git a/fs/fcntl.c b/fs/fcntl.c
50323 index 97e01dc..e9aab2d 100644
50324 --- a/fs/fcntl.c
50325 +++ b/fs/fcntl.c
50326 @@ -223,6 +223,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
50327 if (err)
50328 return err;
50329
50330 + if (gr_handle_chroot_fowner(pid, type))
50331 + return -ENOENT;
50332 + if (gr_check_protected_task_fowner(pid, type))
50333 + return -EACCES;
50334 +
50335 f_modown(filp, pid, type, force);
50336 return 0;
50337 }
50338 @@ -265,7 +270,7 @@ pid_t f_getown(struct file *filp)
50339
50340 static int f_setown_ex(struct file *filp, unsigned long arg)
50341 {
50342 - struct f_owner_ex * __user owner_p = (void * __user)arg;
50343 + struct f_owner_ex __user *owner_p = (void __user *)arg;
50344 struct f_owner_ex owner;
50345 struct pid *pid;
50346 int type;
50347 @@ -305,7 +310,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
50348
50349 static int f_getown_ex(struct file *filp, unsigned long arg)
50350 {
50351 - struct f_owner_ex * __user owner_p = (void * __user)arg;
50352 + struct f_owner_ex __user *owner_p = (void __user *)arg;
50353 struct f_owner_ex owner;
50354 int ret = 0;
50355
50356 @@ -344,6 +349,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
50357 switch (cmd) {
50358 case F_DUPFD:
50359 case F_DUPFD_CLOEXEC:
50360 + gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
50361 if (arg >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
50362 break;
50363 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
50364 diff --git a/fs/fifo.c b/fs/fifo.c
50365 index f8f97b8..b1f2259 100644
50366 --- a/fs/fifo.c
50367 +++ b/fs/fifo.c
50368 @@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
50369 */
50370 filp->f_op = &read_pipefifo_fops;
50371 pipe->r_counter++;
50372 - if (pipe->readers++ == 0)
50373 + if (atomic_inc_return(&pipe->readers) == 1)
50374 wake_up_partner(inode);
50375
50376 - if (!pipe->writers) {
50377 + if (!atomic_read(&pipe->writers)) {
50378 if ((filp->f_flags & O_NONBLOCK)) {
50379 /* suppress POLLHUP until we have
50380 * seen a writer */
50381 @@ -83,15 +83,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
50382 * errno=ENXIO when there is no process reading the FIFO.
50383 */
50384 ret = -ENXIO;
50385 - if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
50386 + if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
50387 goto err;
50388
50389 filp->f_op = &write_pipefifo_fops;
50390 pipe->w_counter++;
50391 - if (!pipe->writers++)
50392 + if (atomic_inc_return(&pipe->writers) == 1)
50393 wake_up_partner(inode);
50394
50395 - if (!pipe->readers) {
50396 + if (!atomic_read(&pipe->readers)) {
50397 wait_for_partner(inode, &pipe->r_counter);
50398 if (signal_pending(current))
50399 goto err_wr;
50400 @@ -107,11 +107,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
50401 */
50402 filp->f_op = &rdwr_pipefifo_fops;
50403
50404 - pipe->readers++;
50405 - pipe->writers++;
50406 + atomic_inc(&pipe->readers);
50407 + atomic_inc(&pipe->writers);
50408 pipe->r_counter++;
50409 pipe->w_counter++;
50410 - if (pipe->readers == 1 || pipe->writers == 1)
50411 + if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
50412 wake_up_partner(inode);
50413 break;
50414
50415 @@ -125,19 +125,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
50416 return 0;
50417
50418 err_rd:
50419 - if (!--pipe->readers)
50420 + if (atomic_dec_and_test(&pipe->readers))
50421 wake_up_interruptible(&pipe->wait);
50422 ret = -ERESTARTSYS;
50423 goto err;
50424
50425 err_wr:
50426 - if (!--pipe->writers)
50427 + if (atomic_dec_and_test(&pipe->writers))
50428 wake_up_interruptible(&pipe->wait);
50429 ret = -ERESTARTSYS;
50430 goto err;
50431
50432 err:
50433 - if (!pipe->readers && !pipe->writers)
50434 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
50435 free_pipe_info(inode);
50436
50437 err_nocleanup:
50438 diff --git a/fs/file.c b/fs/file.c
50439 index 87e1290..a930cc4 100644
50440 --- a/fs/file.c
50441 +++ b/fs/file.c
50442 @@ -14,6 +14,7 @@
50443 #include <linux/slab.h>
50444 #include <linux/vmalloc.h>
50445 #include <linux/file.h>
50446 +#include <linux/security.h>
50447 #include <linux/fdtable.h>
50448 #include <linux/bitops.h>
50449 #include <linux/interrupt.h>
50450 @@ -257,6 +258,8 @@ int expand_files(struct files_struct *files, int nr)
50451 * N.B. For clone tasks sharing a files structure, this test
50452 * will limit the total number of files that can be opened.
50453 */
50454 +
50455 + gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
50456 if (nr >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
50457 return -EMFILE;
50458
50459 diff --git a/fs/filesystems.c b/fs/filesystems.c
50460 index a24c58e..53f91ee 100644
50461 --- a/fs/filesystems.c
50462 +++ b/fs/filesystems.c
50463 @@ -272,7 +272,12 @@ struct file_system_type *get_fs_type(const char *name)
50464 int len = dot ? dot - name : strlen(name);
50465
50466 fs = __get_fs_type(name, len);
50467 +
50468 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
50469 + if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
50470 +#else
50471 if (!fs && (request_module("%.*s", len, name) == 0))
50472 +#endif
50473 fs = __get_fs_type(name, len);
50474
50475 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
50476 diff --git a/fs/fs_struct.c b/fs/fs_struct.c
50477 index eee0590..1181166 100644
50478 --- a/fs/fs_struct.c
50479 +++ b/fs/fs_struct.c
50480 @@ -4,6 +4,7 @@
50481 #include <linux/path.h>
50482 #include <linux/slab.h>
50483 #include <linux/fs_struct.h>
50484 +#include <linux/grsecurity.h>
50485
50486 /*
50487 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
50488 @@ -17,6 +18,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
50489 old_root = fs->root;
50490 fs->root = *path;
50491 path_get(path);
50492 + gr_set_chroot_entries(current, path);
50493 write_unlock(&fs->lock);
50494 if (old_root.dentry)
50495 path_put(&old_root);
50496 @@ -56,6 +58,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
50497 && fs->root.mnt == old_root->mnt) {
50498 path_get(new_root);
50499 fs->root = *new_root;
50500 + gr_set_chroot_entries(p, new_root);
50501 count++;
50502 }
50503 if (fs->pwd.dentry == old_root->dentry
50504 @@ -89,7 +92,8 @@ void exit_fs(struct task_struct *tsk)
50505 task_lock(tsk);
50506 write_lock(&fs->lock);
50507 tsk->fs = NULL;
50508 - kill = !--fs->users;
50509 + gr_clear_chroot_entries(tsk);
50510 + kill = !atomic_dec_return(&fs->users);
50511 write_unlock(&fs->lock);
50512 task_unlock(tsk);
50513 if (kill)
50514 @@ -102,7 +106,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
50515 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
50516 /* We don't need to lock fs - think why ;-) */
50517 if (fs) {
50518 - fs->users = 1;
50519 + atomic_set(&fs->users, 1);
50520 fs->in_exec = 0;
50521 rwlock_init(&fs->lock);
50522 fs->umask = old->umask;
50523 @@ -127,8 +131,9 @@ int unshare_fs_struct(void)
50524
50525 task_lock(current);
50526 write_lock(&fs->lock);
50527 - kill = !--fs->users;
50528 + kill = !atomic_dec_return(&fs->users);
50529 current->fs = new_fs;
50530 + gr_set_chroot_entries(current, &new_fs->root);
50531 write_unlock(&fs->lock);
50532 task_unlock(current);
50533
50534 @@ -141,13 +146,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
50535
50536 int current_umask(void)
50537 {
50538 - return current->fs->umask;
50539 + return current->fs->umask | gr_acl_umask();
50540 }
50541 EXPORT_SYMBOL(current_umask);
50542
50543 /* to be mentioned only in INIT_TASK */
50544 struct fs_struct init_fs = {
50545 - .users = 1,
50546 + .users = ATOMIC_INIT(1),
50547 .lock = __RW_LOCK_UNLOCKED(init_fs.lock),
50548 .umask = 0022,
50549 };
50550 @@ -162,12 +167,13 @@ void daemonize_fs_struct(void)
50551 task_lock(current);
50552
50553 write_lock(&init_fs.lock);
50554 - init_fs.users++;
50555 + atomic_inc(&init_fs.users);
50556 write_unlock(&init_fs.lock);
50557
50558 write_lock(&fs->lock);
50559 current->fs = &init_fs;
50560 - kill = !--fs->users;
50561 + gr_set_chroot_entries(current, &current->fs->root);
50562 + kill = !atomic_dec_return(&fs->users);
50563 write_unlock(&fs->lock);
50564
50565 task_unlock(current);
50566 diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
50567 index 9905350..02eaec4 100644
50568 --- a/fs/fscache/cookie.c
50569 +++ b/fs/fscache/cookie.c
50570 @@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
50571 parent ? (char *) parent->def->name : "<no-parent>",
50572 def->name, netfs_data);
50573
50574 - fscache_stat(&fscache_n_acquires);
50575 + fscache_stat_unchecked(&fscache_n_acquires);
50576
50577 /* if there's no parent cookie, then we don't create one here either */
50578 if (!parent) {
50579 - fscache_stat(&fscache_n_acquires_null);
50580 + fscache_stat_unchecked(&fscache_n_acquires_null);
50581 _leave(" [no parent]");
50582 return NULL;
50583 }
50584 @@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
50585 /* allocate and initialise a cookie */
50586 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
50587 if (!cookie) {
50588 - fscache_stat(&fscache_n_acquires_oom);
50589 + fscache_stat_unchecked(&fscache_n_acquires_oom);
50590 _leave(" [ENOMEM]");
50591 return NULL;
50592 }
50593 @@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
50594
50595 switch (cookie->def->type) {
50596 case FSCACHE_COOKIE_TYPE_INDEX:
50597 - fscache_stat(&fscache_n_cookie_index);
50598 + fscache_stat_unchecked(&fscache_n_cookie_index);
50599 break;
50600 case FSCACHE_COOKIE_TYPE_DATAFILE:
50601 - fscache_stat(&fscache_n_cookie_data);
50602 + fscache_stat_unchecked(&fscache_n_cookie_data);
50603 break;
50604 default:
50605 - fscache_stat(&fscache_n_cookie_special);
50606 + fscache_stat_unchecked(&fscache_n_cookie_special);
50607 break;
50608 }
50609
50610 @@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
50611 if (fscache_acquire_non_index_cookie(cookie) < 0) {
50612 atomic_dec(&parent->n_children);
50613 __fscache_cookie_put(cookie);
50614 - fscache_stat(&fscache_n_acquires_nobufs);
50615 + fscache_stat_unchecked(&fscache_n_acquires_nobufs);
50616 _leave(" = NULL");
50617 return NULL;
50618 }
50619 }
50620
50621 - fscache_stat(&fscache_n_acquires_ok);
50622 + fscache_stat_unchecked(&fscache_n_acquires_ok);
50623 _leave(" = %p", cookie);
50624 return cookie;
50625 }
50626 @@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
50627 cache = fscache_select_cache_for_object(cookie->parent);
50628 if (!cache) {
50629 up_read(&fscache_addremove_sem);
50630 - fscache_stat(&fscache_n_acquires_no_cache);
50631 + fscache_stat_unchecked(&fscache_n_acquires_no_cache);
50632 _leave(" = -ENOMEDIUM [no cache]");
50633 return -ENOMEDIUM;
50634 }
50635 @@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
50636 object = cache->ops->alloc_object(cache, cookie);
50637 fscache_stat_d(&fscache_n_cop_alloc_object);
50638 if (IS_ERR(object)) {
50639 - fscache_stat(&fscache_n_object_no_alloc);
50640 + fscache_stat_unchecked(&fscache_n_object_no_alloc);
50641 ret = PTR_ERR(object);
50642 goto error;
50643 }
50644
50645 - fscache_stat(&fscache_n_object_alloc);
50646 + fscache_stat_unchecked(&fscache_n_object_alloc);
50647
50648 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
50649
50650 @@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
50651 struct fscache_object *object;
50652 struct hlist_node *_p;
50653
50654 - fscache_stat(&fscache_n_updates);
50655 + fscache_stat_unchecked(&fscache_n_updates);
50656
50657 if (!cookie) {
50658 - fscache_stat(&fscache_n_updates_null);
50659 + fscache_stat_unchecked(&fscache_n_updates_null);
50660 _leave(" [no cookie]");
50661 return;
50662 }
50663 @@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
50664 struct fscache_object *object;
50665 unsigned long event;
50666
50667 - fscache_stat(&fscache_n_relinquishes);
50668 + fscache_stat_unchecked(&fscache_n_relinquishes);
50669 if (retire)
50670 - fscache_stat(&fscache_n_relinquishes_retire);
50671 + fscache_stat_unchecked(&fscache_n_relinquishes_retire);
50672
50673 if (!cookie) {
50674 - fscache_stat(&fscache_n_relinquishes_null);
50675 + fscache_stat_unchecked(&fscache_n_relinquishes_null);
50676 _leave(" [no cookie]");
50677 return;
50678 }
50679 @@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
50680
50681 /* wait for the cookie to finish being instantiated (or to fail) */
50682 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
50683 - fscache_stat(&fscache_n_relinquishes_waitcrt);
50684 + fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
50685 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
50686 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
50687 }
50688 diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
50689 index edd7434..0725e66 100644
50690 --- a/fs/fscache/internal.h
50691 +++ b/fs/fscache/internal.h
50692 @@ -136,94 +136,94 @@ extern void fscache_proc_cleanup(void);
50693 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
50694 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
50695
50696 -extern atomic_t fscache_n_op_pend;
50697 -extern atomic_t fscache_n_op_run;
50698 -extern atomic_t fscache_n_op_enqueue;
50699 -extern atomic_t fscache_n_op_deferred_release;
50700 -extern atomic_t fscache_n_op_release;
50701 -extern atomic_t fscache_n_op_gc;
50702 -extern atomic_t fscache_n_op_cancelled;
50703 -extern atomic_t fscache_n_op_rejected;
50704 +extern atomic_unchecked_t fscache_n_op_pend;
50705 +extern atomic_unchecked_t fscache_n_op_run;
50706 +extern atomic_unchecked_t fscache_n_op_enqueue;
50707 +extern atomic_unchecked_t fscache_n_op_deferred_release;
50708 +extern atomic_unchecked_t fscache_n_op_release;
50709 +extern atomic_unchecked_t fscache_n_op_gc;
50710 +extern atomic_unchecked_t fscache_n_op_cancelled;
50711 +extern atomic_unchecked_t fscache_n_op_rejected;
50712
50713 -extern atomic_t fscache_n_attr_changed;
50714 -extern atomic_t fscache_n_attr_changed_ok;
50715 -extern atomic_t fscache_n_attr_changed_nobufs;
50716 -extern atomic_t fscache_n_attr_changed_nomem;
50717 -extern atomic_t fscache_n_attr_changed_calls;
50718 +extern atomic_unchecked_t fscache_n_attr_changed;
50719 +extern atomic_unchecked_t fscache_n_attr_changed_ok;
50720 +extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
50721 +extern atomic_unchecked_t fscache_n_attr_changed_nomem;
50722 +extern atomic_unchecked_t fscache_n_attr_changed_calls;
50723
50724 -extern atomic_t fscache_n_allocs;
50725 -extern atomic_t fscache_n_allocs_ok;
50726 -extern atomic_t fscache_n_allocs_wait;
50727 -extern atomic_t fscache_n_allocs_nobufs;
50728 -extern atomic_t fscache_n_allocs_intr;
50729 -extern atomic_t fscache_n_allocs_object_dead;
50730 -extern atomic_t fscache_n_alloc_ops;
50731 -extern atomic_t fscache_n_alloc_op_waits;
50732 +extern atomic_unchecked_t fscache_n_allocs;
50733 +extern atomic_unchecked_t fscache_n_allocs_ok;
50734 +extern atomic_unchecked_t fscache_n_allocs_wait;
50735 +extern atomic_unchecked_t fscache_n_allocs_nobufs;
50736 +extern atomic_unchecked_t fscache_n_allocs_intr;
50737 +extern atomic_unchecked_t fscache_n_allocs_object_dead;
50738 +extern atomic_unchecked_t fscache_n_alloc_ops;
50739 +extern atomic_unchecked_t fscache_n_alloc_op_waits;
50740
50741 -extern atomic_t fscache_n_retrievals;
50742 -extern atomic_t fscache_n_retrievals_ok;
50743 -extern atomic_t fscache_n_retrievals_wait;
50744 -extern atomic_t fscache_n_retrievals_nodata;
50745 -extern atomic_t fscache_n_retrievals_nobufs;
50746 -extern atomic_t fscache_n_retrievals_intr;
50747 -extern atomic_t fscache_n_retrievals_nomem;
50748 -extern atomic_t fscache_n_retrievals_object_dead;
50749 -extern atomic_t fscache_n_retrieval_ops;
50750 -extern atomic_t fscache_n_retrieval_op_waits;
50751 +extern atomic_unchecked_t fscache_n_retrievals;
50752 +extern atomic_unchecked_t fscache_n_retrievals_ok;
50753 +extern atomic_unchecked_t fscache_n_retrievals_wait;
50754 +extern atomic_unchecked_t fscache_n_retrievals_nodata;
50755 +extern atomic_unchecked_t fscache_n_retrievals_nobufs;
50756 +extern atomic_unchecked_t fscache_n_retrievals_intr;
50757 +extern atomic_unchecked_t fscache_n_retrievals_nomem;
50758 +extern atomic_unchecked_t fscache_n_retrievals_object_dead;
50759 +extern atomic_unchecked_t fscache_n_retrieval_ops;
50760 +extern atomic_unchecked_t fscache_n_retrieval_op_waits;
50761
50762 -extern atomic_t fscache_n_stores;
50763 -extern atomic_t fscache_n_stores_ok;
50764 -extern atomic_t fscache_n_stores_again;
50765 -extern atomic_t fscache_n_stores_nobufs;
50766 -extern atomic_t fscache_n_stores_oom;
50767 -extern atomic_t fscache_n_store_ops;
50768 -extern atomic_t fscache_n_store_calls;
50769 -extern atomic_t fscache_n_store_pages;
50770 -extern atomic_t fscache_n_store_radix_deletes;
50771 -extern atomic_t fscache_n_store_pages_over_limit;
50772 +extern atomic_unchecked_t fscache_n_stores;
50773 +extern atomic_unchecked_t fscache_n_stores_ok;
50774 +extern atomic_unchecked_t fscache_n_stores_again;
50775 +extern atomic_unchecked_t fscache_n_stores_nobufs;
50776 +extern atomic_unchecked_t fscache_n_stores_oom;
50777 +extern atomic_unchecked_t fscache_n_store_ops;
50778 +extern atomic_unchecked_t fscache_n_store_calls;
50779 +extern atomic_unchecked_t fscache_n_store_pages;
50780 +extern atomic_unchecked_t fscache_n_store_radix_deletes;
50781 +extern atomic_unchecked_t fscache_n_store_pages_over_limit;
50782
50783 -extern atomic_t fscache_n_store_vmscan_not_storing;
50784 -extern atomic_t fscache_n_store_vmscan_gone;
50785 -extern atomic_t fscache_n_store_vmscan_busy;
50786 -extern atomic_t fscache_n_store_vmscan_cancelled;
50787 +extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
50788 +extern atomic_unchecked_t fscache_n_store_vmscan_gone;
50789 +extern atomic_unchecked_t fscache_n_store_vmscan_busy;
50790 +extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
50791
50792 -extern atomic_t fscache_n_marks;
50793 -extern atomic_t fscache_n_uncaches;
50794 +extern atomic_unchecked_t fscache_n_marks;
50795 +extern atomic_unchecked_t fscache_n_uncaches;
50796
50797 -extern atomic_t fscache_n_acquires;
50798 -extern atomic_t fscache_n_acquires_null;
50799 -extern atomic_t fscache_n_acquires_no_cache;
50800 -extern atomic_t fscache_n_acquires_ok;
50801 -extern atomic_t fscache_n_acquires_nobufs;
50802 -extern atomic_t fscache_n_acquires_oom;
50803 +extern atomic_unchecked_t fscache_n_acquires;
50804 +extern atomic_unchecked_t fscache_n_acquires_null;
50805 +extern atomic_unchecked_t fscache_n_acquires_no_cache;
50806 +extern atomic_unchecked_t fscache_n_acquires_ok;
50807 +extern atomic_unchecked_t fscache_n_acquires_nobufs;
50808 +extern atomic_unchecked_t fscache_n_acquires_oom;
50809
50810 -extern atomic_t fscache_n_updates;
50811 -extern atomic_t fscache_n_updates_null;
50812 -extern atomic_t fscache_n_updates_run;
50813 +extern atomic_unchecked_t fscache_n_updates;
50814 +extern atomic_unchecked_t fscache_n_updates_null;
50815 +extern atomic_unchecked_t fscache_n_updates_run;
50816
50817 -extern atomic_t fscache_n_relinquishes;
50818 -extern atomic_t fscache_n_relinquishes_null;
50819 -extern atomic_t fscache_n_relinquishes_waitcrt;
50820 -extern atomic_t fscache_n_relinquishes_retire;
50821 +extern atomic_unchecked_t fscache_n_relinquishes;
50822 +extern atomic_unchecked_t fscache_n_relinquishes_null;
50823 +extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
50824 +extern atomic_unchecked_t fscache_n_relinquishes_retire;
50825
50826 -extern atomic_t fscache_n_cookie_index;
50827 -extern atomic_t fscache_n_cookie_data;
50828 -extern atomic_t fscache_n_cookie_special;
50829 +extern atomic_unchecked_t fscache_n_cookie_index;
50830 +extern atomic_unchecked_t fscache_n_cookie_data;
50831 +extern atomic_unchecked_t fscache_n_cookie_special;
50832
50833 -extern atomic_t fscache_n_object_alloc;
50834 -extern atomic_t fscache_n_object_no_alloc;
50835 -extern atomic_t fscache_n_object_lookups;
50836 -extern atomic_t fscache_n_object_lookups_negative;
50837 -extern atomic_t fscache_n_object_lookups_positive;
50838 -extern atomic_t fscache_n_object_lookups_timed_out;
50839 -extern atomic_t fscache_n_object_created;
50840 -extern atomic_t fscache_n_object_avail;
50841 -extern atomic_t fscache_n_object_dead;
50842 +extern atomic_unchecked_t fscache_n_object_alloc;
50843 +extern atomic_unchecked_t fscache_n_object_no_alloc;
50844 +extern atomic_unchecked_t fscache_n_object_lookups;
50845 +extern atomic_unchecked_t fscache_n_object_lookups_negative;
50846 +extern atomic_unchecked_t fscache_n_object_lookups_positive;
50847 +extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
50848 +extern atomic_unchecked_t fscache_n_object_created;
50849 +extern atomic_unchecked_t fscache_n_object_avail;
50850 +extern atomic_unchecked_t fscache_n_object_dead;
50851
50852 -extern atomic_t fscache_n_checkaux_none;
50853 -extern atomic_t fscache_n_checkaux_okay;
50854 -extern atomic_t fscache_n_checkaux_update;
50855 -extern atomic_t fscache_n_checkaux_obsolete;
50856 +extern atomic_unchecked_t fscache_n_checkaux_none;
50857 +extern atomic_unchecked_t fscache_n_checkaux_okay;
50858 +extern atomic_unchecked_t fscache_n_checkaux_update;
50859 +extern atomic_unchecked_t fscache_n_checkaux_obsolete;
50860
50861 extern atomic_t fscache_n_cop_alloc_object;
50862 extern atomic_t fscache_n_cop_lookup_object;
50863 @@ -247,6 +247,11 @@ static inline void fscache_stat(atomic_t *stat)
50864 atomic_inc(stat);
50865 }
50866
50867 +static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
50868 +{
50869 + atomic_inc_unchecked(stat);
50870 +}
50871 +
50872 static inline void fscache_stat_d(atomic_t *stat)
50873 {
50874 atomic_dec(stat);
50875 @@ -259,6 +264,7 @@ extern const struct file_operations fscache_stats_fops;
50876
50877 #define __fscache_stat(stat) (NULL)
50878 #define fscache_stat(stat) do {} while (0)
50879 +#define fscache_stat_unchecked(stat) do {} while (0)
50880 #define fscache_stat_d(stat) do {} while (0)
50881 #endif
50882
50883 diff --git a/fs/fscache/object.c b/fs/fscache/object.c
50884 index e513ac5..e888d34 100644
50885 --- a/fs/fscache/object.c
50886 +++ b/fs/fscache/object.c
50887 @@ -144,7 +144,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
50888 /* update the object metadata on disk */
50889 case FSCACHE_OBJECT_UPDATING:
50890 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
50891 - fscache_stat(&fscache_n_updates_run);
50892 + fscache_stat_unchecked(&fscache_n_updates_run);
50893 fscache_stat(&fscache_n_cop_update_object);
50894 object->cache->ops->update_object(object);
50895 fscache_stat_d(&fscache_n_cop_update_object);
50896 @@ -233,7 +233,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
50897 spin_lock(&object->lock);
50898 object->state = FSCACHE_OBJECT_DEAD;
50899 spin_unlock(&object->lock);
50900 - fscache_stat(&fscache_n_object_dead);
50901 + fscache_stat_unchecked(&fscache_n_object_dead);
50902 goto terminal_transit;
50903
50904 /* handle the parent cache of this object being withdrawn from
50905 @@ -248,7 +248,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
50906 spin_lock(&object->lock);
50907 object->state = FSCACHE_OBJECT_DEAD;
50908 spin_unlock(&object->lock);
50909 - fscache_stat(&fscache_n_object_dead);
50910 + fscache_stat_unchecked(&fscache_n_object_dead);
50911 goto terminal_transit;
50912
50913 /* complain about the object being woken up once it is
50914 @@ -492,7 +492,7 @@ static void fscache_lookup_object(struct fscache_object *object)
50915 parent->cookie->def->name, cookie->def->name,
50916 object->cache->tag->name);
50917
50918 - fscache_stat(&fscache_n_object_lookups);
50919 + fscache_stat_unchecked(&fscache_n_object_lookups);
50920 fscache_stat(&fscache_n_cop_lookup_object);
50921 ret = object->cache->ops->lookup_object(object);
50922 fscache_stat_d(&fscache_n_cop_lookup_object);
50923 @@ -503,7 +503,7 @@ static void fscache_lookup_object(struct fscache_object *object)
50924 if (ret == -ETIMEDOUT) {
50925 /* probably stuck behind another object, so move this one to
50926 * the back of the queue */
50927 - fscache_stat(&fscache_n_object_lookups_timed_out);
50928 + fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
50929 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
50930 }
50931
50932 @@ -526,7 +526,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
50933
50934 spin_lock(&object->lock);
50935 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
50936 - fscache_stat(&fscache_n_object_lookups_negative);
50937 + fscache_stat_unchecked(&fscache_n_object_lookups_negative);
50938
50939 /* transit here to allow write requests to begin stacking up
50940 * and read requests to begin returning ENODATA */
50941 @@ -572,7 +572,7 @@ void fscache_obtained_object(struct fscache_object *object)
50942 * result, in which case there may be data available */
50943 spin_lock(&object->lock);
50944 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
50945 - fscache_stat(&fscache_n_object_lookups_positive);
50946 + fscache_stat_unchecked(&fscache_n_object_lookups_positive);
50947
50948 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
50949
50950 @@ -586,7 +586,7 @@ void fscache_obtained_object(struct fscache_object *object)
50951 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
50952 } else {
50953 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
50954 - fscache_stat(&fscache_n_object_created);
50955 + fscache_stat_unchecked(&fscache_n_object_created);
50956
50957 object->state = FSCACHE_OBJECT_AVAILABLE;
50958 spin_unlock(&object->lock);
50959 @@ -633,7 +633,7 @@ static void fscache_object_available(struct fscache_object *object)
50960 fscache_enqueue_dependents(object);
50961
50962 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
50963 - fscache_stat(&fscache_n_object_avail);
50964 + fscache_stat_unchecked(&fscache_n_object_avail);
50965
50966 _leave("");
50967 }
50968 @@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
50969 enum fscache_checkaux result;
50970
50971 if (!object->cookie->def->check_aux) {
50972 - fscache_stat(&fscache_n_checkaux_none);
50973 + fscache_stat_unchecked(&fscache_n_checkaux_none);
50974 return FSCACHE_CHECKAUX_OKAY;
50975 }
50976
50977 @@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
50978 switch (result) {
50979 /* entry okay as is */
50980 case FSCACHE_CHECKAUX_OKAY:
50981 - fscache_stat(&fscache_n_checkaux_okay);
50982 + fscache_stat_unchecked(&fscache_n_checkaux_okay);
50983 break;
50984
50985 /* entry requires update */
50986 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
50987 - fscache_stat(&fscache_n_checkaux_update);
50988 + fscache_stat_unchecked(&fscache_n_checkaux_update);
50989 break;
50990
50991 /* entry requires deletion */
50992 case FSCACHE_CHECKAUX_OBSOLETE:
50993 - fscache_stat(&fscache_n_checkaux_obsolete);
50994 + fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
50995 break;
50996
50997 default:
50998 diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
50999 index 313e79a..775240f 100644
51000 --- a/fs/fscache/operation.c
51001 +++ b/fs/fscache/operation.c
51002 @@ -16,7 +16,7 @@
51003 #include <linux/seq_file.h>
51004 #include "internal.h"
51005
51006 -atomic_t fscache_op_debug_id;
51007 +atomic_unchecked_t fscache_op_debug_id;
51008 EXPORT_SYMBOL(fscache_op_debug_id);
51009
51010 /**
51011 @@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
51012 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
51013 ASSERTCMP(atomic_read(&op->usage), >, 0);
51014
51015 - fscache_stat(&fscache_n_op_enqueue);
51016 + fscache_stat_unchecked(&fscache_n_op_enqueue);
51017 switch (op->flags & FSCACHE_OP_TYPE) {
51018 case FSCACHE_OP_FAST:
51019 _debug("queue fast");
51020 @@ -76,7 +76,7 @@ static void fscache_run_op(struct fscache_object *object,
51021 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
51022 if (op->processor)
51023 fscache_enqueue_operation(op);
51024 - fscache_stat(&fscache_n_op_run);
51025 + fscache_stat_unchecked(&fscache_n_op_run);
51026 }
51027
51028 /*
51029 @@ -107,11 +107,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
51030 if (object->n_ops > 0) {
51031 atomic_inc(&op->usage);
51032 list_add_tail(&op->pend_link, &object->pending_ops);
51033 - fscache_stat(&fscache_n_op_pend);
51034 + fscache_stat_unchecked(&fscache_n_op_pend);
51035 } else if (!list_empty(&object->pending_ops)) {
51036 atomic_inc(&op->usage);
51037 list_add_tail(&op->pend_link, &object->pending_ops);
51038 - fscache_stat(&fscache_n_op_pend);
51039 + fscache_stat_unchecked(&fscache_n_op_pend);
51040 fscache_start_operations(object);
51041 } else {
51042 ASSERTCMP(object->n_in_progress, ==, 0);
51043 @@ -127,7 +127,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
51044 object->n_exclusive++; /* reads and writes must wait */
51045 atomic_inc(&op->usage);
51046 list_add_tail(&op->pend_link, &object->pending_ops);
51047 - fscache_stat(&fscache_n_op_pend);
51048 + fscache_stat_unchecked(&fscache_n_op_pend);
51049 ret = 0;
51050 } else {
51051 /* not allowed to submit ops in any other state */
51052 @@ -214,11 +214,11 @@ int fscache_submit_op(struct fscache_object *object,
51053 if (object->n_exclusive > 0) {
51054 atomic_inc(&op->usage);
51055 list_add_tail(&op->pend_link, &object->pending_ops);
51056 - fscache_stat(&fscache_n_op_pend);
51057 + fscache_stat_unchecked(&fscache_n_op_pend);
51058 } else if (!list_empty(&object->pending_ops)) {
51059 atomic_inc(&op->usage);
51060 list_add_tail(&op->pend_link, &object->pending_ops);
51061 - fscache_stat(&fscache_n_op_pend);
51062 + fscache_stat_unchecked(&fscache_n_op_pend);
51063 fscache_start_operations(object);
51064 } else {
51065 ASSERTCMP(object->n_exclusive, ==, 0);
51066 @@ -230,12 +230,12 @@ int fscache_submit_op(struct fscache_object *object,
51067 object->n_ops++;
51068 atomic_inc(&op->usage);
51069 list_add_tail(&op->pend_link, &object->pending_ops);
51070 - fscache_stat(&fscache_n_op_pend);
51071 + fscache_stat_unchecked(&fscache_n_op_pend);
51072 ret = 0;
51073 } else if (object->state == FSCACHE_OBJECT_DYING ||
51074 object->state == FSCACHE_OBJECT_LC_DYING ||
51075 object->state == FSCACHE_OBJECT_WITHDRAWING) {
51076 - fscache_stat(&fscache_n_op_rejected);
51077 + fscache_stat_unchecked(&fscache_n_op_rejected);
51078 ret = -ENOBUFS;
51079 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
51080 fscache_report_unexpected_submission(object, op, ostate);
51081 @@ -305,7 +305,7 @@ int fscache_cancel_op(struct fscache_operation *op)
51082
51083 ret = -EBUSY;
51084 if (!list_empty(&op->pend_link)) {
51085 - fscache_stat(&fscache_n_op_cancelled);
51086 + fscache_stat_unchecked(&fscache_n_op_cancelled);
51087 list_del_init(&op->pend_link);
51088 object->n_ops--;
51089 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
51090 @@ -344,7 +344,7 @@ void fscache_put_operation(struct fscache_operation *op)
51091 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
51092 BUG();
51093
51094 - fscache_stat(&fscache_n_op_release);
51095 + fscache_stat_unchecked(&fscache_n_op_release);
51096
51097 if (op->release) {
51098 op->release(op);
51099 @@ -361,7 +361,7 @@ void fscache_put_operation(struct fscache_operation *op)
51100 * lock, and defer it otherwise */
51101 if (!spin_trylock(&object->lock)) {
51102 _debug("defer put");
51103 - fscache_stat(&fscache_n_op_deferred_release);
51104 + fscache_stat_unchecked(&fscache_n_op_deferred_release);
51105
51106 cache = object->cache;
51107 spin_lock(&cache->op_gc_list_lock);
51108 @@ -423,7 +423,7 @@ void fscache_operation_gc(struct work_struct *work)
51109
51110 _debug("GC DEFERRED REL OBJ%x OP%x",
51111 object->debug_id, op->debug_id);
51112 - fscache_stat(&fscache_n_op_gc);
51113 + fscache_stat_unchecked(&fscache_n_op_gc);
51114
51115 ASSERTCMP(atomic_read(&op->usage), ==, 0);
51116
51117 diff --git a/fs/fscache/page.c b/fs/fscache/page.c
51118 index c598ea4..6aac13e 100644
51119 --- a/fs/fscache/page.c
51120 +++ b/fs/fscache/page.c
51121 @@ -59,7 +59,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
51122 val = radix_tree_lookup(&cookie->stores, page->index);
51123 if (!val) {
51124 rcu_read_unlock();
51125 - fscache_stat(&fscache_n_store_vmscan_not_storing);
51126 + fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
51127 __fscache_uncache_page(cookie, page);
51128 return true;
51129 }
51130 @@ -89,11 +89,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
51131 spin_unlock(&cookie->stores_lock);
51132
51133 if (xpage) {
51134 - fscache_stat(&fscache_n_store_vmscan_cancelled);
51135 - fscache_stat(&fscache_n_store_radix_deletes);
51136 + fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
51137 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
51138 ASSERTCMP(xpage, ==, page);
51139 } else {
51140 - fscache_stat(&fscache_n_store_vmscan_gone);
51141 + fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
51142 }
51143
51144 wake_up_bit(&cookie->flags, 0);
51145 @@ -106,7 +106,7 @@ page_busy:
51146 /* we might want to wait here, but that could deadlock the allocator as
51147 * the slow-work threads writing to the cache may all end up sleeping
51148 * on memory allocation */
51149 - fscache_stat(&fscache_n_store_vmscan_busy);
51150 + fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
51151 return false;
51152 }
51153 EXPORT_SYMBOL(__fscache_maybe_release_page);
51154 @@ -130,7 +130,7 @@ static void fscache_end_page_write(struct fscache_object *object,
51155 FSCACHE_COOKIE_STORING_TAG);
51156 if (!radix_tree_tag_get(&cookie->stores, page->index,
51157 FSCACHE_COOKIE_PENDING_TAG)) {
51158 - fscache_stat(&fscache_n_store_radix_deletes);
51159 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
51160 xpage = radix_tree_delete(&cookie->stores, page->index);
51161 }
51162 spin_unlock(&cookie->stores_lock);
51163 @@ -151,7 +151,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
51164
51165 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
51166
51167 - fscache_stat(&fscache_n_attr_changed_calls);
51168 + fscache_stat_unchecked(&fscache_n_attr_changed_calls);
51169
51170 if (fscache_object_is_active(object)) {
51171 fscache_set_op_state(op, "CallFS");
51172 @@ -178,11 +178,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
51173
51174 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
51175
51176 - fscache_stat(&fscache_n_attr_changed);
51177 + fscache_stat_unchecked(&fscache_n_attr_changed);
51178
51179 op = kzalloc(sizeof(*op), GFP_KERNEL);
51180 if (!op) {
51181 - fscache_stat(&fscache_n_attr_changed_nomem);
51182 + fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
51183 _leave(" = -ENOMEM");
51184 return -ENOMEM;
51185 }
51186 @@ -202,7 +202,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
51187 if (fscache_submit_exclusive_op(object, op) < 0)
51188 goto nobufs;
51189 spin_unlock(&cookie->lock);
51190 - fscache_stat(&fscache_n_attr_changed_ok);
51191 + fscache_stat_unchecked(&fscache_n_attr_changed_ok);
51192 fscache_put_operation(op);
51193 _leave(" = 0");
51194 return 0;
51195 @@ -210,7 +210,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
51196 nobufs:
51197 spin_unlock(&cookie->lock);
51198 kfree(op);
51199 - fscache_stat(&fscache_n_attr_changed_nobufs);
51200 + fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
51201 _leave(" = %d", -ENOBUFS);
51202 return -ENOBUFS;
51203 }
51204 @@ -264,7 +264,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
51205 /* allocate a retrieval operation and attempt to submit it */
51206 op = kzalloc(sizeof(*op), GFP_NOIO);
51207 if (!op) {
51208 - fscache_stat(&fscache_n_retrievals_nomem);
51209 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
51210 return NULL;
51211 }
51212
51213 @@ -294,13 +294,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
51214 return 0;
51215 }
51216
51217 - fscache_stat(&fscache_n_retrievals_wait);
51218 + fscache_stat_unchecked(&fscache_n_retrievals_wait);
51219
51220 jif = jiffies;
51221 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
51222 fscache_wait_bit_interruptible,
51223 TASK_INTERRUPTIBLE) != 0) {
51224 - fscache_stat(&fscache_n_retrievals_intr);
51225 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
51226 _leave(" = -ERESTARTSYS");
51227 return -ERESTARTSYS;
51228 }
51229 @@ -318,8 +318,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
51230 */
51231 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
51232 struct fscache_retrieval *op,
51233 - atomic_t *stat_op_waits,
51234 - atomic_t *stat_object_dead)
51235 + atomic_unchecked_t *stat_op_waits,
51236 + atomic_unchecked_t *stat_object_dead)
51237 {
51238 int ret;
51239
51240 @@ -327,7 +327,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
51241 goto check_if_dead;
51242
51243 _debug(">>> WT");
51244 - fscache_stat(stat_op_waits);
51245 + fscache_stat_unchecked(stat_op_waits);
51246 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
51247 fscache_wait_bit_interruptible,
51248 TASK_INTERRUPTIBLE) < 0) {
51249 @@ -344,7 +344,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
51250
51251 check_if_dead:
51252 if (unlikely(fscache_object_is_dead(object))) {
51253 - fscache_stat(stat_object_dead);
51254 + fscache_stat_unchecked(stat_object_dead);
51255 return -ENOBUFS;
51256 }
51257 return 0;
51258 @@ -371,7 +371,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
51259
51260 _enter("%p,%p,,,", cookie, page);
51261
51262 - fscache_stat(&fscache_n_retrievals);
51263 + fscache_stat_unchecked(&fscache_n_retrievals);
51264
51265 if (hlist_empty(&cookie->backing_objects))
51266 goto nobufs;
51267 @@ -405,7 +405,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
51268 goto nobufs_unlock;
51269 spin_unlock(&cookie->lock);
51270
51271 - fscache_stat(&fscache_n_retrieval_ops);
51272 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
51273
51274 /* pin the netfs read context in case we need to do the actual netfs
51275 * read because we've encountered a cache read failure */
51276 @@ -435,15 +435,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
51277
51278 error:
51279 if (ret == -ENOMEM)
51280 - fscache_stat(&fscache_n_retrievals_nomem);
51281 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
51282 else if (ret == -ERESTARTSYS)
51283 - fscache_stat(&fscache_n_retrievals_intr);
51284 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
51285 else if (ret == -ENODATA)
51286 - fscache_stat(&fscache_n_retrievals_nodata);
51287 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
51288 else if (ret < 0)
51289 - fscache_stat(&fscache_n_retrievals_nobufs);
51290 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
51291 else
51292 - fscache_stat(&fscache_n_retrievals_ok);
51293 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
51294
51295 fscache_put_retrieval(op);
51296 _leave(" = %d", ret);
51297 @@ -453,7 +453,7 @@ nobufs_unlock:
51298 spin_unlock(&cookie->lock);
51299 kfree(op);
51300 nobufs:
51301 - fscache_stat(&fscache_n_retrievals_nobufs);
51302 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
51303 _leave(" = -ENOBUFS");
51304 return -ENOBUFS;
51305 }
51306 @@ -491,7 +491,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
51307
51308 _enter("%p,,%d,,,", cookie, *nr_pages);
51309
51310 - fscache_stat(&fscache_n_retrievals);
51311 + fscache_stat_unchecked(&fscache_n_retrievals);
51312
51313 if (hlist_empty(&cookie->backing_objects))
51314 goto nobufs;
51315 @@ -522,7 +522,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
51316 goto nobufs_unlock;
51317 spin_unlock(&cookie->lock);
51318
51319 - fscache_stat(&fscache_n_retrieval_ops);
51320 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
51321
51322 /* pin the netfs read context in case we need to do the actual netfs
51323 * read because we've encountered a cache read failure */
51324 @@ -552,15 +552,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
51325
51326 error:
51327 if (ret == -ENOMEM)
51328 - fscache_stat(&fscache_n_retrievals_nomem);
51329 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
51330 else if (ret == -ERESTARTSYS)
51331 - fscache_stat(&fscache_n_retrievals_intr);
51332 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
51333 else if (ret == -ENODATA)
51334 - fscache_stat(&fscache_n_retrievals_nodata);
51335 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
51336 else if (ret < 0)
51337 - fscache_stat(&fscache_n_retrievals_nobufs);
51338 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
51339 else
51340 - fscache_stat(&fscache_n_retrievals_ok);
51341 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
51342
51343 fscache_put_retrieval(op);
51344 _leave(" = %d", ret);
51345 @@ -570,7 +570,7 @@ nobufs_unlock:
51346 spin_unlock(&cookie->lock);
51347 kfree(op);
51348 nobufs:
51349 - fscache_stat(&fscache_n_retrievals_nobufs);
51350 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
51351 _leave(" = -ENOBUFS");
51352 return -ENOBUFS;
51353 }
51354 @@ -594,7 +594,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
51355
51356 _enter("%p,%p,,,", cookie, page);
51357
51358 - fscache_stat(&fscache_n_allocs);
51359 + fscache_stat_unchecked(&fscache_n_allocs);
51360
51361 if (hlist_empty(&cookie->backing_objects))
51362 goto nobufs;
51363 @@ -621,7 +621,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
51364 goto nobufs_unlock;
51365 spin_unlock(&cookie->lock);
51366
51367 - fscache_stat(&fscache_n_alloc_ops);
51368 + fscache_stat_unchecked(&fscache_n_alloc_ops);
51369
51370 ret = fscache_wait_for_retrieval_activation(
51371 object, op,
51372 @@ -637,11 +637,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
51373
51374 error:
51375 if (ret == -ERESTARTSYS)
51376 - fscache_stat(&fscache_n_allocs_intr);
51377 + fscache_stat_unchecked(&fscache_n_allocs_intr);
51378 else if (ret < 0)
51379 - fscache_stat(&fscache_n_allocs_nobufs);
51380 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
51381 else
51382 - fscache_stat(&fscache_n_allocs_ok);
51383 + fscache_stat_unchecked(&fscache_n_allocs_ok);
51384
51385 fscache_put_retrieval(op);
51386 _leave(" = %d", ret);
51387 @@ -651,7 +651,7 @@ nobufs_unlock:
51388 spin_unlock(&cookie->lock);
51389 kfree(op);
51390 nobufs:
51391 - fscache_stat(&fscache_n_allocs_nobufs);
51392 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
51393 _leave(" = -ENOBUFS");
51394 return -ENOBUFS;
51395 }
51396 @@ -694,7 +694,7 @@ static void fscache_write_op(struct fscache_operation *_op)
51397
51398 spin_lock(&cookie->stores_lock);
51399
51400 - fscache_stat(&fscache_n_store_calls);
51401 + fscache_stat_unchecked(&fscache_n_store_calls);
51402
51403 /* find a page to store */
51404 page = NULL;
51405 @@ -705,7 +705,7 @@ static void fscache_write_op(struct fscache_operation *_op)
51406 page = results[0];
51407 _debug("gang %d [%lx]", n, page->index);
51408 if (page->index > op->store_limit) {
51409 - fscache_stat(&fscache_n_store_pages_over_limit);
51410 + fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
51411 goto superseded;
51412 }
51413
51414 @@ -721,7 +721,7 @@ static void fscache_write_op(struct fscache_operation *_op)
51415
51416 if (page) {
51417 fscache_set_op_state(&op->op, "Store");
51418 - fscache_stat(&fscache_n_store_pages);
51419 + fscache_stat_unchecked(&fscache_n_store_pages);
51420 fscache_stat(&fscache_n_cop_write_page);
51421 ret = object->cache->ops->write_page(op, page);
51422 fscache_stat_d(&fscache_n_cop_write_page);
51423 @@ -792,7 +792,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
51424 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
51425 ASSERT(PageFsCache(page));
51426
51427 - fscache_stat(&fscache_n_stores);
51428 + fscache_stat_unchecked(&fscache_n_stores);
51429
51430 op = kzalloc(sizeof(*op), GFP_NOIO);
51431 if (!op)
51432 @@ -844,7 +844,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
51433 spin_unlock(&cookie->stores_lock);
51434 spin_unlock(&object->lock);
51435
51436 - op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
51437 + op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
51438 op->store_limit = object->store_limit;
51439
51440 if (fscache_submit_op(object, &op->op) < 0)
51441 @@ -852,8 +852,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
51442
51443 spin_unlock(&cookie->lock);
51444 radix_tree_preload_end();
51445 - fscache_stat(&fscache_n_store_ops);
51446 - fscache_stat(&fscache_n_stores_ok);
51447 + fscache_stat_unchecked(&fscache_n_store_ops);
51448 + fscache_stat_unchecked(&fscache_n_stores_ok);
51449
51450 /* the slow work queue now carries its own ref on the object */
51451 fscache_put_operation(&op->op);
51452 @@ -861,14 +861,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
51453 return 0;
51454
51455 already_queued:
51456 - fscache_stat(&fscache_n_stores_again);
51457 + fscache_stat_unchecked(&fscache_n_stores_again);
51458 already_pending:
51459 spin_unlock(&cookie->stores_lock);
51460 spin_unlock(&object->lock);
51461 spin_unlock(&cookie->lock);
51462 radix_tree_preload_end();
51463 kfree(op);
51464 - fscache_stat(&fscache_n_stores_ok);
51465 + fscache_stat_unchecked(&fscache_n_stores_ok);
51466 _leave(" = 0");
51467 return 0;
51468
51469 @@ -886,14 +886,14 @@ nobufs:
51470 spin_unlock(&cookie->lock);
51471 radix_tree_preload_end();
51472 kfree(op);
51473 - fscache_stat(&fscache_n_stores_nobufs);
51474 + fscache_stat_unchecked(&fscache_n_stores_nobufs);
51475 _leave(" = -ENOBUFS");
51476 return -ENOBUFS;
51477
51478 nomem_free:
51479 kfree(op);
51480 nomem:
51481 - fscache_stat(&fscache_n_stores_oom);
51482 + fscache_stat_unchecked(&fscache_n_stores_oom);
51483 _leave(" = -ENOMEM");
51484 return -ENOMEM;
51485 }
51486 @@ -911,7 +911,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
51487 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
51488 ASSERTCMP(page, !=, NULL);
51489
51490 - fscache_stat(&fscache_n_uncaches);
51491 + fscache_stat_unchecked(&fscache_n_uncaches);
51492
51493 /* cache withdrawal may beat us to it */
51494 if (!PageFsCache(page))
51495 @@ -964,7 +964,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
51496 unsigned long loop;
51497
51498 #ifdef CONFIG_FSCACHE_STATS
51499 - atomic_add(pagevec->nr, &fscache_n_marks);
51500 + atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
51501 #endif
51502
51503 for (loop = 0; loop < pagevec->nr; loop++) {
51504 diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
51505 index 46435f3..8cddf18 100644
51506 --- a/fs/fscache/stats.c
51507 +++ b/fs/fscache/stats.c
51508 @@ -18,95 +18,95 @@
51509 /*
51510 * operation counters
51511 */
51512 -atomic_t fscache_n_op_pend;
51513 -atomic_t fscache_n_op_run;
51514 -atomic_t fscache_n_op_enqueue;
51515 -atomic_t fscache_n_op_requeue;
51516 -atomic_t fscache_n_op_deferred_release;
51517 -atomic_t fscache_n_op_release;
51518 -atomic_t fscache_n_op_gc;
51519 -atomic_t fscache_n_op_cancelled;
51520 -atomic_t fscache_n_op_rejected;
51521 +atomic_unchecked_t fscache_n_op_pend;
51522 +atomic_unchecked_t fscache_n_op_run;
51523 +atomic_unchecked_t fscache_n_op_enqueue;
51524 +atomic_unchecked_t fscache_n_op_requeue;
51525 +atomic_unchecked_t fscache_n_op_deferred_release;
51526 +atomic_unchecked_t fscache_n_op_release;
51527 +atomic_unchecked_t fscache_n_op_gc;
51528 +atomic_unchecked_t fscache_n_op_cancelled;
51529 +atomic_unchecked_t fscache_n_op_rejected;
51530
51531 -atomic_t fscache_n_attr_changed;
51532 -atomic_t fscache_n_attr_changed_ok;
51533 -atomic_t fscache_n_attr_changed_nobufs;
51534 -atomic_t fscache_n_attr_changed_nomem;
51535 -atomic_t fscache_n_attr_changed_calls;
51536 +atomic_unchecked_t fscache_n_attr_changed;
51537 +atomic_unchecked_t fscache_n_attr_changed_ok;
51538 +atomic_unchecked_t fscache_n_attr_changed_nobufs;
51539 +atomic_unchecked_t fscache_n_attr_changed_nomem;
51540 +atomic_unchecked_t fscache_n_attr_changed_calls;
51541
51542 -atomic_t fscache_n_allocs;
51543 -atomic_t fscache_n_allocs_ok;
51544 -atomic_t fscache_n_allocs_wait;
51545 -atomic_t fscache_n_allocs_nobufs;
51546 -atomic_t fscache_n_allocs_intr;
51547 -atomic_t fscache_n_allocs_object_dead;
51548 -atomic_t fscache_n_alloc_ops;
51549 -atomic_t fscache_n_alloc_op_waits;
51550 +atomic_unchecked_t fscache_n_allocs;
51551 +atomic_unchecked_t fscache_n_allocs_ok;
51552 +atomic_unchecked_t fscache_n_allocs_wait;
51553 +atomic_unchecked_t fscache_n_allocs_nobufs;
51554 +atomic_unchecked_t fscache_n_allocs_intr;
51555 +atomic_unchecked_t fscache_n_allocs_object_dead;
51556 +atomic_unchecked_t fscache_n_alloc_ops;
51557 +atomic_unchecked_t fscache_n_alloc_op_waits;
51558
51559 -atomic_t fscache_n_retrievals;
51560 -atomic_t fscache_n_retrievals_ok;
51561 -atomic_t fscache_n_retrievals_wait;
51562 -atomic_t fscache_n_retrievals_nodata;
51563 -atomic_t fscache_n_retrievals_nobufs;
51564 -atomic_t fscache_n_retrievals_intr;
51565 -atomic_t fscache_n_retrievals_nomem;
51566 -atomic_t fscache_n_retrievals_object_dead;
51567 -atomic_t fscache_n_retrieval_ops;
51568 -atomic_t fscache_n_retrieval_op_waits;
51569 +atomic_unchecked_t fscache_n_retrievals;
51570 +atomic_unchecked_t fscache_n_retrievals_ok;
51571 +atomic_unchecked_t fscache_n_retrievals_wait;
51572 +atomic_unchecked_t fscache_n_retrievals_nodata;
51573 +atomic_unchecked_t fscache_n_retrievals_nobufs;
51574 +atomic_unchecked_t fscache_n_retrievals_intr;
51575 +atomic_unchecked_t fscache_n_retrievals_nomem;
51576 +atomic_unchecked_t fscache_n_retrievals_object_dead;
51577 +atomic_unchecked_t fscache_n_retrieval_ops;
51578 +atomic_unchecked_t fscache_n_retrieval_op_waits;
51579
51580 -atomic_t fscache_n_stores;
51581 -atomic_t fscache_n_stores_ok;
51582 -atomic_t fscache_n_stores_again;
51583 -atomic_t fscache_n_stores_nobufs;
51584 -atomic_t fscache_n_stores_oom;
51585 -atomic_t fscache_n_store_ops;
51586 -atomic_t fscache_n_store_calls;
51587 -atomic_t fscache_n_store_pages;
51588 -atomic_t fscache_n_store_radix_deletes;
51589 -atomic_t fscache_n_store_pages_over_limit;
51590 +atomic_unchecked_t fscache_n_stores;
51591 +atomic_unchecked_t fscache_n_stores_ok;
51592 +atomic_unchecked_t fscache_n_stores_again;
51593 +atomic_unchecked_t fscache_n_stores_nobufs;
51594 +atomic_unchecked_t fscache_n_stores_oom;
51595 +atomic_unchecked_t fscache_n_store_ops;
51596 +atomic_unchecked_t fscache_n_store_calls;
51597 +atomic_unchecked_t fscache_n_store_pages;
51598 +atomic_unchecked_t fscache_n_store_radix_deletes;
51599 +atomic_unchecked_t fscache_n_store_pages_over_limit;
51600
51601 -atomic_t fscache_n_store_vmscan_not_storing;
51602 -atomic_t fscache_n_store_vmscan_gone;
51603 -atomic_t fscache_n_store_vmscan_busy;
51604 -atomic_t fscache_n_store_vmscan_cancelled;
51605 +atomic_unchecked_t fscache_n_store_vmscan_not_storing;
51606 +atomic_unchecked_t fscache_n_store_vmscan_gone;
51607 +atomic_unchecked_t fscache_n_store_vmscan_busy;
51608 +atomic_unchecked_t fscache_n_store_vmscan_cancelled;
51609
51610 -atomic_t fscache_n_marks;
51611 -atomic_t fscache_n_uncaches;
51612 +atomic_unchecked_t fscache_n_marks;
51613 +atomic_unchecked_t fscache_n_uncaches;
51614
51615 -atomic_t fscache_n_acquires;
51616 -atomic_t fscache_n_acquires_null;
51617 -atomic_t fscache_n_acquires_no_cache;
51618 -atomic_t fscache_n_acquires_ok;
51619 -atomic_t fscache_n_acquires_nobufs;
51620 -atomic_t fscache_n_acquires_oom;
51621 +atomic_unchecked_t fscache_n_acquires;
51622 +atomic_unchecked_t fscache_n_acquires_null;
51623 +atomic_unchecked_t fscache_n_acquires_no_cache;
51624 +atomic_unchecked_t fscache_n_acquires_ok;
51625 +atomic_unchecked_t fscache_n_acquires_nobufs;
51626 +atomic_unchecked_t fscache_n_acquires_oom;
51627
51628 -atomic_t fscache_n_updates;
51629 -atomic_t fscache_n_updates_null;
51630 -atomic_t fscache_n_updates_run;
51631 +atomic_unchecked_t fscache_n_updates;
51632 +atomic_unchecked_t fscache_n_updates_null;
51633 +atomic_unchecked_t fscache_n_updates_run;
51634
51635 -atomic_t fscache_n_relinquishes;
51636 -atomic_t fscache_n_relinquishes_null;
51637 -atomic_t fscache_n_relinquishes_waitcrt;
51638 -atomic_t fscache_n_relinquishes_retire;
51639 +atomic_unchecked_t fscache_n_relinquishes;
51640 +atomic_unchecked_t fscache_n_relinquishes_null;
51641 +atomic_unchecked_t fscache_n_relinquishes_waitcrt;
51642 +atomic_unchecked_t fscache_n_relinquishes_retire;
51643
51644 -atomic_t fscache_n_cookie_index;
51645 -atomic_t fscache_n_cookie_data;
51646 -atomic_t fscache_n_cookie_special;
51647 +atomic_unchecked_t fscache_n_cookie_index;
51648 +atomic_unchecked_t fscache_n_cookie_data;
51649 +atomic_unchecked_t fscache_n_cookie_special;
51650
51651 -atomic_t fscache_n_object_alloc;
51652 -atomic_t fscache_n_object_no_alloc;
51653 -atomic_t fscache_n_object_lookups;
51654 -atomic_t fscache_n_object_lookups_negative;
51655 -atomic_t fscache_n_object_lookups_positive;
51656 -atomic_t fscache_n_object_lookups_timed_out;
51657 -atomic_t fscache_n_object_created;
51658 -atomic_t fscache_n_object_avail;
51659 -atomic_t fscache_n_object_dead;
51660 +atomic_unchecked_t fscache_n_object_alloc;
51661 +atomic_unchecked_t fscache_n_object_no_alloc;
51662 +atomic_unchecked_t fscache_n_object_lookups;
51663 +atomic_unchecked_t fscache_n_object_lookups_negative;
51664 +atomic_unchecked_t fscache_n_object_lookups_positive;
51665 +atomic_unchecked_t fscache_n_object_lookups_timed_out;
51666 +atomic_unchecked_t fscache_n_object_created;
51667 +atomic_unchecked_t fscache_n_object_avail;
51668 +atomic_unchecked_t fscache_n_object_dead;
51669
51670 -atomic_t fscache_n_checkaux_none;
51671 -atomic_t fscache_n_checkaux_okay;
51672 -atomic_t fscache_n_checkaux_update;
51673 -atomic_t fscache_n_checkaux_obsolete;
51674 +atomic_unchecked_t fscache_n_checkaux_none;
51675 +atomic_unchecked_t fscache_n_checkaux_okay;
51676 +atomic_unchecked_t fscache_n_checkaux_update;
51677 +atomic_unchecked_t fscache_n_checkaux_obsolete;
51678
51679 atomic_t fscache_n_cop_alloc_object;
51680 atomic_t fscache_n_cop_lookup_object;
51681 @@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
51682 seq_puts(m, "FS-Cache statistics\n");
51683
51684 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
51685 - atomic_read(&fscache_n_cookie_index),
51686 - atomic_read(&fscache_n_cookie_data),
51687 - atomic_read(&fscache_n_cookie_special));
51688 + atomic_read_unchecked(&fscache_n_cookie_index),
51689 + atomic_read_unchecked(&fscache_n_cookie_data),
51690 + atomic_read_unchecked(&fscache_n_cookie_special));
51691
51692 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
51693 - atomic_read(&fscache_n_object_alloc),
51694 - atomic_read(&fscache_n_object_no_alloc),
51695 - atomic_read(&fscache_n_object_avail),
51696 - atomic_read(&fscache_n_object_dead));
51697 + atomic_read_unchecked(&fscache_n_object_alloc),
51698 + atomic_read_unchecked(&fscache_n_object_no_alloc),
51699 + atomic_read_unchecked(&fscache_n_object_avail),
51700 + atomic_read_unchecked(&fscache_n_object_dead));
51701 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
51702 - atomic_read(&fscache_n_checkaux_none),
51703 - atomic_read(&fscache_n_checkaux_okay),
51704 - atomic_read(&fscache_n_checkaux_update),
51705 - atomic_read(&fscache_n_checkaux_obsolete));
51706 + atomic_read_unchecked(&fscache_n_checkaux_none),
51707 + atomic_read_unchecked(&fscache_n_checkaux_okay),
51708 + atomic_read_unchecked(&fscache_n_checkaux_update),
51709 + atomic_read_unchecked(&fscache_n_checkaux_obsolete));
51710
51711 seq_printf(m, "Pages : mrk=%u unc=%u\n",
51712 - atomic_read(&fscache_n_marks),
51713 - atomic_read(&fscache_n_uncaches));
51714 + atomic_read_unchecked(&fscache_n_marks),
51715 + atomic_read_unchecked(&fscache_n_uncaches));
51716
51717 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
51718 " oom=%u\n",
51719 - atomic_read(&fscache_n_acquires),
51720 - atomic_read(&fscache_n_acquires_null),
51721 - atomic_read(&fscache_n_acquires_no_cache),
51722 - atomic_read(&fscache_n_acquires_ok),
51723 - atomic_read(&fscache_n_acquires_nobufs),
51724 - atomic_read(&fscache_n_acquires_oom));
51725 + atomic_read_unchecked(&fscache_n_acquires),
51726 + atomic_read_unchecked(&fscache_n_acquires_null),
51727 + atomic_read_unchecked(&fscache_n_acquires_no_cache),
51728 + atomic_read_unchecked(&fscache_n_acquires_ok),
51729 + atomic_read_unchecked(&fscache_n_acquires_nobufs),
51730 + atomic_read_unchecked(&fscache_n_acquires_oom));
51731
51732 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
51733 - atomic_read(&fscache_n_object_lookups),
51734 - atomic_read(&fscache_n_object_lookups_negative),
51735 - atomic_read(&fscache_n_object_lookups_positive),
51736 - atomic_read(&fscache_n_object_lookups_timed_out),
51737 - atomic_read(&fscache_n_object_created));
51738 + atomic_read_unchecked(&fscache_n_object_lookups),
51739 + atomic_read_unchecked(&fscache_n_object_lookups_negative),
51740 + atomic_read_unchecked(&fscache_n_object_lookups_positive),
51741 + atomic_read_unchecked(&fscache_n_object_lookups_timed_out),
51742 + atomic_read_unchecked(&fscache_n_object_created));
51743
51744 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
51745 - atomic_read(&fscache_n_updates),
51746 - atomic_read(&fscache_n_updates_null),
51747 - atomic_read(&fscache_n_updates_run));
51748 + atomic_read_unchecked(&fscache_n_updates),
51749 + atomic_read_unchecked(&fscache_n_updates_null),
51750 + atomic_read_unchecked(&fscache_n_updates_run));
51751
51752 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
51753 - atomic_read(&fscache_n_relinquishes),
51754 - atomic_read(&fscache_n_relinquishes_null),
51755 - atomic_read(&fscache_n_relinquishes_waitcrt),
51756 - atomic_read(&fscache_n_relinquishes_retire));
51757 + atomic_read_unchecked(&fscache_n_relinquishes),
51758 + atomic_read_unchecked(&fscache_n_relinquishes_null),
51759 + atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
51760 + atomic_read_unchecked(&fscache_n_relinquishes_retire));
51761
51762 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
51763 - atomic_read(&fscache_n_attr_changed),
51764 - atomic_read(&fscache_n_attr_changed_ok),
51765 - atomic_read(&fscache_n_attr_changed_nobufs),
51766 - atomic_read(&fscache_n_attr_changed_nomem),
51767 - atomic_read(&fscache_n_attr_changed_calls));
51768 + atomic_read_unchecked(&fscache_n_attr_changed),
51769 + atomic_read_unchecked(&fscache_n_attr_changed_ok),
51770 + atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
51771 + atomic_read_unchecked(&fscache_n_attr_changed_nomem),
51772 + atomic_read_unchecked(&fscache_n_attr_changed_calls));
51773
51774 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
51775 - atomic_read(&fscache_n_allocs),
51776 - atomic_read(&fscache_n_allocs_ok),
51777 - atomic_read(&fscache_n_allocs_wait),
51778 - atomic_read(&fscache_n_allocs_nobufs),
51779 - atomic_read(&fscache_n_allocs_intr));
51780 + atomic_read_unchecked(&fscache_n_allocs),
51781 + atomic_read_unchecked(&fscache_n_allocs_ok),
51782 + atomic_read_unchecked(&fscache_n_allocs_wait),
51783 + atomic_read_unchecked(&fscache_n_allocs_nobufs),
51784 + atomic_read_unchecked(&fscache_n_allocs_intr));
51785 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
51786 - atomic_read(&fscache_n_alloc_ops),
51787 - atomic_read(&fscache_n_alloc_op_waits),
51788 - atomic_read(&fscache_n_allocs_object_dead));
51789 + atomic_read_unchecked(&fscache_n_alloc_ops),
51790 + atomic_read_unchecked(&fscache_n_alloc_op_waits),
51791 + atomic_read_unchecked(&fscache_n_allocs_object_dead));
51792
51793 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
51794 " int=%u oom=%u\n",
51795 - atomic_read(&fscache_n_retrievals),
51796 - atomic_read(&fscache_n_retrievals_ok),
51797 - atomic_read(&fscache_n_retrievals_wait),
51798 - atomic_read(&fscache_n_retrievals_nodata),
51799 - atomic_read(&fscache_n_retrievals_nobufs),
51800 - atomic_read(&fscache_n_retrievals_intr),
51801 - atomic_read(&fscache_n_retrievals_nomem));
51802 + atomic_read_unchecked(&fscache_n_retrievals),
51803 + atomic_read_unchecked(&fscache_n_retrievals_ok),
51804 + atomic_read_unchecked(&fscache_n_retrievals_wait),
51805 + atomic_read_unchecked(&fscache_n_retrievals_nodata),
51806 + atomic_read_unchecked(&fscache_n_retrievals_nobufs),
51807 + atomic_read_unchecked(&fscache_n_retrievals_intr),
51808 + atomic_read_unchecked(&fscache_n_retrievals_nomem));
51809 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
51810 - atomic_read(&fscache_n_retrieval_ops),
51811 - atomic_read(&fscache_n_retrieval_op_waits),
51812 - atomic_read(&fscache_n_retrievals_object_dead));
51813 + atomic_read_unchecked(&fscache_n_retrieval_ops),
51814 + atomic_read_unchecked(&fscache_n_retrieval_op_waits),
51815 + atomic_read_unchecked(&fscache_n_retrievals_object_dead));
51816
51817 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
51818 - atomic_read(&fscache_n_stores),
51819 - atomic_read(&fscache_n_stores_ok),
51820 - atomic_read(&fscache_n_stores_again),
51821 - atomic_read(&fscache_n_stores_nobufs),
51822 - atomic_read(&fscache_n_stores_oom));
51823 + atomic_read_unchecked(&fscache_n_stores),
51824 + atomic_read_unchecked(&fscache_n_stores_ok),
51825 + atomic_read_unchecked(&fscache_n_stores_again),
51826 + atomic_read_unchecked(&fscache_n_stores_nobufs),
51827 + atomic_read_unchecked(&fscache_n_stores_oom));
51828 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
51829 - atomic_read(&fscache_n_store_ops),
51830 - atomic_read(&fscache_n_store_calls),
51831 - atomic_read(&fscache_n_store_pages),
51832 - atomic_read(&fscache_n_store_radix_deletes),
51833 - atomic_read(&fscache_n_store_pages_over_limit));
51834 + atomic_read_unchecked(&fscache_n_store_ops),
51835 + atomic_read_unchecked(&fscache_n_store_calls),
51836 + atomic_read_unchecked(&fscache_n_store_pages),
51837 + atomic_read_unchecked(&fscache_n_store_radix_deletes),
51838 + atomic_read_unchecked(&fscache_n_store_pages_over_limit));
51839
51840 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
51841 - atomic_read(&fscache_n_store_vmscan_not_storing),
51842 - atomic_read(&fscache_n_store_vmscan_gone),
51843 - atomic_read(&fscache_n_store_vmscan_busy),
51844 - atomic_read(&fscache_n_store_vmscan_cancelled));
51845 + atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
51846 + atomic_read_unchecked(&fscache_n_store_vmscan_gone),
51847 + atomic_read_unchecked(&fscache_n_store_vmscan_busy),
51848 + atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
51849
51850 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
51851 - atomic_read(&fscache_n_op_pend),
51852 - atomic_read(&fscache_n_op_run),
51853 - atomic_read(&fscache_n_op_enqueue),
51854 - atomic_read(&fscache_n_op_cancelled),
51855 - atomic_read(&fscache_n_op_rejected));
51856 + atomic_read_unchecked(&fscache_n_op_pend),
51857 + atomic_read_unchecked(&fscache_n_op_run),
51858 + atomic_read_unchecked(&fscache_n_op_enqueue),
51859 + atomic_read_unchecked(&fscache_n_op_cancelled),
51860 + atomic_read_unchecked(&fscache_n_op_rejected));
51861 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
51862 - atomic_read(&fscache_n_op_deferred_release),
51863 - atomic_read(&fscache_n_op_release),
51864 - atomic_read(&fscache_n_op_gc));
51865 + atomic_read_unchecked(&fscache_n_op_deferred_release),
51866 + atomic_read_unchecked(&fscache_n_op_release),
51867 + atomic_read_unchecked(&fscache_n_op_gc));
51868
51869 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
51870 atomic_read(&fscache_n_cop_alloc_object),
51871 diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
51872 index de792dc..448b532 100644
51873 --- a/fs/fuse/cuse.c
51874 +++ b/fs/fuse/cuse.c
51875 @@ -576,10 +576,12 @@ static int __init cuse_init(void)
51876 INIT_LIST_HEAD(&cuse_conntbl[i]);
51877
51878 /* inherit and extend fuse_dev_operations */
51879 - cuse_channel_fops = fuse_dev_operations;
51880 - cuse_channel_fops.owner = THIS_MODULE;
51881 - cuse_channel_fops.open = cuse_channel_open;
51882 - cuse_channel_fops.release = cuse_channel_release;
51883 + pax_open_kernel();
51884 + memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
51885 + *(void **)&cuse_channel_fops.owner = THIS_MODULE;
51886 + *(void **)&cuse_channel_fops.open = cuse_channel_open;
51887 + *(void **)&cuse_channel_fops.release = cuse_channel_release;
51888 + pax_close_kernel();
51889
51890 cuse_class = class_create(THIS_MODULE, "cuse");
51891 if (IS_ERR(cuse_class))
51892 diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
51893 index 1facb39..7f48557 100644
51894 --- a/fs/fuse/dev.c
51895 +++ b/fs/fuse/dev.c
51896 @@ -885,7 +885,7 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
51897 {
51898 struct fuse_notify_inval_entry_out outarg;
51899 int err = -EINVAL;
51900 - char buf[FUSE_NAME_MAX+1];
51901 + char *buf = NULL;
51902 struct qstr name;
51903
51904 if (size < sizeof(outarg))
51905 @@ -899,6 +899,11 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
51906 if (outarg.namelen > FUSE_NAME_MAX)
51907 goto err;
51908
51909 + err = -ENOMEM;
51910 + buf = kmalloc(FUSE_NAME_MAX+1, GFP_KERNEL);
51911 + if (!buf)
51912 + goto err;
51913 +
51914 err = -EINVAL;
51915 if (size != sizeof(outarg) + outarg.namelen + 1)
51916 goto err;
51917 @@ -914,17 +919,15 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
51918
51919 down_read(&fc->killsb);
51920 err = -ENOENT;
51921 - if (!fc->sb)
51922 - goto err_unlock;
51923 -
51924 - err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
51925 -
51926 -err_unlock:
51927 + if (fc->sb)
51928 + err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
51929 up_read(&fc->killsb);
51930 + kfree(buf);
51931 return err;
51932
51933 err:
51934 fuse_copy_finish(cs);
51935 + kfree(buf);
51936 return err;
51937 }
51938
51939 diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
51940 index 4787ae6..73efff7 100644
51941 --- a/fs/fuse/dir.c
51942 +++ b/fs/fuse/dir.c
51943 @@ -1127,7 +1127,7 @@ static char *read_link(struct dentry *dentry)
51944 return link;
51945 }
51946
51947 -static void free_link(char *link)
51948 +static void free_link(const char *link)
51949 {
51950 if (!IS_ERR(link))
51951 free_page((unsigned long) link);
51952 diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c
51953 index 247436c..e650ccb 100644
51954 --- a/fs/gfs2/ops_inode.c
51955 +++ b/fs/gfs2/ops_inode.c
51956 @@ -752,6 +752,8 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
51957 unsigned int x;
51958 int error;
51959
51960 + pax_track_stack();
51961 +
51962 if (ndentry->d_inode) {
51963 nip = GFS2_I(ndentry->d_inode);
51964 if (ip == nip)
51965 diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
51966 index 4463297..4fed53b 100644
51967 --- a/fs/gfs2/sys.c
51968 +++ b/fs/gfs2/sys.c
51969 @@ -49,7 +49,7 @@ static ssize_t gfs2_attr_store(struct kobject *kobj, struct attribute *attr,
51970 return a->store ? a->store(sdp, buf, len) : len;
51971 }
51972
51973 -static struct sysfs_ops gfs2_attr_ops = {
51974 +static const struct sysfs_ops gfs2_attr_ops = {
51975 .show = gfs2_attr_show,
51976 .store = gfs2_attr_store,
51977 };
51978 @@ -584,7 +584,7 @@ static int gfs2_uevent(struct kset *kset, struct kobject *kobj,
51979 return 0;
51980 }
51981
51982 -static struct kset_uevent_ops gfs2_uevent_ops = {
51983 +static const struct kset_uevent_ops gfs2_uevent_ops = {
51984 .uevent = gfs2_uevent,
51985 };
51986
51987 diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c
51988 index f6874ac..7cd98a8 100644
51989 --- a/fs/hfsplus/catalog.c
51990 +++ b/fs/hfsplus/catalog.c
51991 @@ -157,6 +157,8 @@ int hfsplus_find_cat(struct super_block *sb, u32 cnid,
51992 int err;
51993 u16 type;
51994
51995 + pax_track_stack();
51996 +
51997 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
51998 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
51999 if (err)
52000 @@ -186,6 +188,8 @@ int hfsplus_create_cat(u32 cnid, struct inode *dir, struct qstr *str, struct ino
52001 int entry_size;
52002 int err;
52003
52004 + pax_track_stack();
52005 +
52006 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n", str->name, cnid, inode->i_nlink);
52007 sb = dir->i_sb;
52008 hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
52009 @@ -318,6 +322,8 @@ int hfsplus_rename_cat(u32 cnid,
52010 int entry_size, type;
52011 int err = 0;
52012
52013 + pax_track_stack();
52014 +
52015 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n", cnid, src_dir->i_ino, src_name->name,
52016 dst_dir->i_ino, dst_name->name);
52017 sb = src_dir->i_sb;
52018 diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
52019 index 5f40236..dac3421 100644
52020 --- a/fs/hfsplus/dir.c
52021 +++ b/fs/hfsplus/dir.c
52022 @@ -121,6 +121,8 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
52023 struct hfsplus_readdir_data *rd;
52024 u16 type;
52025
52026 + pax_track_stack();
52027 +
52028 if (filp->f_pos >= inode->i_size)
52029 return 0;
52030
52031 diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
52032 index 1bcf597..905a251 100644
52033 --- a/fs/hfsplus/inode.c
52034 +++ b/fs/hfsplus/inode.c
52035 @@ -399,6 +399,8 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
52036 int res = 0;
52037 u16 type;
52038
52039 + pax_track_stack();
52040 +
52041 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
52042
52043 HFSPLUS_I(inode).dev = 0;
52044 @@ -461,6 +463,8 @@ int hfsplus_cat_write_inode(struct inode *inode)
52045 struct hfs_find_data fd;
52046 hfsplus_cat_entry entry;
52047
52048 + pax_track_stack();
52049 +
52050 if (HFSPLUS_IS_RSRC(inode))
52051 main_inode = HFSPLUS_I(inode).rsrc_inode;
52052
52053 diff --git a/fs/hfsplus/ioctl.c b/fs/hfsplus/ioctl.c
52054 index f457d2c..7ef4ad5 100644
52055 --- a/fs/hfsplus/ioctl.c
52056 +++ b/fs/hfsplus/ioctl.c
52057 @@ -101,6 +101,8 @@ int hfsplus_setxattr(struct dentry *dentry, const char *name,
52058 struct hfsplus_cat_file *file;
52059 int res;
52060
52061 + pax_track_stack();
52062 +
52063 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
52064 return -EOPNOTSUPP;
52065
52066 @@ -143,6 +145,8 @@ ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name,
52067 struct hfsplus_cat_file *file;
52068 ssize_t res = 0;
52069
52070 + pax_track_stack();
52071 +
52072 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
52073 return -EOPNOTSUPP;
52074
52075 diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
52076 index 43022f3..7298079 100644
52077 --- a/fs/hfsplus/super.c
52078 +++ b/fs/hfsplus/super.c
52079 @@ -312,6 +312,8 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
52080 struct nls_table *nls = NULL;
52081 int err = -EINVAL;
52082
52083 + pax_track_stack();
52084 +
52085 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
52086 if (!sbi)
52087 return -ENOMEM;
52088 diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
52089 index 87a1258..5694d91 100644
52090 --- a/fs/hugetlbfs/inode.c
52091 +++ b/fs/hugetlbfs/inode.c
52092 @@ -909,7 +909,7 @@ static struct file_system_type hugetlbfs_fs_type = {
52093 .kill_sb = kill_litter_super,
52094 };
52095
52096 -static struct vfsmount *hugetlbfs_vfsmount;
52097 +struct vfsmount *hugetlbfs_vfsmount;
52098
52099 static int can_do_hugetlb_shm(void)
52100 {
52101 diff --git a/fs/ioctl.c b/fs/ioctl.c
52102 index 6c75110..19d2c3c 100644
52103 --- a/fs/ioctl.c
52104 +++ b/fs/ioctl.c
52105 @@ -97,7 +97,7 @@ int fiemap_fill_next_extent(struct fiemap_extent_info *fieinfo, u64 logical,
52106 u64 phys, u64 len, u32 flags)
52107 {
52108 struct fiemap_extent extent;
52109 - struct fiemap_extent *dest = fieinfo->fi_extents_start;
52110 + struct fiemap_extent __user *dest = fieinfo->fi_extents_start;
52111
52112 /* only count the extents */
52113 if (fieinfo->fi_extents_max == 0) {
52114 @@ -207,7 +207,7 @@ static int ioctl_fiemap(struct file *filp, unsigned long arg)
52115
52116 fieinfo.fi_flags = fiemap.fm_flags;
52117 fieinfo.fi_extents_max = fiemap.fm_extent_count;
52118 - fieinfo.fi_extents_start = (struct fiemap_extent *)(arg + sizeof(fiemap));
52119 + fieinfo.fi_extents_start = (struct fiemap_extent __user *)(arg + sizeof(fiemap));
52120
52121 if (fiemap.fm_extent_count != 0 &&
52122 !access_ok(VERIFY_WRITE, fieinfo.fi_extents_start,
52123 @@ -220,7 +220,7 @@ static int ioctl_fiemap(struct file *filp, unsigned long arg)
52124 error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start, len);
52125 fiemap.fm_flags = fieinfo.fi_flags;
52126 fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped;
52127 - if (copy_to_user((char *)arg, &fiemap, sizeof(fiemap)))
52128 + if (copy_to_user((__force char __user *)arg, &fiemap, sizeof(fiemap)))
52129 error = -EFAULT;
52130
52131 return error;
52132 diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
52133 index b0435dd..81ee0be 100644
52134 --- a/fs/jbd/checkpoint.c
52135 +++ b/fs/jbd/checkpoint.c
52136 @@ -348,6 +348,8 @@ int log_do_checkpoint(journal_t *journal)
52137 tid_t this_tid;
52138 int result;
52139
52140 + pax_track_stack();
52141 +
52142 jbd_debug(1, "Start checkpoint\n");
52143
52144 /*
52145 diff --git a/fs/jffs2/compr_rtime.c b/fs/jffs2/compr_rtime.c
52146 index 546d153..736896c 100644
52147 --- a/fs/jffs2/compr_rtime.c
52148 +++ b/fs/jffs2/compr_rtime.c
52149 @@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned char *data_in,
52150 int outpos = 0;
52151 int pos=0;
52152
52153 + pax_track_stack();
52154 +
52155 memset(positions,0,sizeof(positions));
52156
52157 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
52158 @@ -79,6 +81,8 @@ static int jffs2_rtime_decompress(unsigned char *data_in,
52159 int outpos = 0;
52160 int pos=0;
52161
52162 + pax_track_stack();
52163 +
52164 memset(positions,0,sizeof(positions));
52165
52166 while (outpos<destlen) {
52167 diff --git a/fs/jffs2/compr_rubin.c b/fs/jffs2/compr_rubin.c
52168 index 170d289..3254b98 100644
52169 --- a/fs/jffs2/compr_rubin.c
52170 +++ b/fs/jffs2/compr_rubin.c
52171 @@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsigned char *data_in,
52172 int ret;
52173 uint32_t mysrclen, mydstlen;
52174
52175 + pax_track_stack();
52176 +
52177 mysrclen = *sourcelen;
52178 mydstlen = *dstlen - 8;
52179
52180 diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
52181 index b47679b..00d65d3 100644
52182 --- a/fs/jffs2/erase.c
52183 +++ b/fs/jffs2/erase.c
52184 @@ -434,7 +434,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
52185 struct jffs2_unknown_node marker = {
52186 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
52187 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
52188 - .totlen = cpu_to_je32(c->cleanmarker_size)
52189 + .totlen = cpu_to_je32(c->cleanmarker_size),
52190 + .hdr_crc = cpu_to_je32(0)
52191 };
52192
52193 jffs2_prealloc_raw_node_refs(c, jeb, 1);
52194 diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
52195 index 5ef7bac..4fd1e3c 100644
52196 --- a/fs/jffs2/wbuf.c
52197 +++ b/fs/jffs2/wbuf.c
52198 @@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
52199 {
52200 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
52201 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
52202 - .totlen = constant_cpu_to_je32(8)
52203 + .totlen = constant_cpu_to_je32(8),
52204 + .hdr_crc = constant_cpu_to_je32(0)
52205 };
52206
52207 /*
52208 diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c
52209 index 082e844..52012a1 100644
52210 --- a/fs/jffs2/xattr.c
52211 +++ b/fs/jffs2/xattr.c
52212 @@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct jffs2_sb_info *c)
52213
52214 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
52215
52216 + pax_track_stack();
52217 +
52218 /* Phase.1 : Merge same xref */
52219 for (i=0; i < XREF_TMPHASH_SIZE; i++)
52220 xref_tmphash[i] = NULL;
52221 diff --git a/fs/jfs/super.c b/fs/jfs/super.c
52222 index 2234c73..f6e6e6b 100644
52223 --- a/fs/jfs/super.c
52224 +++ b/fs/jfs/super.c
52225 @@ -793,7 +793,7 @@ static int __init init_jfs_fs(void)
52226
52227 jfs_inode_cachep =
52228 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
52229 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
52230 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
52231 init_once);
52232 if (jfs_inode_cachep == NULL)
52233 return -ENOMEM;
52234 diff --git a/fs/libfs.c b/fs/libfs.c
52235 index ba36e93..3153fce 100644
52236 --- a/fs/libfs.c
52237 +++ b/fs/libfs.c
52238 @@ -157,12 +157,20 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
52239
52240 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
52241 struct dentry *next;
52242 + char d_name[sizeof(next->d_iname)];
52243 + const unsigned char *name;
52244 +
52245 next = list_entry(p, struct dentry, d_u.d_child);
52246 if (d_unhashed(next) || !next->d_inode)
52247 continue;
52248
52249 spin_unlock(&dcache_lock);
52250 - if (filldir(dirent, next->d_name.name,
52251 + name = next->d_name.name;
52252 + if (name == next->d_iname) {
52253 + memcpy(d_name, name, next->d_name.len);
52254 + name = d_name;
52255 + }
52256 + if (filldir(dirent, name,
52257 next->d_name.len, filp->f_pos,
52258 next->d_inode->i_ino,
52259 dt_type(next->d_inode)) < 0)
52260 diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
52261 index c325a83..d15b07b 100644
52262 --- a/fs/lockd/clntproc.c
52263 +++ b/fs/lockd/clntproc.c
52264 @@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
52265 /*
52266 * Cookie counter for NLM requests
52267 */
52268 -static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
52269 +static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
52270
52271 void nlmclnt_next_cookie(struct nlm_cookie *c)
52272 {
52273 - u32 cookie = atomic_inc_return(&nlm_cookie);
52274 + u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
52275
52276 memcpy(c->data, &cookie, 4);
52277 c->len=4;
52278 @@ -621,6 +621,8 @@ nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl)
52279 struct nlm_rqst reqst, *req;
52280 int status;
52281
52282 + pax_track_stack();
52283 +
52284 req = &reqst;
52285 memset(req, 0, sizeof(*req));
52286 locks_init_lock(&req->a_args.lock.fl);
52287 diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
52288 index 1a54ae1..6a16c27 100644
52289 --- a/fs/lockd/svc.c
52290 +++ b/fs/lockd/svc.c
52291 @@ -43,7 +43,7 @@
52292
52293 static struct svc_program nlmsvc_program;
52294
52295 -struct nlmsvc_binding * nlmsvc_ops;
52296 +const struct nlmsvc_binding * nlmsvc_ops;
52297 EXPORT_SYMBOL_GPL(nlmsvc_ops);
52298
52299 static DEFINE_MUTEX(nlmsvc_mutex);
52300 diff --git a/fs/locks.c b/fs/locks.c
52301 index a8794f2..4041e55 100644
52302 --- a/fs/locks.c
52303 +++ b/fs/locks.c
52304 @@ -145,10 +145,28 @@ static LIST_HEAD(blocked_list);
52305
52306 static struct kmem_cache *filelock_cache __read_mostly;
52307
52308 +static void locks_init_lock_always(struct file_lock *fl)
52309 +{
52310 + fl->fl_next = NULL;
52311 + fl->fl_fasync = NULL;
52312 + fl->fl_owner = NULL;
52313 + fl->fl_pid = 0;
52314 + fl->fl_nspid = NULL;
52315 + fl->fl_file = NULL;
52316 + fl->fl_flags = 0;
52317 + fl->fl_type = 0;
52318 + fl->fl_start = fl->fl_end = 0;
52319 +}
52320 +
52321 /* Allocate an empty lock structure. */
52322 static struct file_lock *locks_alloc_lock(void)
52323 {
52324 - return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
52325 + struct file_lock *fl = kmem_cache_alloc(filelock_cache, GFP_KERNEL);
52326 +
52327 + if (fl)
52328 + locks_init_lock_always(fl);
52329 +
52330 + return fl;
52331 }
52332
52333 void locks_release_private(struct file_lock *fl)
52334 @@ -183,17 +201,9 @@ void locks_init_lock(struct file_lock *fl)
52335 INIT_LIST_HEAD(&fl->fl_link);
52336 INIT_LIST_HEAD(&fl->fl_block);
52337 init_waitqueue_head(&fl->fl_wait);
52338 - fl->fl_next = NULL;
52339 - fl->fl_fasync = NULL;
52340 - fl->fl_owner = NULL;
52341 - fl->fl_pid = 0;
52342 - fl->fl_nspid = NULL;
52343 - fl->fl_file = NULL;
52344 - fl->fl_flags = 0;
52345 - fl->fl_type = 0;
52346 - fl->fl_start = fl->fl_end = 0;
52347 fl->fl_ops = NULL;
52348 fl->fl_lmops = NULL;
52349 + locks_init_lock_always(fl);
52350 }
52351
52352 EXPORT_SYMBOL(locks_init_lock);
52353 @@ -2007,16 +2017,16 @@ void locks_remove_flock(struct file *filp)
52354 return;
52355
52356 if (filp->f_op && filp->f_op->flock) {
52357 - struct file_lock fl = {
52358 + struct file_lock flock = {
52359 .fl_pid = current->tgid,
52360 .fl_file = filp,
52361 .fl_flags = FL_FLOCK,
52362 .fl_type = F_UNLCK,
52363 .fl_end = OFFSET_MAX,
52364 };
52365 - filp->f_op->flock(filp, F_SETLKW, &fl);
52366 - if (fl.fl_ops && fl.fl_ops->fl_release_private)
52367 - fl.fl_ops->fl_release_private(&fl);
52368 + filp->f_op->flock(filp, F_SETLKW, &flock);
52369 + if (flock.fl_ops && flock.fl_ops->fl_release_private)
52370 + flock.fl_ops->fl_release_private(&flock);
52371 }
52372
52373 lock_kernel();
52374 diff --git a/fs/mbcache.c b/fs/mbcache.c
52375 index ec88ff3..b843a82 100644
52376 --- a/fs/mbcache.c
52377 +++ b/fs/mbcache.c
52378 @@ -266,9 +266,9 @@ mb_cache_create(const char *name, struct mb_cache_op *cache_op,
52379 if (!cache)
52380 goto fail;
52381 cache->c_name = name;
52382 - cache->c_op.free = NULL;
52383 + *(void **)&cache->c_op.free = NULL;
52384 if (cache_op)
52385 - cache->c_op.free = cache_op->free;
52386 + *(void **)&cache->c_op.free = cache_op->free;
52387 atomic_set(&cache->c_entry_count, 0);
52388 cache->c_bucket_bits = bucket_bits;
52389 #ifdef MB_CACHE_INDEXES_COUNT
52390 diff --git a/fs/namei.c b/fs/namei.c
52391 index b0afbd4..8d065a1 100644
52392 --- a/fs/namei.c
52393 +++ b/fs/namei.c
52394 @@ -224,6 +224,14 @@ int generic_permission(struct inode *inode, int mask,
52395 return ret;
52396
52397 /*
52398 + * Searching includes executable on directories, else just read.
52399 + */
52400 + mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
52401 + if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
52402 + if (capable(CAP_DAC_READ_SEARCH))
52403 + return 0;
52404 +
52405 + /*
52406 * Read/write DACs are always overridable.
52407 * Executable DACs are overridable if at least one exec bit is set.
52408 */
52409 @@ -231,14 +239,6 @@ int generic_permission(struct inode *inode, int mask,
52410 if (capable(CAP_DAC_OVERRIDE))
52411 return 0;
52412
52413 - /*
52414 - * Searching includes executable on directories, else just read.
52415 - */
52416 - mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
52417 - if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
52418 - if (capable(CAP_DAC_READ_SEARCH))
52419 - return 0;
52420 -
52421 return -EACCES;
52422 }
52423
52424 @@ -458,7 +458,8 @@ static int exec_permission_lite(struct inode *inode)
52425 if (!ret)
52426 goto ok;
52427
52428 - if (capable(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH))
52429 + if (capable_nolog(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH) ||
52430 + capable(CAP_DAC_OVERRIDE))
52431 goto ok;
52432
52433 return ret;
52434 @@ -638,7 +639,7 @@ static __always_inline int __do_follow_link(struct path *path, struct nameidata
52435 cookie = dentry->d_inode->i_op->follow_link(dentry, nd);
52436 error = PTR_ERR(cookie);
52437 if (!IS_ERR(cookie)) {
52438 - char *s = nd_get_link(nd);
52439 + const char *s = nd_get_link(nd);
52440 error = 0;
52441 if (s)
52442 error = __vfs_follow_link(nd, s);
52443 @@ -669,6 +670,13 @@ static inline int do_follow_link(struct path *path, struct nameidata *nd)
52444 err = security_inode_follow_link(path->dentry, nd);
52445 if (err)
52446 goto loop;
52447 +
52448 + if (gr_handle_follow_link(path->dentry->d_parent->d_inode,
52449 + path->dentry->d_inode, path->dentry, nd->path.mnt)) {
52450 + err = -EACCES;
52451 + goto loop;
52452 + }
52453 +
52454 current->link_count++;
52455 current->total_link_count++;
52456 nd->depth++;
52457 @@ -1016,11 +1024,19 @@ return_reval:
52458 break;
52459 }
52460 return_base:
52461 + if (!(nd->flags & (LOOKUP_CONTINUE | LOOKUP_PARENT)) &&
52462 + !gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
52463 + path_put(&nd->path);
52464 + return -ENOENT;
52465 + }
52466 return 0;
52467 out_dput:
52468 path_put_conditional(&next, nd);
52469 break;
52470 }
52471 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
52472 + err = -ENOENT;
52473 +
52474 path_put(&nd->path);
52475 return_err:
52476 return err;
52477 @@ -1091,13 +1107,20 @@ static int do_path_lookup(int dfd, const char *name,
52478 int retval = path_init(dfd, name, flags, nd);
52479 if (!retval)
52480 retval = path_walk(name, nd);
52481 - if (unlikely(!retval && !audit_dummy_context() && nd->path.dentry &&
52482 - nd->path.dentry->d_inode))
52483 - audit_inode(name, nd->path.dentry);
52484 +
52485 + if (likely(!retval)) {
52486 + if (nd->path.dentry && nd->path.dentry->d_inode) {
52487 + if (*name != '/' && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
52488 + retval = -ENOENT;
52489 + if (!audit_dummy_context())
52490 + audit_inode(name, nd->path.dentry);
52491 + }
52492 + }
52493 if (nd->root.mnt) {
52494 path_put(&nd->root);
52495 nd->root.mnt = NULL;
52496 }
52497 +
52498 return retval;
52499 }
52500
52501 @@ -1576,6 +1599,20 @@ int may_open(struct path *path, int acc_mode, int flag)
52502 if (error)
52503 goto err_out;
52504
52505 +
52506 + if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
52507 + error = -EPERM;
52508 + goto err_out;
52509 + }
52510 + if (gr_handle_rawio(inode)) {
52511 + error = -EPERM;
52512 + goto err_out;
52513 + }
52514 + if (!gr_acl_handle_open(dentry, path->mnt, acc_mode)) {
52515 + error = -EACCES;
52516 + goto err_out;
52517 + }
52518 +
52519 if (flag & O_TRUNC) {
52520 error = get_write_access(inode);
52521 if (error)
52522 @@ -1620,6 +1657,17 @@ static int __open_namei_create(struct nameidata *nd, struct path *path,
52523 {
52524 int error;
52525 struct dentry *dir = nd->path.dentry;
52526 + int acc_mode = ACC_MODE(flag);
52527 +
52528 + if (flag & O_TRUNC)
52529 + acc_mode |= MAY_WRITE;
52530 + if (flag & O_APPEND)
52531 + acc_mode |= MAY_APPEND;
52532 +
52533 + if (!gr_acl_handle_creat(path->dentry, dir, nd->path.mnt, flag, acc_mode, mode)) {
52534 + error = -EACCES;
52535 + goto out_unlock;
52536 + }
52537
52538 if (!IS_POSIXACL(dir->d_inode))
52539 mode &= ~current_umask();
52540 @@ -1627,6 +1675,8 @@ static int __open_namei_create(struct nameidata *nd, struct path *path,
52541 if (error)
52542 goto out_unlock;
52543 error = vfs_create(dir->d_inode, path->dentry, mode, nd);
52544 + if (!error)
52545 + gr_handle_create(path->dentry, nd->path.mnt);
52546 out_unlock:
52547 mutex_unlock(&dir->d_inode->i_mutex);
52548 dput(nd->path.dentry);
52549 @@ -1709,6 +1759,22 @@ struct file *do_filp_open(int dfd, const char *pathname,
52550 &nd, flag);
52551 if (error)
52552 return ERR_PTR(error);
52553 +
52554 + if (gr_handle_rofs_blockwrite(nd.path.dentry, nd.path.mnt, acc_mode)) {
52555 + error = -EPERM;
52556 + goto exit;
52557 + }
52558 +
52559 + if (gr_handle_rawio(nd.path.dentry->d_inode)) {
52560 + error = -EPERM;
52561 + goto exit;
52562 + }
52563 +
52564 + if (!gr_acl_handle_open(nd.path.dentry, nd.path.mnt, acc_mode)) {
52565 + error = -EACCES;
52566 + goto exit;
52567 + }
52568 +
52569 goto ok;
52570 }
52571
52572 @@ -1795,6 +1861,19 @@ do_last:
52573 /*
52574 * It already exists.
52575 */
52576 +
52577 + if (!gr_acl_handle_hidden_file(path.dentry, path.mnt)) {
52578 + error = -ENOENT;
52579 + goto exit_mutex_unlock;
52580 + }
52581 +
52582 + /* only check if O_CREAT is specified, all other checks need
52583 + to go into may_open */
52584 + if (gr_handle_fifo(path.dentry, path.mnt, dir, flag, acc_mode)) {
52585 + error = -EACCES;
52586 + goto exit_mutex_unlock;
52587 + }
52588 +
52589 mutex_unlock(&dir->d_inode->i_mutex);
52590 audit_inode(pathname, path.dentry);
52591
52592 @@ -1887,6 +1966,13 @@ do_link:
52593 error = security_inode_follow_link(path.dentry, &nd);
52594 if (error)
52595 goto exit_dput;
52596 +
52597 + if (gr_handle_follow_link(path.dentry->d_parent->d_inode, path.dentry->d_inode,
52598 + path.dentry, nd.path.mnt)) {
52599 + error = -EACCES;
52600 + goto exit_dput;
52601 + }
52602 +
52603 error = __do_follow_link(&path, &nd);
52604 if (error) {
52605 /* Does someone understand code flow here? Or it is only
52606 @@ -1984,6 +2070,10 @@ struct dentry *lookup_create(struct nameidata *nd, int is_dir)
52607 }
52608 return dentry;
52609 eexist:
52610 + if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
52611 + dput(dentry);
52612 + return ERR_PTR(-ENOENT);
52613 + }
52614 dput(dentry);
52615 dentry = ERR_PTR(-EEXIST);
52616 fail:
52617 @@ -2061,6 +2151,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
52618 error = may_mknod(mode);
52619 if (error)
52620 goto out_dput;
52621 +
52622 + if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
52623 + error = -EPERM;
52624 + goto out_dput;
52625 + }
52626 +
52627 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
52628 + error = -EACCES;
52629 + goto out_dput;
52630 + }
52631 +
52632 error = mnt_want_write(nd.path.mnt);
52633 if (error)
52634 goto out_dput;
52635 @@ -2081,6 +2182,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
52636 }
52637 out_drop_write:
52638 mnt_drop_write(nd.path.mnt);
52639 +
52640 + if (!error)
52641 + gr_handle_create(dentry, nd.path.mnt);
52642 out_dput:
52643 dput(dentry);
52644 out_unlock:
52645 @@ -2134,6 +2238,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
52646 if (IS_ERR(dentry))
52647 goto out_unlock;
52648
52649 + if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
52650 + error = -EACCES;
52651 + goto out_dput;
52652 + }
52653 +
52654 if (!IS_POSIXACL(nd.path.dentry->d_inode))
52655 mode &= ~current_umask();
52656 error = mnt_want_write(nd.path.mnt);
52657 @@ -2145,6 +2254,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
52658 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
52659 out_drop_write:
52660 mnt_drop_write(nd.path.mnt);
52661 +
52662 + if (!error)
52663 + gr_handle_create(dentry, nd.path.mnt);
52664 +
52665 out_dput:
52666 dput(dentry);
52667 out_unlock:
52668 @@ -2226,6 +2339,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
52669 char * name;
52670 struct dentry *dentry;
52671 struct nameidata nd;
52672 + ino_t saved_ino = 0;
52673 + dev_t saved_dev = 0;
52674
52675 error = user_path_parent(dfd, pathname, &nd, &name);
52676 if (error)
52677 @@ -2250,6 +2365,17 @@ static long do_rmdir(int dfd, const char __user *pathname)
52678 error = PTR_ERR(dentry);
52679 if (IS_ERR(dentry))
52680 goto exit2;
52681 +
52682 + if (dentry->d_inode != NULL) {
52683 + saved_ino = dentry->d_inode->i_ino;
52684 + saved_dev = gr_get_dev_from_dentry(dentry);
52685 +
52686 + if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
52687 + error = -EACCES;
52688 + goto exit3;
52689 + }
52690 + }
52691 +
52692 error = mnt_want_write(nd.path.mnt);
52693 if (error)
52694 goto exit3;
52695 @@ -2257,6 +2383,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
52696 if (error)
52697 goto exit4;
52698 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
52699 + if (!error && (saved_dev || saved_ino))
52700 + gr_handle_delete(saved_ino, saved_dev);
52701 exit4:
52702 mnt_drop_write(nd.path.mnt);
52703 exit3:
52704 @@ -2318,6 +2446,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
52705 struct dentry *dentry;
52706 struct nameidata nd;
52707 struct inode *inode = NULL;
52708 + ino_t saved_ino = 0;
52709 + dev_t saved_dev = 0;
52710
52711 error = user_path_parent(dfd, pathname, &nd, &name);
52712 if (error)
52713 @@ -2337,8 +2467,19 @@ static long do_unlinkat(int dfd, const char __user *pathname)
52714 if (nd.last.name[nd.last.len])
52715 goto slashes;
52716 inode = dentry->d_inode;
52717 - if (inode)
52718 + if (inode) {
52719 + if (inode->i_nlink <= 1) {
52720 + saved_ino = inode->i_ino;
52721 + saved_dev = gr_get_dev_from_dentry(dentry);
52722 + }
52723 +
52724 atomic_inc(&inode->i_count);
52725 +
52726 + if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
52727 + error = -EACCES;
52728 + goto exit2;
52729 + }
52730 + }
52731 error = mnt_want_write(nd.path.mnt);
52732 if (error)
52733 goto exit2;
52734 @@ -2346,6 +2487,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
52735 if (error)
52736 goto exit3;
52737 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
52738 + if (!error && (saved_ino || saved_dev))
52739 + gr_handle_delete(saved_ino, saved_dev);
52740 exit3:
52741 mnt_drop_write(nd.path.mnt);
52742 exit2:
52743 @@ -2424,6 +2567,11 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
52744 if (IS_ERR(dentry))
52745 goto out_unlock;
52746
52747 + if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
52748 + error = -EACCES;
52749 + goto out_dput;
52750 + }
52751 +
52752 error = mnt_want_write(nd.path.mnt);
52753 if (error)
52754 goto out_dput;
52755 @@ -2431,6 +2579,8 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
52756 if (error)
52757 goto out_drop_write;
52758 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
52759 + if (!error)
52760 + gr_handle_create(dentry, nd.path.mnt);
52761 out_drop_write:
52762 mnt_drop_write(nd.path.mnt);
52763 out_dput:
52764 @@ -2524,6 +2674,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
52765 error = PTR_ERR(new_dentry);
52766 if (IS_ERR(new_dentry))
52767 goto out_unlock;
52768 +
52769 + if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
52770 + old_path.dentry->d_inode,
52771 + old_path.dentry->d_inode->i_mode, to)) {
52772 + error = -EACCES;
52773 + goto out_dput;
52774 + }
52775 +
52776 + if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
52777 + old_path.dentry, old_path.mnt, to)) {
52778 + error = -EACCES;
52779 + goto out_dput;
52780 + }
52781 +
52782 error = mnt_want_write(nd.path.mnt);
52783 if (error)
52784 goto out_dput;
52785 @@ -2531,6 +2695,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
52786 if (error)
52787 goto out_drop_write;
52788 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
52789 + if (!error)
52790 + gr_handle_create(new_dentry, nd.path.mnt);
52791 out_drop_write:
52792 mnt_drop_write(nd.path.mnt);
52793 out_dput:
52794 @@ -2708,6 +2874,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
52795 char *to;
52796 int error;
52797
52798 + pax_track_stack();
52799 +
52800 error = user_path_parent(olddfd, oldname, &oldnd, &from);
52801 if (error)
52802 goto exit;
52803 @@ -2764,6 +2932,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
52804 if (new_dentry == trap)
52805 goto exit5;
52806
52807 + error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
52808 + old_dentry, old_dir->d_inode, oldnd.path.mnt,
52809 + to);
52810 + if (error)
52811 + goto exit5;
52812 +
52813 error = mnt_want_write(oldnd.path.mnt);
52814 if (error)
52815 goto exit5;
52816 @@ -2773,6 +2947,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
52817 goto exit6;
52818 error = vfs_rename(old_dir->d_inode, old_dentry,
52819 new_dir->d_inode, new_dentry);
52820 + if (!error)
52821 + gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
52822 + new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
52823 exit6:
52824 mnt_drop_write(oldnd.path.mnt);
52825 exit5:
52826 @@ -2798,6 +2975,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
52827
52828 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
52829 {
52830 + char tmpbuf[64];
52831 + const char *newlink;
52832 int len;
52833
52834 len = PTR_ERR(link);
52835 @@ -2807,7 +2986,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
52836 len = strlen(link);
52837 if (len > (unsigned) buflen)
52838 len = buflen;
52839 - if (copy_to_user(buffer, link, len))
52840 +
52841 + if (len < sizeof(tmpbuf)) {
52842 + memcpy(tmpbuf, link, len);
52843 + newlink = tmpbuf;
52844 + } else
52845 + newlink = link;
52846 +
52847 + if (copy_to_user(buffer, newlink, len))
52848 len = -EFAULT;
52849 out:
52850 return len;
52851 diff --git a/fs/namespace.c b/fs/namespace.c
52852 index 2beb0fb..11a95a5 100644
52853 --- a/fs/namespace.c
52854 +++ b/fs/namespace.c
52855 @@ -1083,6 +1083,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
52856 if (!(sb->s_flags & MS_RDONLY))
52857 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
52858 up_write(&sb->s_umount);
52859 +
52860 + gr_log_remount(mnt->mnt_devname, retval);
52861 +
52862 return retval;
52863 }
52864
52865 @@ -1104,6 +1107,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
52866 security_sb_umount_busy(mnt);
52867 up_write(&namespace_sem);
52868 release_mounts(&umount_list);
52869 +
52870 + gr_log_unmount(mnt->mnt_devname, retval);
52871 +
52872 return retval;
52873 }
52874
52875 @@ -1962,6 +1968,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
52876 if (retval)
52877 goto dput_out;
52878
52879 + if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
52880 + retval = -EPERM;
52881 + goto dput_out;
52882 + }
52883 +
52884 + if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
52885 + retval = -EPERM;
52886 + goto dput_out;
52887 + }
52888 +
52889 if (flags & MS_REMOUNT)
52890 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
52891 data_page);
52892 @@ -1976,6 +1992,9 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
52893 dev_name, data_page);
52894 dput_out:
52895 path_put(&path);
52896 +
52897 + gr_log_mount(dev_name, dir_name, retval);
52898 +
52899 return retval;
52900 }
52901
52902 @@ -2182,6 +2201,12 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
52903 goto out1;
52904 }
52905
52906 + if (gr_handle_chroot_pivot()) {
52907 + error = -EPERM;
52908 + path_put(&old);
52909 + goto out1;
52910 + }
52911 +
52912 read_lock(&current->fs->lock);
52913 root = current->fs->root;
52914 path_get(&current->fs->root);
52915 diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
52916 index b8b5b30..2bd9ccb 100644
52917 --- a/fs/ncpfs/dir.c
52918 +++ b/fs/ncpfs/dir.c
52919 @@ -275,6 +275,8 @@ __ncp_lookup_validate(struct dentry *dentry)
52920 int res, val = 0, len;
52921 __u8 __name[NCP_MAXPATHLEN + 1];
52922
52923 + pax_track_stack();
52924 +
52925 parent = dget_parent(dentry);
52926 dir = parent->d_inode;
52927
52928 @@ -799,6 +801,8 @@ static struct dentry *ncp_lookup(struct inode *dir, struct dentry *dentry, struc
52929 int error, res, len;
52930 __u8 __name[NCP_MAXPATHLEN + 1];
52931
52932 + pax_track_stack();
52933 +
52934 lock_kernel();
52935 error = -EIO;
52936 if (!ncp_conn_valid(server))
52937 @@ -883,10 +887,12 @@ int ncp_create_new(struct inode *dir, struct dentry *dentry, int mode,
52938 int error, result, len;
52939 int opmode;
52940 __u8 __name[NCP_MAXPATHLEN + 1];
52941 -
52942 +
52943 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
52944 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
52945
52946 + pax_track_stack();
52947 +
52948 error = -EIO;
52949 lock_kernel();
52950 if (!ncp_conn_valid(server))
52951 @@ -952,6 +958,8 @@ static int ncp_mkdir(struct inode *dir, struct dentry *dentry, int mode)
52952 int error, len;
52953 __u8 __name[NCP_MAXPATHLEN + 1];
52954
52955 + pax_track_stack();
52956 +
52957 DPRINTK("ncp_mkdir: making %s/%s\n",
52958 dentry->d_parent->d_name.name, dentry->d_name.name);
52959
52960 @@ -960,6 +968,8 @@ static int ncp_mkdir(struct inode *dir, struct dentry *dentry, int mode)
52961 if (!ncp_conn_valid(server))
52962 goto out;
52963
52964 + pax_track_stack();
52965 +
52966 ncp_age_dentry(server, dentry);
52967 len = sizeof(__name);
52968 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
52969 @@ -1114,6 +1124,8 @@ static int ncp_rename(struct inode *old_dir, struct dentry *old_dentry,
52970 int old_len, new_len;
52971 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
52972
52973 + pax_track_stack();
52974 +
52975 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
52976 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
52977 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
52978 diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
52979 index cf98da1..da890a9 100644
52980 --- a/fs/ncpfs/inode.c
52981 +++ b/fs/ncpfs/inode.c
52982 @@ -445,6 +445,8 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
52983 #endif
52984 struct ncp_entry_info finfo;
52985
52986 + pax_track_stack();
52987 +
52988 data.wdog_pid = NULL;
52989 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
52990 if (!server)
52991 diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
52992 index bfaef7b..e9d03ca 100644
52993 --- a/fs/nfs/inode.c
52994 +++ b/fs/nfs/inode.c
52995 @@ -156,7 +156,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
52996 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
52997 nfsi->attrtimeo_timestamp = jiffies;
52998
52999 - memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
53000 + memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
53001 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
53002 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
53003 else
53004 @@ -973,16 +973,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
53005 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
53006 }
53007
53008 -static atomic_long_t nfs_attr_generation_counter;
53009 +static atomic_long_unchecked_t nfs_attr_generation_counter;
53010
53011 static unsigned long nfs_read_attr_generation_counter(void)
53012 {
53013 - return atomic_long_read(&nfs_attr_generation_counter);
53014 + return atomic_long_read_unchecked(&nfs_attr_generation_counter);
53015 }
53016
53017 unsigned long nfs_inc_attr_generation_counter(void)
53018 {
53019 - return atomic_long_inc_return(&nfs_attr_generation_counter);
53020 + return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
53021 }
53022
53023 void nfs_fattr_init(struct nfs_fattr *fattr)
53024 diff --git a/fs/nfsd/lockd.c b/fs/nfsd/lockd.c
53025 index cc2f505..f6a236f 100644
53026 --- a/fs/nfsd/lockd.c
53027 +++ b/fs/nfsd/lockd.c
53028 @@ -66,7 +66,7 @@ nlm_fclose(struct file *filp)
53029 fput(filp);
53030 }
53031
53032 -static struct nlmsvc_binding nfsd_nlm_ops = {
53033 +static const struct nlmsvc_binding nfsd_nlm_ops = {
53034 .fopen = nlm_fopen, /* open file for locking */
53035 .fclose = nlm_fclose, /* close file */
53036 };
53037 diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
53038 index cfc3391..dcc083a 100644
53039 --- a/fs/nfsd/nfs4state.c
53040 +++ b/fs/nfsd/nfs4state.c
53041 @@ -3459,6 +3459,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
53042 unsigned int cmd;
53043 int err;
53044
53045 + pax_track_stack();
53046 +
53047 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
53048 (long long) lock->lk_offset,
53049 (long long) lock->lk_length);
53050 diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
53051 index 4a82a96..0d5fb49 100644
53052 --- a/fs/nfsd/nfs4xdr.c
53053 +++ b/fs/nfsd/nfs4xdr.c
53054 @@ -1751,6 +1751,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
53055 struct nfsd4_compoundres *resp = rqstp->rq_resp;
53056 u32 minorversion = resp->cstate.minorversion;
53057
53058 + pax_track_stack();
53059 +
53060 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
53061 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
53062 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
53063 diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
53064 index 2e09588..596421d 100644
53065 --- a/fs/nfsd/vfs.c
53066 +++ b/fs/nfsd/vfs.c
53067 @@ -937,7 +937,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
53068 } else {
53069 oldfs = get_fs();
53070 set_fs(KERNEL_DS);
53071 - host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
53072 + host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
53073 set_fs(oldfs);
53074 }
53075
53076 @@ -1060,7 +1060,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
53077
53078 /* Write the data. */
53079 oldfs = get_fs(); set_fs(KERNEL_DS);
53080 - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
53081 + host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
53082 set_fs(oldfs);
53083 if (host_err < 0)
53084 goto out_nfserr;
53085 @@ -1542,7 +1542,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
53086 */
53087
53088 oldfs = get_fs(); set_fs(KERNEL_DS);
53089 - host_err = inode->i_op->readlink(dentry, buf, *lenp);
53090 + host_err = inode->i_op->readlink(dentry, (char __force_user *)buf, *lenp);
53091 set_fs(oldfs);
53092
53093 if (host_err < 0)
53094 diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
53095 index f6af760..d0adf34 100644
53096 --- a/fs/nilfs2/ioctl.c
53097 +++ b/fs/nilfs2/ioctl.c
53098 @@ -480,7 +480,7 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
53099 unsigned int cmd, void __user *argp)
53100 {
53101 struct nilfs_argv argv[5];
53102 - const static size_t argsz[5] = {
53103 + static const size_t argsz[5] = {
53104 sizeof(struct nilfs_vdesc),
53105 sizeof(struct nilfs_period),
53106 sizeof(__u64),
53107 @@ -522,6 +522,9 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
53108 if (argv[n].v_nmembs > nsegs * nilfs->ns_blocks_per_segment)
53109 goto out_free;
53110
53111 + if (argv[n].v_nmembs >= UINT_MAX / argv[n].v_size)
53112 + goto out_free;
53113 +
53114 len = argv[n].v_size * argv[n].v_nmembs;
53115 base = (void __user *)(unsigned long)argv[n].v_base;
53116 if (len == 0) {
53117 diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c
53118 index 7e54e52..9337248 100644
53119 --- a/fs/notify/dnotify/dnotify.c
53120 +++ b/fs/notify/dnotify/dnotify.c
53121 @@ -173,7 +173,7 @@ static void dnotify_free_mark(struct fsnotify_mark_entry *entry)
53122 kmem_cache_free(dnotify_mark_entry_cache, dnentry);
53123 }
53124
53125 -static struct fsnotify_ops dnotify_fsnotify_ops = {
53126 +static const struct fsnotify_ops dnotify_fsnotify_ops = {
53127 .handle_event = dnotify_handle_event,
53128 .should_send_event = dnotify_should_send_event,
53129 .free_group_priv = NULL,
53130 diff --git a/fs/notify/notification.c b/fs/notify/notification.c
53131 index b8bf53b..c518688 100644
53132 --- a/fs/notify/notification.c
53133 +++ b/fs/notify/notification.c
53134 @@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
53135 * get set to 0 so it will never get 'freed'
53136 */
53137 static struct fsnotify_event q_overflow_event;
53138 -static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
53139 +static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
53140
53141 /**
53142 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
53143 @@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
53144 */
53145 u32 fsnotify_get_cookie(void)
53146 {
53147 - return atomic_inc_return(&fsnotify_sync_cookie);
53148 + return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
53149 }
53150 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
53151
53152 diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
53153 index 5a9e344..0f8cd28 100644
53154 --- a/fs/ntfs/dir.c
53155 +++ b/fs/ntfs/dir.c
53156 @@ -1328,7 +1328,7 @@ find_next_index_buffer:
53157 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
53158 ~(s64)(ndir->itype.index.block_size - 1)));
53159 /* Bounds checks. */
53160 - if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
53161 + if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
53162 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
53163 "inode 0x%lx or driver bug.", vdir->i_ino);
53164 goto err_out;
53165 diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
53166 index 663c0e3..b6868e9 100644
53167 --- a/fs/ntfs/file.c
53168 +++ b/fs/ntfs/file.c
53169 @@ -2243,6 +2243,6 @@ const struct inode_operations ntfs_file_inode_ops = {
53170 #endif /* NTFS_RW */
53171 };
53172
53173 -const struct file_operations ntfs_empty_file_ops = {};
53174 +const struct file_operations ntfs_empty_file_ops __read_only;
53175
53176 -const struct inode_operations ntfs_empty_inode_ops = {};
53177 +const struct inode_operations ntfs_empty_inode_ops __read_only;
53178 diff --git a/fs/ocfs2/cluster/masklog.c b/fs/ocfs2/cluster/masklog.c
53179 index 1cd2934..880b5d2 100644
53180 --- a/fs/ocfs2/cluster/masklog.c
53181 +++ b/fs/ocfs2/cluster/masklog.c
53182 @@ -135,7 +135,7 @@ static ssize_t mlog_store(struct kobject *obj, struct attribute *attr,
53183 return mlog_mask_store(mlog_attr->mask, buf, count);
53184 }
53185
53186 -static struct sysfs_ops mlog_attr_ops = {
53187 +static const struct sysfs_ops mlog_attr_ops = {
53188 .show = mlog_show,
53189 .store = mlog_store,
53190 };
53191 diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
53192 index ac10f83..2cd2607 100644
53193 --- a/fs/ocfs2/localalloc.c
53194 +++ b/fs/ocfs2/localalloc.c
53195 @@ -1188,7 +1188,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
53196 goto bail;
53197 }
53198
53199 - atomic_inc(&osb->alloc_stats.moves);
53200 + atomic_inc_unchecked(&osb->alloc_stats.moves);
53201
53202 status = 0;
53203 bail:
53204 diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
53205 index f010b22..9f9ed34 100644
53206 --- a/fs/ocfs2/namei.c
53207 +++ b/fs/ocfs2/namei.c
53208 @@ -1043,6 +1043,8 @@ static int ocfs2_rename(struct inode *old_dir,
53209 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
53210 struct ocfs2_dir_lookup_result target_insert = { NULL, };
53211
53212 + pax_track_stack();
53213 +
53214 /* At some point it might be nice to break this function up a
53215 * bit. */
53216
53217 diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
53218 index d963d86..914cfbd 100644
53219 --- a/fs/ocfs2/ocfs2.h
53220 +++ b/fs/ocfs2/ocfs2.h
53221 @@ -217,11 +217,11 @@ enum ocfs2_vol_state
53222
53223 struct ocfs2_alloc_stats
53224 {
53225 - atomic_t moves;
53226 - atomic_t local_data;
53227 - atomic_t bitmap_data;
53228 - atomic_t bg_allocs;
53229 - atomic_t bg_extends;
53230 + atomic_unchecked_t moves;
53231 + atomic_unchecked_t local_data;
53232 + atomic_unchecked_t bitmap_data;
53233 + atomic_unchecked_t bg_allocs;
53234 + atomic_unchecked_t bg_extends;
53235 };
53236
53237 enum ocfs2_local_alloc_state
53238 diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
53239 index 79b5dac..d322952 100644
53240 --- a/fs/ocfs2/suballoc.c
53241 +++ b/fs/ocfs2/suballoc.c
53242 @@ -623,7 +623,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
53243 mlog_errno(status);
53244 goto bail;
53245 }
53246 - atomic_inc(&osb->alloc_stats.bg_extends);
53247 + atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
53248
53249 /* You should never ask for this much metadata */
53250 BUG_ON(bits_wanted >
53251 @@ -1654,7 +1654,7 @@ int ocfs2_claim_metadata(struct ocfs2_super *osb,
53252 mlog_errno(status);
53253 goto bail;
53254 }
53255 - atomic_inc(&osb->alloc_stats.bg_allocs);
53256 + atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
53257
53258 *blkno_start = bg_blkno + (u64) *suballoc_bit_start;
53259 ac->ac_bits_given += (*num_bits);
53260 @@ -1728,7 +1728,7 @@ int ocfs2_claim_new_inode(struct ocfs2_super *osb,
53261 mlog_errno(status);
53262 goto bail;
53263 }
53264 - atomic_inc(&osb->alloc_stats.bg_allocs);
53265 + atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
53266
53267 BUG_ON(num_bits != 1);
53268
53269 @@ -1830,7 +1830,7 @@ int __ocfs2_claim_clusters(struct ocfs2_super *osb,
53270 cluster_start,
53271 num_clusters);
53272 if (!status)
53273 - atomic_inc(&osb->alloc_stats.local_data);
53274 + atomic_inc_unchecked(&osb->alloc_stats.local_data);
53275 } else {
53276 if (min_clusters > (osb->bitmap_cpg - 1)) {
53277 /* The only paths asking for contiguousness
53278 @@ -1858,7 +1858,7 @@ int __ocfs2_claim_clusters(struct ocfs2_super *osb,
53279 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
53280 bg_blkno,
53281 bg_bit_off);
53282 - atomic_inc(&osb->alloc_stats.bitmap_data);
53283 + atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
53284 }
53285 }
53286 if (status < 0) {
53287 diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
53288 index 9f55be4..a3f8048 100644
53289 --- a/fs/ocfs2/super.c
53290 +++ b/fs/ocfs2/super.c
53291 @@ -284,11 +284,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
53292 "%10s => GlobalAllocs: %d LocalAllocs: %d "
53293 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
53294 "Stats",
53295 - atomic_read(&osb->alloc_stats.bitmap_data),
53296 - atomic_read(&osb->alloc_stats.local_data),
53297 - atomic_read(&osb->alloc_stats.bg_allocs),
53298 - atomic_read(&osb->alloc_stats.moves),
53299 - atomic_read(&osb->alloc_stats.bg_extends));
53300 + atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
53301 + atomic_read_unchecked(&osb->alloc_stats.local_data),
53302 + atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
53303 + atomic_read_unchecked(&osb->alloc_stats.moves),
53304 + atomic_read_unchecked(&osb->alloc_stats.bg_extends));
53305
53306 out += snprintf(buf + out, len - out,
53307 "%10s => State: %u Descriptor: %llu Size: %u bits "
53308 @@ -2002,11 +2002,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
53309 spin_lock_init(&osb->osb_xattr_lock);
53310 ocfs2_init_inode_steal_slot(osb);
53311
53312 - atomic_set(&osb->alloc_stats.moves, 0);
53313 - atomic_set(&osb->alloc_stats.local_data, 0);
53314 - atomic_set(&osb->alloc_stats.bitmap_data, 0);
53315 - atomic_set(&osb->alloc_stats.bg_allocs, 0);
53316 - atomic_set(&osb->alloc_stats.bg_extends, 0);
53317 + atomic_set_unchecked(&osb->alloc_stats.moves, 0);
53318 + atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
53319 + atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
53320 + atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
53321 + atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
53322
53323 /* Copy the blockcheck stats from the superblock probe */
53324 osb->osb_ecc_stats = *stats;
53325 diff --git a/fs/open.c b/fs/open.c
53326 index 4f01e06..2a8057a 100644
53327 --- a/fs/open.c
53328 +++ b/fs/open.c
53329 @@ -275,6 +275,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
53330 error = locks_verify_truncate(inode, NULL, length);
53331 if (!error)
53332 error = security_path_truncate(&path, length, 0);
53333 +
53334 + if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
53335 + error = -EACCES;
53336 +
53337 if (!error) {
53338 vfs_dq_init(inode);
53339 error = do_truncate(path.dentry, length, 0, NULL);
53340 @@ -511,6 +515,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
53341 if (__mnt_is_readonly(path.mnt))
53342 res = -EROFS;
53343
53344 + if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
53345 + res = -EACCES;
53346 +
53347 out_path_release:
53348 path_put(&path);
53349 out:
53350 @@ -537,6 +544,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
53351 if (error)
53352 goto dput_and_out;
53353
53354 + gr_log_chdir(path.dentry, path.mnt);
53355 +
53356 set_fs_pwd(current->fs, &path);
53357
53358 dput_and_out:
53359 @@ -563,6 +572,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
53360 goto out_putf;
53361
53362 error = inode_permission(inode, MAY_EXEC | MAY_ACCESS);
53363 +
53364 + if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
53365 + error = -EPERM;
53366 +
53367 + if (!error)
53368 + gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
53369 +
53370 if (!error)
53371 set_fs_pwd(current->fs, &file->f_path);
53372 out_putf:
53373 @@ -588,7 +604,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
53374 if (!capable(CAP_SYS_CHROOT))
53375 goto dput_and_out;
53376
53377 + if (gr_handle_chroot_chroot(path.dentry, path.mnt))
53378 + goto dput_and_out;
53379 +
53380 set_fs_root(current->fs, &path);
53381 +
53382 + gr_handle_chroot_chdir(&path);
53383 +
53384 error = 0;
53385 dput_and_out:
53386 path_put(&path);
53387 @@ -596,66 +618,57 @@ out:
53388 return error;
53389 }
53390
53391 +static int chmod_common(struct path *path, umode_t mode)
53392 +{
53393 + struct inode *inode = path->dentry->d_inode;
53394 + struct iattr newattrs;
53395 + int error;
53396 +
53397 + error = mnt_want_write(path->mnt);
53398 + if (error)
53399 + return error;
53400 + mutex_lock(&inode->i_mutex);
53401 + if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
53402 + error = -EACCES;
53403 + goto out_unlock;
53404 + }
53405 + if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
53406 + error = -EPERM;
53407 + goto out_unlock;
53408 + }
53409 + newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
53410 + newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
53411 + error = notify_change(path->dentry, &newattrs);
53412 +out_unlock:
53413 + mutex_unlock(&inode->i_mutex);
53414 + mnt_drop_write(path->mnt);
53415 + return error;
53416 +}
53417 +
53418 SYSCALL_DEFINE2(fchmod, unsigned int, fd, mode_t, mode)
53419 {
53420 - struct inode * inode;
53421 - struct dentry * dentry;
53422 struct file * file;
53423 int err = -EBADF;
53424 - struct iattr newattrs;
53425
53426 file = fget(fd);
53427 - if (!file)
53428 - goto out;
53429 -
53430 - dentry = file->f_path.dentry;
53431 - inode = dentry->d_inode;
53432 -
53433 - audit_inode(NULL, dentry);
53434 -
53435 - err = mnt_want_write_file(file);
53436 - if (err)
53437 - goto out_putf;
53438 - mutex_lock(&inode->i_mutex);
53439 - if (mode == (mode_t) -1)
53440 - mode = inode->i_mode;
53441 - newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
53442 - newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
53443 - err = notify_change(dentry, &newattrs);
53444 - mutex_unlock(&inode->i_mutex);
53445 - mnt_drop_write(file->f_path.mnt);
53446 -out_putf:
53447 - fput(file);
53448 -out:
53449 + if (file) {
53450 + audit_inode(NULL, file->f_path.dentry);
53451 + err = chmod_common(&file->f_path, mode);
53452 + fput(file);
53453 + }
53454 return err;
53455 }
53456
53457 SYSCALL_DEFINE3(fchmodat, int, dfd, const char __user *, filename, mode_t, mode)
53458 {
53459 struct path path;
53460 - struct inode *inode;
53461 int error;
53462 - struct iattr newattrs;
53463
53464 error = user_path_at(dfd, filename, LOOKUP_FOLLOW, &path);
53465 - if (error)
53466 - goto out;
53467 - inode = path.dentry->d_inode;
53468 -
53469 - error = mnt_want_write(path.mnt);
53470 - if (error)
53471 - goto dput_and_out;
53472 - mutex_lock(&inode->i_mutex);
53473 - if (mode == (mode_t) -1)
53474 - mode = inode->i_mode;
53475 - newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
53476 - newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
53477 - error = notify_change(path.dentry, &newattrs);
53478 - mutex_unlock(&inode->i_mutex);
53479 - mnt_drop_write(path.mnt);
53480 -dput_and_out:
53481 - path_put(&path);
53482 -out:
53483 + if (!error) {
53484 + error = chmod_common(&path, mode);
53485 + path_put(&path);
53486 + }
53487 return error;
53488 }
53489
53490 @@ -664,12 +677,15 @@ SYSCALL_DEFINE2(chmod, const char __user *, filename, mode_t, mode)
53491 return sys_fchmodat(AT_FDCWD, filename, mode);
53492 }
53493
53494 -static int chown_common(struct dentry * dentry, uid_t user, gid_t group)
53495 +static int chown_common(struct dentry * dentry, uid_t user, gid_t group, struct vfsmount *mnt)
53496 {
53497 struct inode *inode = dentry->d_inode;
53498 int error;
53499 struct iattr newattrs;
53500
53501 + if (!gr_acl_handle_chown(dentry, mnt))
53502 + return -EACCES;
53503 +
53504 newattrs.ia_valid = ATTR_CTIME;
53505 if (user != (uid_t) -1) {
53506 newattrs.ia_valid |= ATTR_UID;
53507 @@ -700,7 +716,7 @@ SYSCALL_DEFINE3(chown, const char __user *, filename, uid_t, user, gid_t, group)
53508 error = mnt_want_write(path.mnt);
53509 if (error)
53510 goto out_release;
53511 - error = chown_common(path.dentry, user, group);
53512 + error = chown_common(path.dentry, user, group, path.mnt);
53513 mnt_drop_write(path.mnt);
53514 out_release:
53515 path_put(&path);
53516 @@ -725,7 +741,7 @@ SYSCALL_DEFINE5(fchownat, int, dfd, const char __user *, filename, uid_t, user,
53517 error = mnt_want_write(path.mnt);
53518 if (error)
53519 goto out_release;
53520 - error = chown_common(path.dentry, user, group);
53521 + error = chown_common(path.dentry, user, group, path.mnt);
53522 mnt_drop_write(path.mnt);
53523 out_release:
53524 path_put(&path);
53525 @@ -744,7 +760,7 @@ SYSCALL_DEFINE3(lchown, const char __user *, filename, uid_t, user, gid_t, group
53526 error = mnt_want_write(path.mnt);
53527 if (error)
53528 goto out_release;
53529 - error = chown_common(path.dentry, user, group);
53530 + error = chown_common(path.dentry, user, group, path.mnt);
53531 mnt_drop_write(path.mnt);
53532 out_release:
53533 path_put(&path);
53534 @@ -767,7 +783,7 @@ SYSCALL_DEFINE3(fchown, unsigned int, fd, uid_t, user, gid_t, group)
53535 goto out_fput;
53536 dentry = file->f_path.dentry;
53537 audit_inode(NULL, dentry);
53538 - error = chown_common(dentry, user, group);
53539 + error = chown_common(dentry, user, group, file->f_path.mnt);
53540 mnt_drop_write(file->f_path.mnt);
53541 out_fput:
53542 fput(file);
53543 @@ -1036,7 +1052,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, int mode)
53544 if (!IS_ERR(tmp)) {
53545 fd = get_unused_fd_flags(flags);
53546 if (fd >= 0) {
53547 - struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
53548 + struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
53549 if (IS_ERR(f)) {
53550 put_unused_fd(fd);
53551 fd = PTR_ERR(f);
53552 diff --git a/fs/partitions/efi.c b/fs/partitions/efi.c
53553 index 6ab70f4..f4103d1 100644
53554 --- a/fs/partitions/efi.c
53555 +++ b/fs/partitions/efi.c
53556 @@ -231,14 +231,14 @@ alloc_read_gpt_entries(struct block_device *bdev, gpt_header *gpt)
53557 if (!bdev || !gpt)
53558 return NULL;
53559
53560 + if (!le32_to_cpu(gpt->num_partition_entries))
53561 + return NULL;
53562 + pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
53563 + if (!pte)
53564 + return NULL;
53565 +
53566 count = le32_to_cpu(gpt->num_partition_entries) *
53567 le32_to_cpu(gpt->sizeof_partition_entry);
53568 - if (!count)
53569 - return NULL;
53570 - pte = kzalloc(count, GFP_KERNEL);
53571 - if (!pte)
53572 - return NULL;
53573 -
53574 if (read_lba(bdev, le64_to_cpu(gpt->partition_entry_lba),
53575 (u8 *) pte,
53576 count) < count) {
53577 diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c
53578 index dd6efdb..3babc6c 100644
53579 --- a/fs/partitions/ldm.c
53580 +++ b/fs/partitions/ldm.c
53581 @@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
53582 ldm_error ("A VBLK claims to have %d parts.", num);
53583 return false;
53584 }
53585 +
53586 if (rec >= num) {
53587 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
53588 return false;
53589 @@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
53590 goto found;
53591 }
53592
53593 - f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
53594 + f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
53595 if (!f) {
53596 ldm_crit ("Out of memory.");
53597 return false;
53598 diff --git a/fs/partitions/mac.c b/fs/partitions/mac.c
53599 index 5765198..7f8e9e0 100644
53600 --- a/fs/partitions/mac.c
53601 +++ b/fs/partitions/mac.c
53602 @@ -59,11 +59,11 @@ int mac_partition(struct parsed_partitions *state, struct block_device *bdev)
53603 return 0; /* not a MacOS disk */
53604 }
53605 blocks_in_map = be32_to_cpu(part->map_count);
53606 - if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
53607 - put_dev_sector(sect);
53608 - return 0;
53609 - }
53610 printk(" [mac]");
53611 + if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
53612 + put_dev_sector(sect);
53613 + return 0;
53614 + }
53615 for (slot = 1; slot <= blocks_in_map; ++slot) {
53616 int pos = slot * secsize;
53617 put_dev_sector(sect);
53618 diff --git a/fs/pipe.c b/fs/pipe.c
53619 index d0cc080..8a6f211 100644
53620 --- a/fs/pipe.c
53621 +++ b/fs/pipe.c
53622 @@ -401,9 +401,9 @@ redo:
53623 }
53624 if (bufs) /* More to do? */
53625 continue;
53626 - if (!pipe->writers)
53627 + if (!atomic_read(&pipe->writers))
53628 break;
53629 - if (!pipe->waiting_writers) {
53630 + if (!atomic_read(&pipe->waiting_writers)) {
53631 /* syscall merging: Usually we must not sleep
53632 * if O_NONBLOCK is set, or if we got some data.
53633 * But if a writer sleeps in kernel space, then
53634 @@ -462,7 +462,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
53635 mutex_lock(&inode->i_mutex);
53636 pipe = inode->i_pipe;
53637
53638 - if (!pipe->readers) {
53639 + if (!atomic_read(&pipe->readers)) {
53640 send_sig(SIGPIPE, current, 0);
53641 ret = -EPIPE;
53642 goto out;
53643 @@ -511,7 +511,7 @@ redo1:
53644 for (;;) {
53645 int bufs;
53646
53647 - if (!pipe->readers) {
53648 + if (!atomic_read(&pipe->readers)) {
53649 send_sig(SIGPIPE, current, 0);
53650 if (!ret)
53651 ret = -EPIPE;
53652 @@ -597,9 +597,9 @@ redo2:
53653 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
53654 do_wakeup = 0;
53655 }
53656 - pipe->waiting_writers++;
53657 + atomic_inc(&pipe->waiting_writers);
53658 pipe_wait(pipe);
53659 - pipe->waiting_writers--;
53660 + atomic_dec(&pipe->waiting_writers);
53661 }
53662 out:
53663 mutex_unlock(&inode->i_mutex);
53664 @@ -666,7 +666,7 @@ pipe_poll(struct file *filp, poll_table *wait)
53665 mask = 0;
53666 if (filp->f_mode & FMODE_READ) {
53667 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
53668 - if (!pipe->writers && filp->f_version != pipe->w_counter)
53669 + if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
53670 mask |= POLLHUP;
53671 }
53672
53673 @@ -676,7 +676,7 @@ pipe_poll(struct file *filp, poll_table *wait)
53674 * Most Unices do not set POLLERR for FIFOs but on Linux they
53675 * behave exactly like pipes for poll().
53676 */
53677 - if (!pipe->readers)
53678 + if (!atomic_read(&pipe->readers))
53679 mask |= POLLERR;
53680 }
53681
53682 @@ -690,10 +690,10 @@ pipe_release(struct inode *inode, int decr, int decw)
53683
53684 mutex_lock(&inode->i_mutex);
53685 pipe = inode->i_pipe;
53686 - pipe->readers -= decr;
53687 - pipe->writers -= decw;
53688 + atomic_sub(decr, &pipe->readers);
53689 + atomic_sub(decw, &pipe->writers);
53690
53691 - if (!pipe->readers && !pipe->writers) {
53692 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
53693 free_pipe_info(inode);
53694 } else {
53695 wake_up_interruptible_sync(&pipe->wait);
53696 @@ -783,7 +783,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
53697
53698 if (inode->i_pipe) {
53699 ret = 0;
53700 - inode->i_pipe->readers++;
53701 + atomic_inc(&inode->i_pipe->readers);
53702 }
53703
53704 mutex_unlock(&inode->i_mutex);
53705 @@ -800,7 +800,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
53706
53707 if (inode->i_pipe) {
53708 ret = 0;
53709 - inode->i_pipe->writers++;
53710 + atomic_inc(&inode->i_pipe->writers);
53711 }
53712
53713 mutex_unlock(&inode->i_mutex);
53714 @@ -818,9 +818,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
53715 if (inode->i_pipe) {
53716 ret = 0;
53717 if (filp->f_mode & FMODE_READ)
53718 - inode->i_pipe->readers++;
53719 + atomic_inc(&inode->i_pipe->readers);
53720 if (filp->f_mode & FMODE_WRITE)
53721 - inode->i_pipe->writers++;
53722 + atomic_inc(&inode->i_pipe->writers);
53723 }
53724
53725 mutex_unlock(&inode->i_mutex);
53726 @@ -905,7 +905,7 @@ void free_pipe_info(struct inode *inode)
53727 inode->i_pipe = NULL;
53728 }
53729
53730 -static struct vfsmount *pipe_mnt __read_mostly;
53731 +struct vfsmount *pipe_mnt __read_mostly;
53732 static int pipefs_delete_dentry(struct dentry *dentry)
53733 {
53734 /*
53735 @@ -945,7 +945,8 @@ static struct inode * get_pipe_inode(void)
53736 goto fail_iput;
53737 inode->i_pipe = pipe;
53738
53739 - pipe->readers = pipe->writers = 1;
53740 + atomic_set(&pipe->readers, 1);
53741 + atomic_set(&pipe->writers, 1);
53742 inode->i_fop = &rdwr_pipefifo_fops;
53743
53744 /*
53745 diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
53746 index 50f8f06..c5755df 100644
53747 --- a/fs/proc/Kconfig
53748 +++ b/fs/proc/Kconfig
53749 @@ -30,12 +30,12 @@ config PROC_FS
53750
53751 config PROC_KCORE
53752 bool "/proc/kcore support" if !ARM
53753 - depends on PROC_FS && MMU
53754 + depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
53755
53756 config PROC_VMCORE
53757 bool "/proc/vmcore support (EXPERIMENTAL)"
53758 - depends on PROC_FS && CRASH_DUMP
53759 - default y
53760 + depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
53761 + default n
53762 help
53763 Exports the dump image of crashed kernel in ELF format.
53764
53765 @@ -59,8 +59,8 @@ config PROC_SYSCTL
53766 limited in memory.
53767
53768 config PROC_PAGE_MONITOR
53769 - default y
53770 - depends on PROC_FS && MMU
53771 + default n
53772 + depends on PROC_FS && MMU && !GRKERNSEC
53773 bool "Enable /proc page monitoring" if EMBEDDED
53774 help
53775 Various /proc files exist to monitor process memory utilization:
53776 diff --git a/fs/proc/array.c b/fs/proc/array.c
53777 index c5ef152..28c94f7 100644
53778 --- a/fs/proc/array.c
53779 +++ b/fs/proc/array.c
53780 @@ -60,6 +60,7 @@
53781 #include <linux/tty.h>
53782 #include <linux/string.h>
53783 #include <linux/mman.h>
53784 +#include <linux/grsecurity.h>
53785 #include <linux/proc_fs.h>
53786 #include <linux/ioport.h>
53787 #include <linux/uaccess.h>
53788 @@ -321,6 +322,21 @@ static inline void task_context_switch_counts(struct seq_file *m,
53789 p->nivcsw);
53790 }
53791
53792 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
53793 +static inline void task_pax(struct seq_file *m, struct task_struct *p)
53794 +{
53795 + if (p->mm)
53796 + seq_printf(m, "PaX:\t%c%c%c%c%c\n",
53797 + p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
53798 + p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
53799 + p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
53800 + p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
53801 + p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
53802 + else
53803 + seq_printf(m, "PaX:\t-----\n");
53804 +}
53805 +#endif
53806 +
53807 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
53808 struct pid *pid, struct task_struct *task)
53809 {
53810 @@ -337,9 +353,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
53811 task_cap(m, task);
53812 cpuset_task_status_allowed(m, task);
53813 task_context_switch_counts(m, task);
53814 +
53815 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
53816 + task_pax(m, task);
53817 +#endif
53818 +
53819 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
53820 + task_grsec_rbac(m, task);
53821 +#endif
53822 +
53823 return 0;
53824 }
53825
53826 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53827 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
53828 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
53829 + _mm->pax_flags & MF_PAX_SEGMEXEC))
53830 +#endif
53831 +
53832 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
53833 struct pid *pid, struct task_struct *task, int whole)
53834 {
53835 @@ -358,9 +389,18 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
53836 cputime_t cutime, cstime, utime, stime;
53837 cputime_t cgtime, gtime;
53838 unsigned long rsslim = 0;
53839 - char tcomm[sizeof(task->comm)];
53840 + char tcomm[sizeof(task->comm)] = { 0 };
53841 unsigned long flags;
53842
53843 + pax_track_stack();
53844 +
53845 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53846 + if (current->exec_id != m->exec_id) {
53847 + gr_log_badprocpid("stat");
53848 + return 0;
53849 + }
53850 +#endif
53851 +
53852 state = *get_task_state(task);
53853 vsize = eip = esp = 0;
53854 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
53855 @@ -433,6 +473,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
53856 gtime = task_gtime(task);
53857 }
53858
53859 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53860 + if (PAX_RAND_FLAGS(mm)) {
53861 + eip = 0;
53862 + esp = 0;
53863 + wchan = 0;
53864 + }
53865 +#endif
53866 +#ifdef CONFIG_GRKERNSEC_HIDESYM
53867 + wchan = 0;
53868 + eip =0;
53869 + esp =0;
53870 +#endif
53871 +
53872 /* scale priority and nice values from timeslices to -20..20 */
53873 /* to make it look like a "normal" Unix priority/nice value */
53874 priority = task_prio(task);
53875 @@ -473,9 +526,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
53876 vsize,
53877 mm ? get_mm_rss(mm) : 0,
53878 rsslim,
53879 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53880 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
53881 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
53882 + PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
53883 +#else
53884 mm ? (permitted ? mm->start_code : 1) : 0,
53885 mm ? (permitted ? mm->end_code : 1) : 0,
53886 (permitted && mm) ? mm->start_stack : 0,
53887 +#endif
53888 esp,
53889 eip,
53890 /* The signal information here is obsolete.
53891 @@ -517,8 +576,16 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
53892 struct pid *pid, struct task_struct *task)
53893 {
53894 int size = 0, resident = 0, shared = 0, text = 0, lib = 0, data = 0;
53895 - struct mm_struct *mm = get_task_mm(task);
53896 + struct mm_struct *mm;
53897
53898 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53899 + if (current->exec_id != m->exec_id) {
53900 + gr_log_badprocpid("statm");
53901 + return 0;
53902 + }
53903 +#endif
53904 +
53905 + mm = get_task_mm(task);
53906 if (mm) {
53907 size = task_statm(mm, &shared, &text, &data, &resident);
53908 mmput(mm);
53909 @@ -528,3 +595,18 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
53910
53911 return 0;
53912 }
53913 +
53914 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
53915 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
53916 +{
53917 + u32 curr_ip = 0;
53918 + unsigned long flags;
53919 +
53920 + if (lock_task_sighand(task, &flags)) {
53921 + curr_ip = task->signal->curr_ip;
53922 + unlock_task_sighand(task, &flags);
53923 + }
53924 +
53925 + return sprintf(buffer, "%pI4\n", &curr_ip);
53926 +}
53927 +#endif
53928 diff --git a/fs/proc/base.c b/fs/proc/base.c
53929 index 67f7dc0..a86ad9a 100644
53930 --- a/fs/proc/base.c
53931 +++ b/fs/proc/base.c
53932 @@ -102,6 +102,22 @@ struct pid_entry {
53933 union proc_op op;
53934 };
53935
53936 +struct getdents_callback {
53937 + struct linux_dirent __user * current_dir;
53938 + struct linux_dirent __user * previous;
53939 + struct file * file;
53940 + int count;
53941 + int error;
53942 +};
53943 +
53944 +static int gr_fake_filldir(void * __buf, const char *name, int namlen,
53945 + loff_t offset, u64 ino, unsigned int d_type)
53946 +{
53947 + struct getdents_callback * buf = (struct getdents_callback *) __buf;
53948 + buf->error = -EINVAL;
53949 + return 0;
53950 +}
53951 +
53952 #define NOD(NAME, MODE, IOP, FOP, OP) { \
53953 .name = (NAME), \
53954 .len = sizeof(NAME) - 1, \
53955 @@ -213,6 +229,9 @@ static int check_mem_permission(struct task_struct *task)
53956 if (task == current)
53957 return 0;
53958
53959 + if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
53960 + return -EPERM;
53961 +
53962 /*
53963 * If current is actively ptrace'ing, and would also be
53964 * permitted to freshly attach with ptrace now, permit it.
53965 @@ -260,6 +279,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
53966 if (!mm->arg_end)
53967 goto out_mm; /* Shh! No looking before we're done */
53968
53969 + if (gr_acl_handle_procpidmem(task))
53970 + goto out_mm;
53971 +
53972 len = mm->arg_end - mm->arg_start;
53973
53974 if (len > PAGE_SIZE)
53975 @@ -287,12 +309,28 @@ out:
53976 return res;
53977 }
53978
53979 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53980 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
53981 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
53982 + _mm->pax_flags & MF_PAX_SEGMEXEC))
53983 +#endif
53984 +
53985 static int proc_pid_auxv(struct task_struct *task, char *buffer)
53986 {
53987 int res = 0;
53988 struct mm_struct *mm = get_task_mm(task);
53989 if (mm) {
53990 unsigned int nwords = 0;
53991 +
53992 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53993 + /* allow if we're currently ptracing this task */
53994 + if (PAX_RAND_FLAGS(mm) &&
53995 + (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
53996 + mmput(mm);
53997 + return 0;
53998 + }
53999 +#endif
54000 +
54001 do {
54002 nwords += 2;
54003 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
54004 @@ -306,7 +344,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
54005 }
54006
54007
54008 -#ifdef CONFIG_KALLSYMS
54009 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
54010 /*
54011 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
54012 * Returns the resolved symbol. If that fails, simply return the address.
54013 @@ -345,7 +383,7 @@ static void unlock_trace(struct task_struct *task)
54014 mutex_unlock(&task->cred_guard_mutex);
54015 }
54016
54017 -#ifdef CONFIG_STACKTRACE
54018 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
54019
54020 #define MAX_STACK_TRACE_DEPTH 64
54021
54022 @@ -545,7 +583,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
54023 return count;
54024 }
54025
54026 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
54027 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
54028 static int proc_pid_syscall(struct task_struct *task, char *buffer)
54029 {
54030 long nr;
54031 @@ -574,7 +612,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
54032 /************************************************************************/
54033
54034 /* permission checks */
54035 -static int proc_fd_access_allowed(struct inode *inode)
54036 +static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
54037 {
54038 struct task_struct *task;
54039 int allowed = 0;
54040 @@ -584,7 +622,10 @@ static int proc_fd_access_allowed(struct inode *inode)
54041 */
54042 task = get_proc_task(inode);
54043 if (task) {
54044 - allowed = ptrace_may_access(task, PTRACE_MODE_READ);
54045 + if (log)
54046 + allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
54047 + else
54048 + allowed = ptrace_may_access(task, PTRACE_MODE_READ);
54049 put_task_struct(task);
54050 }
54051 return allowed;
54052 @@ -806,9 +847,16 @@ static const struct file_operations proc_single_file_operations = {
54053 static int mem_open(struct inode* inode, struct file* file)
54054 {
54055 file->private_data = (void*)((long)current->self_exec_id);
54056 +
54057 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54058 + file->f_version = current->exec_id;
54059 +#endif
54060 +
54061 return 0;
54062 }
54063
54064 +static int task_dumpable(struct task_struct *task);
54065 +
54066 static ssize_t mem_read(struct file * file, char __user * buf,
54067 size_t count, loff_t *ppos)
54068 {
54069 @@ -818,6 +866,13 @@ static ssize_t mem_read(struct file * file, char __user * buf,
54070 int ret = -ESRCH;
54071 struct mm_struct *mm;
54072
54073 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54074 + if (file->f_version != current->exec_id) {
54075 + gr_log_badprocpid("mem");
54076 + return 0;
54077 + }
54078 +#endif
54079 +
54080 if (!task)
54081 goto out_no_task;
54082
54083 @@ -963,6 +1018,9 @@ static ssize_t environ_read(struct file *file, char __user *buf,
54084 if (!task)
54085 goto out_no_task;
54086
54087 + if (gr_acl_handle_procpidmem(task))
54088 + goto out;
54089 +
54090 if (!ptrace_may_access(task, PTRACE_MODE_READ))
54091 goto out;
54092
54093 @@ -1377,7 +1435,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
54094 path_put(&nd->path);
54095
54096 /* Are we allowed to snoop on the tasks file descriptors? */
54097 - if (!proc_fd_access_allowed(inode))
54098 + if (!proc_fd_access_allowed(inode,0))
54099 goto out;
54100
54101 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
54102 @@ -1417,8 +1475,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
54103 struct path path;
54104
54105 /* Are we allowed to snoop on the tasks file descriptors? */
54106 - if (!proc_fd_access_allowed(inode))
54107 - goto out;
54108 + /* logging this is needed for learning on chromium to work properly,
54109 + but we don't want to flood the logs from 'ps' which does a readlink
54110 + on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
54111 + CAP_SYS_PTRACE as it's not necessary for its basic functionality
54112 + */
54113 + if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
54114 + if (!proc_fd_access_allowed(inode,0))
54115 + goto out;
54116 + } else {
54117 + if (!proc_fd_access_allowed(inode,1))
54118 + goto out;
54119 + }
54120
54121 error = PROC_I(inode)->op.proc_get_link(inode, &path);
54122 if (error)
54123 @@ -1483,7 +1551,11 @@ static struct inode *proc_pid_make_inode(struct super_block * sb, struct task_st
54124 rcu_read_lock();
54125 cred = __task_cred(task);
54126 inode->i_uid = cred->euid;
54127 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
54128 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
54129 +#else
54130 inode->i_gid = cred->egid;
54131 +#endif
54132 rcu_read_unlock();
54133 }
54134 security_task_to_inode(task, inode);
54135 @@ -1501,6 +1573,9 @@ static int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat
54136 struct inode *inode = dentry->d_inode;
54137 struct task_struct *task;
54138 const struct cred *cred;
54139 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54140 + const struct cred *tmpcred = current_cred();
54141 +#endif
54142
54143 generic_fillattr(inode, stat);
54144
54145 @@ -1508,13 +1583,41 @@ static int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat
54146 stat->uid = 0;
54147 stat->gid = 0;
54148 task = pid_task(proc_pid(inode), PIDTYPE_PID);
54149 +
54150 + if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
54151 + rcu_read_unlock();
54152 + return -ENOENT;
54153 + }
54154 +
54155 if (task) {
54156 + cred = __task_cred(task);
54157 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54158 + if (!tmpcred->uid || (tmpcred->uid == cred->uid)
54159 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
54160 + || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
54161 +#endif
54162 + ) {
54163 +#endif
54164 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
54165 +#ifdef CONFIG_GRKERNSEC_PROC_USER
54166 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
54167 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54168 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
54169 +#endif
54170 task_dumpable(task)) {
54171 - cred = __task_cred(task);
54172 stat->uid = cred->euid;
54173 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
54174 + stat->gid = CONFIG_GRKERNSEC_PROC_GID;
54175 +#else
54176 stat->gid = cred->egid;
54177 +#endif
54178 }
54179 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54180 + } else {
54181 + rcu_read_unlock();
54182 + return -ENOENT;
54183 + }
54184 +#endif
54185 }
54186 rcu_read_unlock();
54187 return 0;
54188 @@ -1545,11 +1648,20 @@ static int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
54189
54190 if (task) {
54191 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
54192 +#ifdef CONFIG_GRKERNSEC_PROC_USER
54193 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
54194 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54195 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
54196 +#endif
54197 task_dumpable(task)) {
54198 rcu_read_lock();
54199 cred = __task_cred(task);
54200 inode->i_uid = cred->euid;
54201 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
54202 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
54203 +#else
54204 inode->i_gid = cred->egid;
54205 +#endif
54206 rcu_read_unlock();
54207 } else {
54208 inode->i_uid = 0;
54209 @@ -1670,7 +1782,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
54210 int fd = proc_fd(inode);
54211
54212 if (task) {
54213 - files = get_files_struct(task);
54214 + if (!gr_acl_handle_procpidmem(task))
54215 + files = get_files_struct(task);
54216 put_task_struct(task);
54217 }
54218 if (files) {
54219 @@ -1922,12 +2035,22 @@ static const struct file_operations proc_fd_operations = {
54220 static int proc_fd_permission(struct inode *inode, int mask)
54221 {
54222 int rv;
54223 + struct task_struct *task;
54224
54225 rv = generic_permission(inode, mask, NULL);
54226 - if (rv == 0)
54227 - return 0;
54228 +
54229 if (task_pid(current) == proc_pid(inode))
54230 rv = 0;
54231 +
54232 + task = get_proc_task(inode);
54233 + if (task == NULL)
54234 + return rv;
54235 +
54236 + if (gr_acl_handle_procpidmem(task))
54237 + rv = -EACCES;
54238 +
54239 + put_task_struct(task);
54240 +
54241 return rv;
54242 }
54243
54244 @@ -2036,6 +2159,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
54245 if (!task)
54246 goto out_no_task;
54247
54248 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
54249 + goto out;
54250 +
54251 /*
54252 * Yes, it does not scale. And it should not. Don't add
54253 * new entries into /proc/<tgid>/ without very good reasons.
54254 @@ -2080,6 +2206,9 @@ static int proc_pident_readdir(struct file *filp,
54255 if (!task)
54256 goto out_no_task;
54257
54258 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
54259 + goto out;
54260 +
54261 ret = 0;
54262 i = filp->f_pos;
54263 switch (i) {
54264 @@ -2347,7 +2476,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
54265 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
54266 void *cookie)
54267 {
54268 - char *s = nd_get_link(nd);
54269 + const char *s = nd_get_link(nd);
54270 if (!IS_ERR(s))
54271 __putname(s);
54272 }
54273 @@ -2553,7 +2682,7 @@ static const struct pid_entry tgid_base_stuff[] = {
54274 #ifdef CONFIG_SCHED_DEBUG
54275 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
54276 #endif
54277 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
54278 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
54279 INF("syscall", S_IRUGO, proc_pid_syscall),
54280 #endif
54281 INF("cmdline", S_IRUGO, proc_pid_cmdline),
54282 @@ -2578,10 +2707,10 @@ static const struct pid_entry tgid_base_stuff[] = {
54283 #ifdef CONFIG_SECURITY
54284 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
54285 #endif
54286 -#ifdef CONFIG_KALLSYMS
54287 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
54288 INF("wchan", S_IRUGO, proc_pid_wchan),
54289 #endif
54290 -#ifdef CONFIG_STACKTRACE
54291 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
54292 ONE("stack", S_IRUGO, proc_pid_stack),
54293 #endif
54294 #ifdef CONFIG_SCHEDSTATS
54295 @@ -2611,6 +2740,9 @@ static const struct pid_entry tgid_base_stuff[] = {
54296 #ifdef CONFIG_TASK_IO_ACCOUNTING
54297 INF("io", S_IRUSR, proc_tgid_io_accounting),
54298 #endif
54299 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
54300 + INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
54301 +#endif
54302 };
54303
54304 static int proc_tgid_base_readdir(struct file * filp,
54305 @@ -2735,7 +2867,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
54306 if (!inode)
54307 goto out;
54308
54309 +#ifdef CONFIG_GRKERNSEC_PROC_USER
54310 + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
54311 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54312 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
54313 + inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
54314 +#else
54315 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
54316 +#endif
54317 inode->i_op = &proc_tgid_base_inode_operations;
54318 inode->i_fop = &proc_tgid_base_operations;
54319 inode->i_flags|=S_IMMUTABLE;
54320 @@ -2777,7 +2916,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
54321 if (!task)
54322 goto out;
54323
54324 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
54325 + goto out_put_task;
54326 +
54327 result = proc_pid_instantiate(dir, dentry, task, NULL);
54328 +out_put_task:
54329 put_task_struct(task);
54330 out:
54331 return result;
54332 @@ -2842,6 +2985,11 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
54333 {
54334 unsigned int nr;
54335 struct task_struct *reaper;
54336 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54337 + const struct cred *tmpcred = current_cred();
54338 + const struct cred *itercred;
54339 +#endif
54340 + filldir_t __filldir = filldir;
54341 struct tgid_iter iter;
54342 struct pid_namespace *ns;
54343
54344 @@ -2865,8 +3013,27 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
54345 for (iter = next_tgid(ns, iter);
54346 iter.task;
54347 iter.tgid += 1, iter = next_tgid(ns, iter)) {
54348 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54349 + rcu_read_lock();
54350 + itercred = __task_cred(iter.task);
54351 +#endif
54352 + if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
54353 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54354 + || (tmpcred->uid && (itercred->uid != tmpcred->uid)
54355 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
54356 + && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
54357 +#endif
54358 + )
54359 +#endif
54360 + )
54361 + __filldir = &gr_fake_filldir;
54362 + else
54363 + __filldir = filldir;
54364 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54365 + rcu_read_unlock();
54366 +#endif
54367 filp->f_pos = iter.tgid + TGID_OFFSET;
54368 - if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
54369 + if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
54370 put_task_struct(iter.task);
54371 goto out;
54372 }
54373 @@ -2892,7 +3059,7 @@ static const struct pid_entry tid_base_stuff[] = {
54374 #ifdef CONFIG_SCHED_DEBUG
54375 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
54376 #endif
54377 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
54378 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
54379 INF("syscall", S_IRUGO, proc_pid_syscall),
54380 #endif
54381 INF("cmdline", S_IRUGO, proc_pid_cmdline),
54382 @@ -2916,10 +3083,10 @@ static const struct pid_entry tid_base_stuff[] = {
54383 #ifdef CONFIG_SECURITY
54384 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
54385 #endif
54386 -#ifdef CONFIG_KALLSYMS
54387 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
54388 INF("wchan", S_IRUGO, proc_pid_wchan),
54389 #endif
54390 -#ifdef CONFIG_STACKTRACE
54391 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
54392 ONE("stack", S_IRUGO, proc_pid_stack),
54393 #endif
54394 #ifdef CONFIG_SCHEDSTATS
54395 diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
54396 index 82676e3..5f8518a 100644
54397 --- a/fs/proc/cmdline.c
54398 +++ b/fs/proc/cmdline.c
54399 @@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
54400
54401 static int __init proc_cmdline_init(void)
54402 {
54403 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
54404 + proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
54405 +#else
54406 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
54407 +#endif
54408 return 0;
54409 }
54410 module_init(proc_cmdline_init);
54411 diff --git a/fs/proc/devices.c b/fs/proc/devices.c
54412 index 59ee7da..469b4b6 100644
54413 --- a/fs/proc/devices.c
54414 +++ b/fs/proc/devices.c
54415 @@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
54416
54417 static int __init proc_devices_init(void)
54418 {
54419 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
54420 + proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
54421 +#else
54422 proc_create("devices", 0, NULL, &proc_devinfo_operations);
54423 +#endif
54424 return 0;
54425 }
54426 module_init(proc_devices_init);
54427 diff --git a/fs/proc/inode.c b/fs/proc/inode.c
54428 index d78ade3..81767f9 100644
54429 --- a/fs/proc/inode.c
54430 +++ b/fs/proc/inode.c
54431 @@ -18,12 +18,19 @@
54432 #include <linux/module.h>
54433 #include <linux/smp_lock.h>
54434 #include <linux/sysctl.h>
54435 +#include <linux/grsecurity.h>
54436
54437 #include <asm/system.h>
54438 #include <asm/uaccess.h>
54439
54440 #include "internal.h"
54441
54442 +#ifdef CONFIG_PROC_SYSCTL
54443 +extern const struct inode_operations proc_sys_inode_operations;
54444 +extern const struct inode_operations proc_sys_dir_operations;
54445 +#endif
54446 +
54447 +
54448 struct proc_dir_entry *de_get(struct proc_dir_entry *de)
54449 {
54450 atomic_inc(&de->count);
54451 @@ -62,6 +69,13 @@ static void proc_delete_inode(struct inode *inode)
54452 de_put(de);
54453 if (PROC_I(inode)->sysctl)
54454 sysctl_head_put(PROC_I(inode)->sysctl);
54455 +
54456 +#ifdef CONFIG_PROC_SYSCTL
54457 + if (inode->i_op == &proc_sys_inode_operations ||
54458 + inode->i_op == &proc_sys_dir_operations)
54459 + gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
54460 +#endif
54461 +
54462 clear_inode(inode);
54463 }
54464
54465 @@ -457,7 +471,11 @@ struct inode *proc_get_inode(struct super_block *sb, unsigned int ino,
54466 if (de->mode) {
54467 inode->i_mode = de->mode;
54468 inode->i_uid = de->uid;
54469 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
54470 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
54471 +#else
54472 inode->i_gid = de->gid;
54473 +#endif
54474 }
54475 if (de->size)
54476 inode->i_size = de->size;
54477 diff --git a/fs/proc/internal.h b/fs/proc/internal.h
54478 index 753ca37..26bcf3b 100644
54479 --- a/fs/proc/internal.h
54480 +++ b/fs/proc/internal.h
54481 @@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
54482 struct pid *pid, struct task_struct *task);
54483 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
54484 struct pid *pid, struct task_struct *task);
54485 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
54486 +extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
54487 +#endif
54488 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
54489
54490 extern const struct file_operations proc_maps_operations;
54491 diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
54492 index b442dac..aab29cb 100644
54493 --- a/fs/proc/kcore.c
54494 +++ b/fs/proc/kcore.c
54495 @@ -320,6 +320,8 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
54496 off_t offset = 0;
54497 struct kcore_list *m;
54498
54499 + pax_track_stack();
54500 +
54501 /* setup ELF header */
54502 elf = (struct elfhdr *) bufp;
54503 bufp += sizeof(struct elfhdr);
54504 @@ -477,9 +479,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
54505 * the addresses in the elf_phdr on our list.
54506 */
54507 start = kc_offset_to_vaddr(*fpos - elf_buflen);
54508 - if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
54509 + tsz = PAGE_SIZE - (start & ~PAGE_MASK);
54510 + if (tsz > buflen)
54511 tsz = buflen;
54512 -
54513 +
54514 while (buflen) {
54515 struct kcore_list *m;
54516
54517 @@ -508,20 +511,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
54518 kfree(elf_buf);
54519 } else {
54520 if (kern_addr_valid(start)) {
54521 - unsigned long n;
54522 + char *elf_buf;
54523 + mm_segment_t oldfs;
54524
54525 - n = copy_to_user(buffer, (char *)start, tsz);
54526 - /*
54527 - * We cannot distingush between fault on source
54528 - * and fault on destination. When this happens
54529 - * we clear too and hope it will trigger the
54530 - * EFAULT again.
54531 - */
54532 - if (n) {
54533 - if (clear_user(buffer + tsz - n,
54534 - n))
54535 + elf_buf = kmalloc(tsz, GFP_KERNEL);
54536 + if (!elf_buf)
54537 + return -ENOMEM;
54538 + oldfs = get_fs();
54539 + set_fs(KERNEL_DS);
54540 + if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
54541 + set_fs(oldfs);
54542 + if (copy_to_user(buffer, elf_buf, tsz)) {
54543 + kfree(elf_buf);
54544 return -EFAULT;
54545 + }
54546 }
54547 + set_fs(oldfs);
54548 + kfree(elf_buf);
54549 } else {
54550 if (clear_user(buffer, tsz))
54551 return -EFAULT;
54552 @@ -541,6 +547,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
54553
54554 static int open_kcore(struct inode *inode, struct file *filp)
54555 {
54556 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
54557 + return -EPERM;
54558 +#endif
54559 if (!capable(CAP_SYS_RAWIO))
54560 return -EPERM;
54561 if (kcore_need_update)
54562 diff --git a/fs/proc/kmsg.c b/fs/proc/kmsg.c
54563 index 7ca7834..cfe90a4 100644
54564 --- a/fs/proc/kmsg.c
54565 +++ b/fs/proc/kmsg.c
54566 @@ -12,37 +12,37 @@
54567 #include <linux/poll.h>
54568 #include <linux/proc_fs.h>
54569 #include <linux/fs.h>
54570 +#include <linux/syslog.h>
54571
54572 #include <asm/uaccess.h>
54573 #include <asm/io.h>
54574
54575 extern wait_queue_head_t log_wait;
54576
54577 -extern int do_syslog(int type, char __user *bug, int count);
54578 -
54579 static int kmsg_open(struct inode * inode, struct file * file)
54580 {
54581 - return do_syslog(1,NULL,0);
54582 + return do_syslog(SYSLOG_ACTION_OPEN, NULL, 0, SYSLOG_FROM_FILE);
54583 }
54584
54585 static int kmsg_release(struct inode * inode, struct file * file)
54586 {
54587 - (void) do_syslog(0,NULL,0);
54588 + (void) do_syslog(SYSLOG_ACTION_CLOSE, NULL, 0, SYSLOG_FROM_FILE);
54589 return 0;
54590 }
54591
54592 static ssize_t kmsg_read(struct file *file, char __user *buf,
54593 size_t count, loff_t *ppos)
54594 {
54595 - if ((file->f_flags & O_NONBLOCK) && !do_syslog(9, NULL, 0))
54596 + if ((file->f_flags & O_NONBLOCK) &&
54597 + !do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_FILE))
54598 return -EAGAIN;
54599 - return do_syslog(2, buf, count);
54600 + return do_syslog(SYSLOG_ACTION_READ, buf, count, SYSLOG_FROM_FILE);
54601 }
54602
54603 static unsigned int kmsg_poll(struct file *file, poll_table *wait)
54604 {
54605 poll_wait(file, &log_wait, wait);
54606 - if (do_syslog(9, NULL, 0))
54607 + if (do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_FILE))
54608 return POLLIN | POLLRDNORM;
54609 return 0;
54610 }
54611 diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
54612 index a65239c..ad1182a 100644
54613 --- a/fs/proc/meminfo.c
54614 +++ b/fs/proc/meminfo.c
54615 @@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
54616 unsigned long pages[NR_LRU_LISTS];
54617 int lru;
54618
54619 + pax_track_stack();
54620 +
54621 /*
54622 * display in kilobytes.
54623 */
54624 @@ -149,7 +151,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
54625 vmi.used >> 10,
54626 vmi.largest_chunk >> 10
54627 #ifdef CONFIG_MEMORY_FAILURE
54628 - ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
54629 + ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
54630 #endif
54631 );
54632
54633 diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
54634 index 9fe7d7e..cdb62c9 100644
54635 --- a/fs/proc/nommu.c
54636 +++ b/fs/proc/nommu.c
54637 @@ -67,7 +67,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
54638 if (len < 1)
54639 len = 1;
54640 seq_printf(m, "%*c", len, ' ');
54641 - seq_path(m, &file->f_path, "");
54642 + seq_path(m, &file->f_path, "\n\\");
54643 }
54644
54645 seq_putc(m, '\n');
54646 diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
54647 index 04d1270..25e1173 100644
54648 --- a/fs/proc/proc_net.c
54649 +++ b/fs/proc/proc_net.c
54650 @@ -104,6 +104,17 @@ static struct net *get_proc_task_net(struct inode *dir)
54651 struct task_struct *task;
54652 struct nsproxy *ns;
54653 struct net *net = NULL;
54654 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54655 + const struct cred *cred = current_cred();
54656 +#endif
54657 +
54658 +#ifdef CONFIG_GRKERNSEC_PROC_USER
54659 + if (cred->fsuid)
54660 + return net;
54661 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54662 + if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
54663 + return net;
54664 +#endif
54665
54666 rcu_read_lock();
54667 task = pid_task(proc_pid(dir), PIDTYPE_PID);
54668 diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
54669 index f667e8a..55f4d96 100644
54670 --- a/fs/proc/proc_sysctl.c
54671 +++ b/fs/proc/proc_sysctl.c
54672 @@ -7,11 +7,13 @@
54673 #include <linux/security.h>
54674 #include "internal.h"
54675
54676 +extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
54677 +
54678 static const struct dentry_operations proc_sys_dentry_operations;
54679 static const struct file_operations proc_sys_file_operations;
54680 -static const struct inode_operations proc_sys_inode_operations;
54681 +const struct inode_operations proc_sys_inode_operations;
54682 static const struct file_operations proc_sys_dir_file_operations;
54683 -static const struct inode_operations proc_sys_dir_operations;
54684 +const struct inode_operations proc_sys_dir_operations;
54685
54686 static struct inode *proc_sys_make_inode(struct super_block *sb,
54687 struct ctl_table_header *head, struct ctl_table *table)
54688 @@ -109,6 +111,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
54689 if (!p)
54690 goto out;
54691
54692 + if (gr_handle_sysctl(p, MAY_EXEC))
54693 + goto out;
54694 +
54695 err = ERR_PTR(-ENOMEM);
54696 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
54697 if (h)
54698 @@ -119,6 +124,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
54699
54700 err = NULL;
54701 dentry->d_op = &proc_sys_dentry_operations;
54702 +
54703 + gr_handle_proc_create(dentry, inode);
54704 +
54705 d_add(dentry, inode);
54706
54707 out:
54708 @@ -200,6 +208,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
54709 return -ENOMEM;
54710 } else {
54711 child->d_op = &proc_sys_dentry_operations;
54712 +
54713 + gr_handle_proc_create(child, inode);
54714 +
54715 d_add(child, inode);
54716 }
54717 } else {
54718 @@ -228,6 +239,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
54719 if (*pos < file->f_pos)
54720 continue;
54721
54722 + if (gr_handle_sysctl(table, 0))
54723 + continue;
54724 +
54725 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
54726 if (res)
54727 return res;
54728 @@ -344,6 +358,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
54729 if (IS_ERR(head))
54730 return PTR_ERR(head);
54731
54732 + if (table && gr_handle_sysctl(table, MAY_EXEC))
54733 + return -ENOENT;
54734 +
54735 generic_fillattr(inode, stat);
54736 if (table)
54737 stat->mode = (stat->mode & S_IFMT) | table->mode;
54738 @@ -358,17 +375,18 @@ static const struct file_operations proc_sys_file_operations = {
54739 };
54740
54741 static const struct file_operations proc_sys_dir_file_operations = {
54742 + .read = generic_read_dir,
54743 .readdir = proc_sys_readdir,
54744 .llseek = generic_file_llseek,
54745 };
54746
54747 -static const struct inode_operations proc_sys_inode_operations = {
54748 +const struct inode_operations proc_sys_inode_operations = {
54749 .permission = proc_sys_permission,
54750 .setattr = proc_sys_setattr,
54751 .getattr = proc_sys_getattr,
54752 };
54753
54754 -static const struct inode_operations proc_sys_dir_operations = {
54755 +const struct inode_operations proc_sys_dir_operations = {
54756 .lookup = proc_sys_lookup,
54757 .permission = proc_sys_permission,
54758 .setattr = proc_sys_setattr,
54759 diff --git a/fs/proc/root.c b/fs/proc/root.c
54760 index b080b79..d957e63 100644
54761 --- a/fs/proc/root.c
54762 +++ b/fs/proc/root.c
54763 @@ -134,7 +134,15 @@ void __init proc_root_init(void)
54764 #ifdef CONFIG_PROC_DEVICETREE
54765 proc_device_tree_init();
54766 #endif
54767 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
54768 +#ifdef CONFIG_GRKERNSEC_PROC_USER
54769 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
54770 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54771 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
54772 +#endif
54773 +#else
54774 proc_mkdir("bus", NULL);
54775 +#endif
54776 proc_sys_init();
54777 }
54778
54779 diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
54780 index 3b7b82a..4b420b0 100644
54781 --- a/fs/proc/task_mmu.c
54782 +++ b/fs/proc/task_mmu.c
54783 @@ -8,6 +8,7 @@
54784 #include <linux/mempolicy.h>
54785 #include <linux/swap.h>
54786 #include <linux/swapops.h>
54787 +#include <linux/grsecurity.h>
54788
54789 #include <asm/elf.h>
54790 #include <asm/uaccess.h>
54791 @@ -46,15 +47,26 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
54792 "VmStk:\t%8lu kB\n"
54793 "VmExe:\t%8lu kB\n"
54794 "VmLib:\t%8lu kB\n"
54795 - "VmPTE:\t%8lu kB\n",
54796 - hiwater_vm << (PAGE_SHIFT-10),
54797 + "VmPTE:\t%8lu kB\n"
54798 +
54799 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
54800 + "CsBase:\t%8lx\nCsLim:\t%8lx\n"
54801 +#endif
54802 +
54803 + ,hiwater_vm << (PAGE_SHIFT-10),
54804 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
54805 mm->locked_vm << (PAGE_SHIFT-10),
54806 hiwater_rss << (PAGE_SHIFT-10),
54807 total_rss << (PAGE_SHIFT-10),
54808 data << (PAGE_SHIFT-10),
54809 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
54810 - (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
54811 + (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10
54812 +
54813 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
54814 + , mm->context.user_cs_base, mm->context.user_cs_limit
54815 +#endif
54816 +
54817 + );
54818 }
54819
54820 unsigned long task_vsize(struct mm_struct *mm)
54821 @@ -175,7 +187,8 @@ static void m_stop(struct seq_file *m, void *v)
54822 struct proc_maps_private *priv = m->private;
54823 struct vm_area_struct *vma = v;
54824
54825 - vma_stop(priv, vma);
54826 + if (!IS_ERR(vma))
54827 + vma_stop(priv, vma);
54828 if (priv->task)
54829 put_task_struct(priv->task);
54830 }
54831 @@ -199,6 +212,12 @@ static int do_maps_open(struct inode *inode, struct file *file,
54832 return ret;
54833 }
54834
54835 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54836 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
54837 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
54838 + _mm->pax_flags & MF_PAX_SEGMEXEC))
54839 +#endif
54840 +
54841 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
54842 {
54843 struct mm_struct *mm = vma->vm_mm;
54844 @@ -206,7 +225,6 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
54845 int flags = vma->vm_flags;
54846 unsigned long ino = 0;
54847 unsigned long long pgoff = 0;
54848 - unsigned long start;
54849 dev_t dev = 0;
54850 int len;
54851
54852 @@ -217,20 +235,23 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
54853 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
54854 }
54855
54856 - /* We don't show the stack guard page in /proc/maps */
54857 - start = vma->vm_start;
54858 - if (vma->vm_flags & VM_GROWSDOWN)
54859 - if (!vma_stack_continue(vma->vm_prev, vma->vm_start))
54860 - start += PAGE_SIZE;
54861 -
54862 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
54863 - start,
54864 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54865 + PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start,
54866 + PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end,
54867 +#else
54868 + vma->vm_start,
54869 vma->vm_end,
54870 +#endif
54871 flags & VM_READ ? 'r' : '-',
54872 flags & VM_WRITE ? 'w' : '-',
54873 flags & VM_EXEC ? 'x' : '-',
54874 flags & VM_MAYSHARE ? 's' : 'p',
54875 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54876 + PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
54877 +#else
54878 pgoff,
54879 +#endif
54880 MAJOR(dev), MINOR(dev), ino, &len);
54881
54882 /*
54883 @@ -239,7 +260,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
54884 */
54885 if (file) {
54886 pad_len_spaces(m, len);
54887 - seq_path(m, &file->f_path, "\n");
54888 + seq_path(m, &file->f_path, "\n\\");
54889 } else {
54890 const char *name = arch_vma_name(vma);
54891 if (!name) {
54892 @@ -247,8 +268,9 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
54893 if (vma->vm_start <= mm->brk &&
54894 vma->vm_end >= mm->start_brk) {
54895 name = "[heap]";
54896 - } else if (vma->vm_start <= mm->start_stack &&
54897 - vma->vm_end >= mm->start_stack) {
54898 + } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
54899 + (vma->vm_start <= mm->start_stack &&
54900 + vma->vm_end >= mm->start_stack)) {
54901 name = "[stack]";
54902 }
54903 } else {
54904 @@ -269,6 +291,13 @@ static int show_map(struct seq_file *m, void *v)
54905 struct proc_maps_private *priv = m->private;
54906 struct task_struct *task = priv->task;
54907
54908 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54909 + if (current->exec_id != m->exec_id) {
54910 + gr_log_badprocpid("maps");
54911 + return 0;
54912 + }
54913 +#endif
54914 +
54915 show_map_vma(m, vma);
54916
54917 if (m->count < m->size) /* vma is copied successfully */
54918 @@ -390,10 +419,23 @@ static int show_smap(struct seq_file *m, void *v)
54919 .private = &mss,
54920 };
54921
54922 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54923 + if (current->exec_id != m->exec_id) {
54924 + gr_log_badprocpid("smaps");
54925 + return 0;
54926 + }
54927 +#endif
54928 memset(&mss, 0, sizeof mss);
54929 - mss.vma = vma;
54930 - if (vma->vm_mm && !is_vm_hugetlb_page(vma))
54931 - walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
54932 +
54933 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54934 + if (!PAX_RAND_FLAGS(vma->vm_mm)) {
54935 +#endif
54936 + mss.vma = vma;
54937 + if (vma->vm_mm && !is_vm_hugetlb_page(vma))
54938 + walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
54939 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54940 + }
54941 +#endif
54942
54943 show_map_vma(m, vma);
54944
54945 @@ -409,7 +451,11 @@ static int show_smap(struct seq_file *m, void *v)
54946 "Swap: %8lu kB\n"
54947 "KernelPageSize: %8lu kB\n"
54948 "MMUPageSize: %8lu kB\n",
54949 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54950 + PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
54951 +#else
54952 (vma->vm_end - vma->vm_start) >> 10,
54953 +#endif
54954 mss.resident >> 10,
54955 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
54956 mss.shared_clean >> 10,
54957 diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
54958 index 8f5c05d..c99c76d 100644
54959 --- a/fs/proc/task_nommu.c
54960 +++ b/fs/proc/task_nommu.c
54961 @@ -50,7 +50,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
54962 else
54963 bytes += kobjsize(mm);
54964
54965 - if (current->fs && current->fs->users > 1)
54966 + if (current->fs && atomic_read(&current->fs->users) > 1)
54967 sbytes += kobjsize(current->fs);
54968 else
54969 bytes += kobjsize(current->fs);
54970 @@ -154,7 +154,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
54971 if (len < 1)
54972 len = 1;
54973 seq_printf(m, "%*c", len, ' ');
54974 - seq_path(m, &file->f_path, "");
54975 + seq_path(m, &file->f_path, "\n\\");
54976 }
54977
54978 seq_putc(m, '\n');
54979 diff --git a/fs/readdir.c b/fs/readdir.c
54980 index 7723401..30059a6 100644
54981 --- a/fs/readdir.c
54982 +++ b/fs/readdir.c
54983 @@ -16,6 +16,7 @@
54984 #include <linux/security.h>
54985 #include <linux/syscalls.h>
54986 #include <linux/unistd.h>
54987 +#include <linux/namei.h>
54988
54989 #include <asm/uaccess.h>
54990
54991 @@ -67,6 +68,7 @@ struct old_linux_dirent {
54992
54993 struct readdir_callback {
54994 struct old_linux_dirent __user * dirent;
54995 + struct file * file;
54996 int result;
54997 };
54998
54999 @@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
55000 buf->result = -EOVERFLOW;
55001 return -EOVERFLOW;
55002 }
55003 +
55004 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
55005 + return 0;
55006 +
55007 buf->result++;
55008 dirent = buf->dirent;
55009 if (!access_ok(VERIFY_WRITE, dirent,
55010 @@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
55011
55012 buf.result = 0;
55013 buf.dirent = dirent;
55014 + buf.file = file;
55015
55016 error = vfs_readdir(file, fillonedir, &buf);
55017 if (buf.result)
55018 @@ -142,6 +149,7 @@ struct linux_dirent {
55019 struct getdents_callback {
55020 struct linux_dirent __user * current_dir;
55021 struct linux_dirent __user * previous;
55022 + struct file * file;
55023 int count;
55024 int error;
55025 };
55026 @@ -162,6 +170,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
55027 buf->error = -EOVERFLOW;
55028 return -EOVERFLOW;
55029 }
55030 +
55031 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
55032 + return 0;
55033 +
55034 dirent = buf->previous;
55035 if (dirent) {
55036 if (__put_user(offset, &dirent->d_off))
55037 @@ -209,6 +221,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
55038 buf.previous = NULL;
55039 buf.count = count;
55040 buf.error = 0;
55041 + buf.file = file;
55042
55043 error = vfs_readdir(file, filldir, &buf);
55044 if (error >= 0)
55045 @@ -228,6 +241,7 @@ out:
55046 struct getdents_callback64 {
55047 struct linux_dirent64 __user * current_dir;
55048 struct linux_dirent64 __user * previous;
55049 + struct file *file;
55050 int count;
55051 int error;
55052 };
55053 @@ -242,6 +256,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
55054 buf->error = -EINVAL; /* only used if we fail.. */
55055 if (reclen > buf->count)
55056 return -EINVAL;
55057 +
55058 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
55059 + return 0;
55060 +
55061 dirent = buf->previous;
55062 if (dirent) {
55063 if (__put_user(offset, &dirent->d_off))
55064 @@ -289,6 +307,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
55065
55066 buf.current_dir = dirent;
55067 buf.previous = NULL;
55068 + buf.file = file;
55069 buf.count = count;
55070 buf.error = 0;
55071
55072 @@ -297,7 +316,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
55073 error = buf.error;
55074 lastdirent = buf.previous;
55075 if (lastdirent) {
55076 - typeof(lastdirent->d_off) d_off = file->f_pos;
55077 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
55078 if (__put_user(d_off, &lastdirent->d_off))
55079 error = -EFAULT;
55080 else
55081 diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c
55082 index d42c30c..4fd8718 100644
55083 --- a/fs/reiserfs/dir.c
55084 +++ b/fs/reiserfs/dir.c
55085 @@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentry *dentry, void *dirent,
55086 struct reiserfs_dir_entry de;
55087 int ret = 0;
55088
55089 + pax_track_stack();
55090 +
55091 reiserfs_write_lock(inode->i_sb);
55092
55093 reiserfs_check_lock_depth(inode->i_sb, "readdir");
55094 diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
55095 index 128d3f7..8840d44 100644
55096 --- a/fs/reiserfs/do_balan.c
55097 +++ b/fs/reiserfs/do_balan.c
55098 @@ -2058,7 +2058,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
55099 return;
55100 }
55101
55102 - atomic_inc(&(fs_generation(tb->tb_sb)));
55103 + atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
55104 do_balance_starts(tb);
55105
55106 /* balance leaf returns 0 except if combining L R and S into
55107 diff --git a/fs/reiserfs/item_ops.c b/fs/reiserfs/item_ops.c
55108 index 72cb1cc..d0e3181 100644
55109 --- a/fs/reiserfs/item_ops.c
55110 +++ b/fs/reiserfs/item_ops.c
55111 @@ -102,7 +102,7 @@ static void sd_print_vi(struct virtual_item *vi)
55112 vi->vi_index, vi->vi_type, vi->vi_ih);
55113 }
55114
55115 -static struct item_operations stat_data_ops = {
55116 +static const struct item_operations stat_data_ops = {
55117 .bytes_number = sd_bytes_number,
55118 .decrement_key = sd_decrement_key,
55119 .is_left_mergeable = sd_is_left_mergeable,
55120 @@ -196,7 +196,7 @@ static void direct_print_vi(struct virtual_item *vi)
55121 vi->vi_index, vi->vi_type, vi->vi_ih);
55122 }
55123
55124 -static struct item_operations direct_ops = {
55125 +static const struct item_operations direct_ops = {
55126 .bytes_number = direct_bytes_number,
55127 .decrement_key = direct_decrement_key,
55128 .is_left_mergeable = direct_is_left_mergeable,
55129 @@ -341,7 +341,7 @@ static void indirect_print_vi(struct virtual_item *vi)
55130 vi->vi_index, vi->vi_type, vi->vi_ih);
55131 }
55132
55133 -static struct item_operations indirect_ops = {
55134 +static const struct item_operations indirect_ops = {
55135 .bytes_number = indirect_bytes_number,
55136 .decrement_key = indirect_decrement_key,
55137 .is_left_mergeable = indirect_is_left_mergeable,
55138 @@ -628,7 +628,7 @@ static void direntry_print_vi(struct virtual_item *vi)
55139 printk("\n");
55140 }
55141
55142 -static struct item_operations direntry_ops = {
55143 +static const struct item_operations direntry_ops = {
55144 .bytes_number = direntry_bytes_number,
55145 .decrement_key = direntry_decrement_key,
55146 .is_left_mergeable = direntry_is_left_mergeable,
55147 @@ -724,7 +724,7 @@ static void errcatch_print_vi(struct virtual_item *vi)
55148 "Invalid item type observed, run fsck ASAP");
55149 }
55150
55151 -static struct item_operations errcatch_ops = {
55152 +static const struct item_operations errcatch_ops = {
55153 errcatch_bytes_number,
55154 errcatch_decrement_key,
55155 errcatch_is_left_mergeable,
55156 @@ -746,7 +746,7 @@ static struct item_operations errcatch_ops = {
55157 #error Item types must use disk-format assigned values.
55158 #endif
55159
55160 -struct item_operations *item_ops[TYPE_ANY + 1] = {
55161 +const struct item_operations * const item_ops[TYPE_ANY + 1] = {
55162 &stat_data_ops,
55163 &indirect_ops,
55164 &direct_ops,
55165 diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
55166 index b5fe0aa..e0e25c4 100644
55167 --- a/fs/reiserfs/journal.c
55168 +++ b/fs/reiserfs/journal.c
55169 @@ -2329,6 +2329,8 @@ static struct buffer_head *reiserfs_breada(struct block_device *dev,
55170 struct buffer_head *bh;
55171 int i, j;
55172
55173 + pax_track_stack();
55174 +
55175 bh = __getblk(dev, block, bufsize);
55176 if (buffer_uptodate(bh))
55177 return (bh);
55178 diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
55179 index 2715791..b8996db 100644
55180 --- a/fs/reiserfs/namei.c
55181 +++ b/fs/reiserfs/namei.c
55182 @@ -1214,6 +1214,8 @@ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry,
55183 unsigned long savelink = 1;
55184 struct timespec ctime;
55185
55186 + pax_track_stack();
55187 +
55188 /* three balancings: (1) old name removal, (2) new name insertion
55189 and (3) maybe "save" link insertion
55190 stat data updates: (1) old directory,
55191 diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
55192 index 9229e55..3d2e3b7 100644
55193 --- a/fs/reiserfs/procfs.c
55194 +++ b/fs/reiserfs/procfs.c
55195 @@ -123,7 +123,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
55196 "SMALL_TAILS " : "NO_TAILS ",
55197 replay_only(sb) ? "REPLAY_ONLY " : "",
55198 convert_reiserfs(sb) ? "CONV " : "",
55199 - atomic_read(&r->s_generation_counter),
55200 + atomic_read_unchecked(&r->s_generation_counter),
55201 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
55202 SF(s_do_balance), SF(s_unneeded_left_neighbor),
55203 SF(s_good_search_by_key_reada), SF(s_bmaps),
55204 @@ -309,6 +309,8 @@ static int show_journal(struct seq_file *m, struct super_block *sb)
55205 struct journal_params *jp = &rs->s_v1.s_journal;
55206 char b[BDEVNAME_SIZE];
55207
55208 + pax_track_stack();
55209 +
55210 seq_printf(m, /* on-disk fields */
55211 "jp_journal_1st_block: \t%i\n"
55212 "jp_journal_dev: \t%s[%x]\n"
55213 diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
55214 index d036ee5..4c7dca1 100644
55215 --- a/fs/reiserfs/stree.c
55216 +++ b/fs/reiserfs/stree.c
55217 @@ -1159,6 +1159,8 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th,
55218 int iter = 0;
55219 #endif
55220
55221 + pax_track_stack();
55222 +
55223 BUG_ON(!th->t_trans_id);
55224
55225 init_tb_struct(th, &s_del_balance, sb, path,
55226 @@ -1296,6 +1298,8 @@ void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th,
55227 int retval;
55228 int quota_cut_bytes = 0;
55229
55230 + pax_track_stack();
55231 +
55232 BUG_ON(!th->t_trans_id);
55233
55234 le_key2cpu_key(&cpu_key, key);
55235 @@ -1525,6 +1529,8 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th,
55236 int quota_cut_bytes;
55237 loff_t tail_pos = 0;
55238
55239 + pax_track_stack();
55240 +
55241 BUG_ON(!th->t_trans_id);
55242
55243 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
55244 @@ -1920,6 +1926,8 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree
55245 int retval;
55246 int fs_gen;
55247
55248 + pax_track_stack();
55249 +
55250 BUG_ON(!th->t_trans_id);
55251
55252 fs_gen = get_generation(inode->i_sb);
55253 @@ -2007,6 +2015,8 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th,
55254 int fs_gen = 0;
55255 int quota_bytes = 0;
55256
55257 + pax_track_stack();
55258 +
55259 BUG_ON(!th->t_trans_id);
55260
55261 if (inode) { /* Do we count quotas for item? */
55262 diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
55263 index 7cb1285..c726cd0 100644
55264 --- a/fs/reiserfs/super.c
55265 +++ b/fs/reiserfs/super.c
55266 @@ -916,6 +916,8 @@ static int reiserfs_parse_options(struct super_block *s, char *options, /* strin
55267 {.option_name = NULL}
55268 };
55269
55270 + pax_track_stack();
55271 +
55272 *blocks = 0;
55273 if (!options || !*options)
55274 /* use default configuration: create tails, journaling on, no
55275 diff --git a/fs/select.c b/fs/select.c
55276 index fd38ce2..f5381b8 100644
55277 --- a/fs/select.c
55278 +++ b/fs/select.c
55279 @@ -20,6 +20,7 @@
55280 #include <linux/module.h>
55281 #include <linux/slab.h>
55282 #include <linux/poll.h>
55283 +#include <linux/security.h>
55284 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
55285 #include <linux/file.h>
55286 #include <linux/fdtable.h>
55287 @@ -401,6 +402,8 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
55288 int retval, i, timed_out = 0;
55289 unsigned long slack = 0;
55290
55291 + pax_track_stack();
55292 +
55293 rcu_read_lock();
55294 retval = max_select_fd(n, fds);
55295 rcu_read_unlock();
55296 @@ -529,6 +532,8 @@ int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
55297 /* Allocate small arguments on the stack to save memory and be faster */
55298 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
55299
55300 + pax_track_stack();
55301 +
55302 ret = -EINVAL;
55303 if (n < 0)
55304 goto out_nofds;
55305 @@ -821,6 +826,9 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
55306 struct poll_list *walk = head;
55307 unsigned long todo = nfds;
55308
55309 + pax_track_stack();
55310 +
55311 + gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
55312 if (nfds > current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
55313 return -EINVAL;
55314
55315 diff --git a/fs/seq_file.c b/fs/seq_file.c
55316 index eae7d9d..4ddabe2 100644
55317 --- a/fs/seq_file.c
55318 +++ b/fs/seq_file.c
55319 @@ -9,6 +9,7 @@
55320 #include <linux/module.h>
55321 #include <linux/seq_file.h>
55322 #include <linux/slab.h>
55323 +#include <linux/sched.h>
55324
55325 #include <asm/uaccess.h>
55326 #include <asm/page.h>
55327 @@ -40,6 +41,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
55328 memset(p, 0, sizeof(*p));
55329 mutex_init(&p->lock);
55330 p->op = op;
55331 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55332 + p->exec_id = current->exec_id;
55333 +#endif
55334
55335 /*
55336 * Wrappers around seq_open(e.g. swaps_open) need to be
55337 @@ -76,7 +80,8 @@ static int traverse(struct seq_file *m, loff_t offset)
55338 return 0;
55339 }
55340 if (!m->buf) {
55341 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
55342 + m->size = PAGE_SIZE;
55343 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
55344 if (!m->buf)
55345 return -ENOMEM;
55346 }
55347 @@ -116,7 +121,8 @@ static int traverse(struct seq_file *m, loff_t offset)
55348 Eoverflow:
55349 m->op->stop(m, p);
55350 kfree(m->buf);
55351 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
55352 + m->size <<= 1;
55353 + m->buf = kmalloc(m->size, GFP_KERNEL);
55354 return !m->buf ? -ENOMEM : -EAGAIN;
55355 }
55356
55357 @@ -169,7 +175,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
55358 m->version = file->f_version;
55359 /* grab buffer if we didn't have one */
55360 if (!m->buf) {
55361 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
55362 + m->size = PAGE_SIZE;
55363 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
55364 if (!m->buf)
55365 goto Enomem;
55366 }
55367 @@ -210,7 +217,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
55368 goto Fill;
55369 m->op->stop(m, p);
55370 kfree(m->buf);
55371 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
55372 + m->size <<= 1;
55373 + m->buf = kmalloc(m->size, GFP_KERNEL);
55374 if (!m->buf)
55375 goto Enomem;
55376 m->count = 0;
55377 @@ -551,7 +559,7 @@ static void single_stop(struct seq_file *p, void *v)
55378 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
55379 void *data)
55380 {
55381 - struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
55382 + seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
55383 int res = -ENOMEM;
55384
55385 if (op) {
55386 diff --git a/fs/smbfs/proc.c b/fs/smbfs/proc.c
55387 index 71c29b6..54694dd 100644
55388 --- a/fs/smbfs/proc.c
55389 +++ b/fs/smbfs/proc.c
55390 @@ -266,9 +266,9 @@ int smb_setcodepage(struct smb_sb_info *server, struct smb_nls_codepage *cp)
55391
55392 out:
55393 if (server->local_nls != NULL && server->remote_nls != NULL)
55394 - server->ops->convert = convert_cp;
55395 + *(void **)&server->ops->convert = convert_cp;
55396 else
55397 - server->ops->convert = convert_memcpy;
55398 + *(void **)&server->ops->convert = convert_memcpy;
55399
55400 smb_unlock_server(server);
55401 return n;
55402 @@ -933,9 +933,9 @@ smb_newconn(struct smb_sb_info *server, struct smb_conn_opt *opt)
55403
55404 /* FIXME: the win9x code wants to modify these ... (seek/trunc bug) */
55405 if (server->mnt->flags & SMB_MOUNT_OLDATTR) {
55406 - server->ops->getattr = smb_proc_getattr_core;
55407 + *(void **)&server->ops->getattr = smb_proc_getattr_core;
55408 } else if (server->mnt->flags & SMB_MOUNT_DIRATTR) {
55409 - server->ops->getattr = smb_proc_getattr_ff;
55410 + *(void **)&server->ops->getattr = smb_proc_getattr_ff;
55411 }
55412
55413 /* Decode server capabilities */
55414 @@ -3439,7 +3439,7 @@ out:
55415 static void
55416 install_ops(struct smb_ops *dst, struct smb_ops *src)
55417 {
55418 - memcpy(dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
55419 + memcpy((void *)dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
55420 }
55421
55422 /* < LANMAN2 */
55423 diff --git a/fs/smbfs/symlink.c b/fs/smbfs/symlink.c
55424 index 00b2909..2ace383 100644
55425 --- a/fs/smbfs/symlink.c
55426 +++ b/fs/smbfs/symlink.c
55427 @@ -55,7 +55,7 @@ static void *smb_follow_link(struct dentry *dentry, struct nameidata *nd)
55428
55429 static void smb_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
55430 {
55431 - char *s = nd_get_link(nd);
55432 + const char *s = nd_get_link(nd);
55433 if (!IS_ERR(s))
55434 __putname(s);
55435 }
55436 diff --git a/fs/splice.c b/fs/splice.c
55437 index bb92b7c..5aa72b0 100644
55438 --- a/fs/splice.c
55439 +++ b/fs/splice.c
55440 @@ -185,7 +185,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
55441 pipe_lock(pipe);
55442
55443 for (;;) {
55444 - if (!pipe->readers) {
55445 + if (!atomic_read(&pipe->readers)) {
55446 send_sig(SIGPIPE, current, 0);
55447 if (!ret)
55448 ret = -EPIPE;
55449 @@ -239,9 +239,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
55450 do_wakeup = 0;
55451 }
55452
55453 - pipe->waiting_writers++;
55454 + atomic_inc(&pipe->waiting_writers);
55455 pipe_wait(pipe);
55456 - pipe->waiting_writers--;
55457 + atomic_dec(&pipe->waiting_writers);
55458 }
55459
55460 pipe_unlock(pipe);
55461 @@ -285,6 +285,8 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
55462 .spd_release = spd_release_page,
55463 };
55464
55465 + pax_track_stack();
55466 +
55467 index = *ppos >> PAGE_CACHE_SHIFT;
55468 loff = *ppos & ~PAGE_CACHE_MASK;
55469 req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
55470 @@ -521,7 +523,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
55471 old_fs = get_fs();
55472 set_fs(get_ds());
55473 /* The cast to a user pointer is valid due to the set_fs() */
55474 - res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
55475 + res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
55476 set_fs(old_fs);
55477
55478 return res;
55479 @@ -536,7 +538,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
55480 old_fs = get_fs();
55481 set_fs(get_ds());
55482 /* The cast to a user pointer is valid due to the set_fs() */
55483 - res = vfs_write(file, (const char __user *)buf, count, &pos);
55484 + res = vfs_write(file, (const char __force_user *)buf, count, &pos);
55485 set_fs(old_fs);
55486
55487 return res;
55488 @@ -565,6 +567,8 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
55489 .spd_release = spd_release_page,
55490 };
55491
55492 + pax_track_stack();
55493 +
55494 index = *ppos >> PAGE_CACHE_SHIFT;
55495 offset = *ppos & ~PAGE_CACHE_MASK;
55496 nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
55497 @@ -578,7 +582,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
55498 goto err;
55499
55500 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
55501 - vec[i].iov_base = (void __user *) page_address(page);
55502 + vec[i].iov_base = (__force void __user *) page_address(page);
55503 vec[i].iov_len = this_len;
55504 pages[i] = page;
55505 spd.nr_pages++;
55506 @@ -800,10 +804,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
55507 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
55508 {
55509 while (!pipe->nrbufs) {
55510 - if (!pipe->writers)
55511 + if (!atomic_read(&pipe->writers))
55512 return 0;
55513
55514 - if (!pipe->waiting_writers && sd->num_spliced)
55515 + if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
55516 return 0;
55517
55518 if (sd->flags & SPLICE_F_NONBLOCK)
55519 @@ -1140,7 +1144,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
55520 * out of the pipe right after the splice_to_pipe(). So set
55521 * PIPE_READERS appropriately.
55522 */
55523 - pipe->readers = 1;
55524 + atomic_set(&pipe->readers, 1);
55525
55526 current->splice_pipe = pipe;
55527 }
55528 @@ -1593,6 +1597,8 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
55529 .spd_release = spd_release_page,
55530 };
55531
55532 + pax_track_stack();
55533 +
55534 pipe = pipe_info(file->f_path.dentry->d_inode);
55535 if (!pipe)
55536 return -EBADF;
55537 @@ -1701,9 +1707,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
55538 ret = -ERESTARTSYS;
55539 break;
55540 }
55541 - if (!pipe->writers)
55542 + if (!atomic_read(&pipe->writers))
55543 break;
55544 - if (!pipe->waiting_writers) {
55545 + if (!atomic_read(&pipe->waiting_writers)) {
55546 if (flags & SPLICE_F_NONBLOCK) {
55547 ret = -EAGAIN;
55548 break;
55549 @@ -1735,7 +1741,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
55550 pipe_lock(pipe);
55551
55552 while (pipe->nrbufs >= PIPE_BUFFERS) {
55553 - if (!pipe->readers) {
55554 + if (!atomic_read(&pipe->readers)) {
55555 send_sig(SIGPIPE, current, 0);
55556 ret = -EPIPE;
55557 break;
55558 @@ -1748,9 +1754,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
55559 ret = -ERESTARTSYS;
55560 break;
55561 }
55562 - pipe->waiting_writers++;
55563 + atomic_inc(&pipe->waiting_writers);
55564 pipe_wait(pipe);
55565 - pipe->waiting_writers--;
55566 + atomic_dec(&pipe->waiting_writers);
55567 }
55568
55569 pipe_unlock(pipe);
55570 @@ -1786,14 +1792,14 @@ retry:
55571 pipe_double_lock(ipipe, opipe);
55572
55573 do {
55574 - if (!opipe->readers) {
55575 + if (!atomic_read(&opipe->readers)) {
55576 send_sig(SIGPIPE, current, 0);
55577 if (!ret)
55578 ret = -EPIPE;
55579 break;
55580 }
55581
55582 - if (!ipipe->nrbufs && !ipipe->writers)
55583 + if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
55584 break;
55585
55586 /*
55587 @@ -1893,7 +1899,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
55588 pipe_double_lock(ipipe, opipe);
55589
55590 do {
55591 - if (!opipe->readers) {
55592 + if (!atomic_read(&opipe->readers)) {
55593 send_sig(SIGPIPE, current, 0);
55594 if (!ret)
55595 ret = -EPIPE;
55596 @@ -1938,7 +1944,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
55597 * return EAGAIN if we have the potential of some data in the
55598 * future, otherwise just return 0
55599 */
55600 - if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
55601 + if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
55602 ret = -EAGAIN;
55603
55604 pipe_unlock(ipipe);
55605 diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
55606 index e020183..18d64b4 100644
55607 --- a/fs/sysfs/dir.c
55608 +++ b/fs/sysfs/dir.c
55609 @@ -678,6 +678,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
55610 struct sysfs_dirent *sd;
55611 int rc;
55612
55613 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
55614 + const char *parent_name = parent_sd->s_name;
55615 +
55616 + mode = S_IFDIR | S_IRWXU;
55617 +
55618 + if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
55619 + (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
55620 + (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse"))) ||
55621 + (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
55622 + mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
55623 +#endif
55624 +
55625 /* allocate */
55626 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
55627 if (!sd)
55628 diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
55629 index 7118a38..70af853 100644
55630 --- a/fs/sysfs/file.c
55631 +++ b/fs/sysfs/file.c
55632 @@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
55633
55634 struct sysfs_open_dirent {
55635 atomic_t refcnt;
55636 - atomic_t event;
55637 + atomic_unchecked_t event;
55638 wait_queue_head_t poll;
55639 struct list_head buffers; /* goes through sysfs_buffer.list */
55640 };
55641 @@ -53,7 +53,7 @@ struct sysfs_buffer {
55642 size_t count;
55643 loff_t pos;
55644 char * page;
55645 - struct sysfs_ops * ops;
55646 + const struct sysfs_ops * ops;
55647 struct mutex mutex;
55648 int needs_read_fill;
55649 int event;
55650 @@ -75,7 +75,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
55651 {
55652 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
55653 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
55654 - struct sysfs_ops * ops = buffer->ops;
55655 + const struct sysfs_ops * ops = buffer->ops;
55656 int ret = 0;
55657 ssize_t count;
55658
55659 @@ -88,7 +88,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
55660 if (!sysfs_get_active_two(attr_sd))
55661 return -ENODEV;
55662
55663 - buffer->event = atomic_read(&attr_sd->s_attr.open->event);
55664 + buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
55665 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
55666
55667 sysfs_put_active_two(attr_sd);
55668 @@ -199,7 +199,7 @@ flush_write_buffer(struct dentry * dentry, struct sysfs_buffer * buffer, size_t
55669 {
55670 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
55671 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
55672 - struct sysfs_ops * ops = buffer->ops;
55673 + const struct sysfs_ops * ops = buffer->ops;
55674 int rc;
55675
55676 /* need attr_sd for attr and ops, its parent for kobj */
55677 @@ -294,7 +294,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
55678 return -ENOMEM;
55679
55680 atomic_set(&new_od->refcnt, 0);
55681 - atomic_set(&new_od->event, 1);
55682 + atomic_set_unchecked(&new_od->event, 1);
55683 init_waitqueue_head(&new_od->poll);
55684 INIT_LIST_HEAD(&new_od->buffers);
55685 goto retry;
55686 @@ -335,7 +335,7 @@ static int sysfs_open_file(struct inode *inode, struct file *file)
55687 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
55688 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
55689 struct sysfs_buffer *buffer;
55690 - struct sysfs_ops *ops;
55691 + const struct sysfs_ops *ops;
55692 int error = -EACCES;
55693 char *p;
55694
55695 @@ -444,7 +444,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
55696
55697 sysfs_put_active_two(attr_sd);
55698
55699 - if (buffer->event != atomic_read(&od->event))
55700 + if (buffer->event != atomic_read_unchecked(&od->event))
55701 goto trigger;
55702
55703 return DEFAULT_POLLMASK;
55704 @@ -463,7 +463,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
55705
55706 od = sd->s_attr.open;
55707 if (od) {
55708 - atomic_inc(&od->event);
55709 + atomic_inc_unchecked(&od->event);
55710 wake_up_interruptible(&od->poll);
55711 }
55712
55713 diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
55714 index c5081ad..342ea86 100644
55715 --- a/fs/sysfs/symlink.c
55716 +++ b/fs/sysfs/symlink.c
55717 @@ -204,7 +204,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
55718
55719 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
55720 {
55721 - char *page = nd_get_link(nd);
55722 + const char *page = nd_get_link(nd);
55723 if (!IS_ERR(page))
55724 free_page((unsigned long)page);
55725 }
55726 diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
55727 index 1e06853..b06d325 100644
55728 --- a/fs/udf/balloc.c
55729 +++ b/fs/udf/balloc.c
55730 @@ -172,9 +172,7 @@ static void udf_bitmap_free_blocks(struct super_block *sb,
55731
55732 mutex_lock(&sbi->s_alloc_mutex);
55733 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
55734 - if (bloc->logicalBlockNum < 0 ||
55735 - (bloc->logicalBlockNum + count) >
55736 - partmap->s_partition_len) {
55737 + if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
55738 udf_debug("%d < %d || %d + %d > %d\n",
55739 bloc->logicalBlockNum, 0, bloc->logicalBlockNum,
55740 count, partmap->s_partition_len);
55741 @@ -436,9 +434,7 @@ static void udf_table_free_blocks(struct super_block *sb,
55742
55743 mutex_lock(&sbi->s_alloc_mutex);
55744 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
55745 - if (bloc->logicalBlockNum < 0 ||
55746 - (bloc->logicalBlockNum + count) >
55747 - partmap->s_partition_len) {
55748 + if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
55749 udf_debug("%d < %d || %d + %d > %d\n",
55750 bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
55751 partmap->s_partition_len);
55752 diff --git a/fs/udf/inode.c b/fs/udf/inode.c
55753 index 6d24c2c..fff470f 100644
55754 --- a/fs/udf/inode.c
55755 +++ b/fs/udf/inode.c
55756 @@ -484,6 +484,8 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
55757 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
55758 int lastblock = 0;
55759
55760 + pax_track_stack();
55761 +
55762 prev_epos.offset = udf_file_entry_alloc_offset(inode);
55763 prev_epos.block = iinfo->i_location;
55764 prev_epos.bh = NULL;
55765 diff --git a/fs/udf/misc.c b/fs/udf/misc.c
55766 index 9215700..bf1f68e 100644
55767 --- a/fs/udf/misc.c
55768 +++ b/fs/udf/misc.c
55769 @@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
55770
55771 u8 udf_tag_checksum(const struct tag *t)
55772 {
55773 - u8 *data = (u8 *)t;
55774 + const u8 *data = (const u8 *)t;
55775 u8 checksum = 0;
55776 int i;
55777 for (i = 0; i < sizeof(struct tag); ++i)
55778 diff --git a/fs/utimes.c b/fs/utimes.c
55779 index e4c75db..b4df0e0 100644
55780 --- a/fs/utimes.c
55781 +++ b/fs/utimes.c
55782 @@ -1,6 +1,7 @@
55783 #include <linux/compiler.h>
55784 #include <linux/file.h>
55785 #include <linux/fs.h>
55786 +#include <linux/security.h>
55787 #include <linux/linkage.h>
55788 #include <linux/mount.h>
55789 #include <linux/namei.h>
55790 @@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
55791 goto mnt_drop_write_and_out;
55792 }
55793 }
55794 +
55795 + if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
55796 + error = -EACCES;
55797 + goto mnt_drop_write_and_out;
55798 + }
55799 +
55800 mutex_lock(&inode->i_mutex);
55801 error = notify_change(path->dentry, &newattrs);
55802 mutex_unlock(&inode->i_mutex);
55803 diff --git a/fs/xattr.c b/fs/xattr.c
55804 index 6d4f6d3..cda3958 100644
55805 --- a/fs/xattr.c
55806 +++ b/fs/xattr.c
55807 @@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
55808 * Extended attribute SET operations
55809 */
55810 static long
55811 -setxattr(struct dentry *d, const char __user *name, const void __user *value,
55812 +setxattr(struct path *path, const char __user *name, const void __user *value,
55813 size_t size, int flags)
55814 {
55815 int error;
55816 @@ -271,7 +271,13 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
55817 return PTR_ERR(kvalue);
55818 }
55819
55820 - error = vfs_setxattr(d, kname, kvalue, size, flags);
55821 + if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
55822 + error = -EACCES;
55823 + goto out;
55824 + }
55825 +
55826 + error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
55827 +out:
55828 kfree(kvalue);
55829 return error;
55830 }
55831 @@ -288,7 +294,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
55832 return error;
55833 error = mnt_want_write(path.mnt);
55834 if (!error) {
55835 - error = setxattr(path.dentry, name, value, size, flags);
55836 + error = setxattr(&path, name, value, size, flags);
55837 mnt_drop_write(path.mnt);
55838 }
55839 path_put(&path);
55840 @@ -307,7 +313,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
55841 return error;
55842 error = mnt_want_write(path.mnt);
55843 if (!error) {
55844 - error = setxattr(path.dentry, name, value, size, flags);
55845 + error = setxattr(&path, name, value, size, flags);
55846 mnt_drop_write(path.mnt);
55847 }
55848 path_put(&path);
55849 @@ -318,17 +324,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
55850 const void __user *,value, size_t, size, int, flags)
55851 {
55852 struct file *f;
55853 - struct dentry *dentry;
55854 int error = -EBADF;
55855
55856 f = fget(fd);
55857 if (!f)
55858 return error;
55859 - dentry = f->f_path.dentry;
55860 - audit_inode(NULL, dentry);
55861 + audit_inode(NULL, f->f_path.dentry);
55862 error = mnt_want_write_file(f);
55863 if (!error) {
55864 - error = setxattr(dentry, name, value, size, flags);
55865 + error = setxattr(&f->f_path, name, value, size, flags);
55866 mnt_drop_write(f->f_path.mnt);
55867 }
55868 fput(f);
55869 diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
55870 index c6ad7c7..f2847a7 100644
55871 --- a/fs/xattr_acl.c
55872 +++ b/fs/xattr_acl.c
55873 @@ -17,8 +17,8 @@
55874 struct posix_acl *
55875 posix_acl_from_xattr(const void *value, size_t size)
55876 {
55877 - posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
55878 - posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
55879 + const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
55880 + const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
55881 int count;
55882 struct posix_acl *acl;
55883 struct posix_acl_entry *acl_e;
55884 diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
55885 index 942362f..88f96f5 100644
55886 --- a/fs/xfs/linux-2.6/xfs_ioctl.c
55887 +++ b/fs/xfs/linux-2.6/xfs_ioctl.c
55888 @@ -134,7 +134,7 @@ xfs_find_handle(
55889 }
55890
55891 error = -EFAULT;
55892 - if (copy_to_user(hreq->ohandle, &handle, hsize) ||
55893 + if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
55894 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
55895 goto out_put;
55896
55897 @@ -423,7 +423,7 @@ xfs_attrlist_by_handle(
55898 if (IS_ERR(dentry))
55899 return PTR_ERR(dentry);
55900
55901 - kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL);
55902 + kbuf = kzalloc(al_hreq.buflen, GFP_KERNEL);
55903 if (!kbuf)
55904 goto out_dput;
55905
55906 @@ -697,7 +697,7 @@ xfs_ioc_fsgeometry_v1(
55907 xfs_mount_t *mp,
55908 void __user *arg)
55909 {
55910 - xfs_fsop_geom_t fsgeo;
55911 + xfs_fsop_geom_t fsgeo;
55912 int error;
55913
55914 error = xfs_fs_geometry(mp, &fsgeo, 3);
55915 diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c
55916 index bad485a..479bd32 100644
55917 --- a/fs/xfs/linux-2.6/xfs_ioctl32.c
55918 +++ b/fs/xfs/linux-2.6/xfs_ioctl32.c
55919 @@ -75,6 +75,7 @@ xfs_compat_ioc_fsgeometry_v1(
55920 xfs_fsop_geom_t fsgeo;
55921 int error;
55922
55923 + memset(&fsgeo, 0, sizeof(fsgeo));
55924 error = xfs_fs_geometry(mp, &fsgeo, 3);
55925 if (error)
55926 return -error;
55927 diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
55928 index 1f3b4b8..6102f6d 100644
55929 --- a/fs/xfs/linux-2.6/xfs_iops.c
55930 +++ b/fs/xfs/linux-2.6/xfs_iops.c
55931 @@ -468,7 +468,7 @@ xfs_vn_put_link(
55932 struct nameidata *nd,
55933 void *p)
55934 {
55935 - char *s = nd_get_link(nd);
55936 + const char *s = nd_get_link(nd);
55937
55938 if (!IS_ERR(s))
55939 kfree(s);
55940 diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
55941 index 8971fb0..5fc1eb2 100644
55942 --- a/fs/xfs/xfs_bmap.c
55943 +++ b/fs/xfs/xfs_bmap.c
55944 @@ -360,7 +360,7 @@ xfs_bmap_validate_ret(
55945 int nmap,
55946 int ret_nmap);
55947 #else
55948 -#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
55949 +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
55950 #endif /* DEBUG */
55951
55952 #if defined(XFS_RW_TRACE)
55953 diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
55954 index e89734e..5e84d8d 100644
55955 --- a/fs/xfs/xfs_dir2_sf.c
55956 +++ b/fs/xfs/xfs_dir2_sf.c
55957 @@ -779,7 +779,15 @@ xfs_dir2_sf_getdents(
55958 }
55959
55960 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
55961 - if (filldir(dirent, sfep->name, sfep->namelen,
55962 + if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
55963 + char name[sfep->namelen];
55964 + memcpy(name, sfep->name, sfep->namelen);
55965 + if (filldir(dirent, name, sfep->namelen,
55966 + off & 0x7fffffff, ino, DT_UNKNOWN)) {
55967 + *offset = off & 0x7fffffff;
55968 + return 0;
55969 + }
55970 + } else if (filldir(dirent, sfep->name, sfep->namelen,
55971 off & 0x7fffffff, ino, DT_UNKNOWN)) {
55972 *offset = off & 0x7fffffff;
55973 return 0;
55974 diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
55975 index 8f32f50..b6a41e8 100644
55976 --- a/fs/xfs/xfs_vnodeops.c
55977 +++ b/fs/xfs/xfs_vnodeops.c
55978 @@ -564,13 +564,18 @@ xfs_readlink(
55979
55980 xfs_ilock(ip, XFS_ILOCK_SHARED);
55981
55982 - ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFLNK);
55983 - ASSERT(ip->i_d.di_size <= MAXPATHLEN);
55984 -
55985 pathlen = ip->i_d.di_size;
55986 if (!pathlen)
55987 goto out;
55988
55989 + if (pathlen > MAXPATHLEN) {
55990 + xfs_fs_cmn_err(CE_ALERT, mp, "%s: inode (%llu) symlink length (%d) too long",
55991 + __func__, (unsigned long long)ip->i_ino, pathlen);
55992 + ASSERT(0);
55993 + error = XFS_ERROR(EFSCORRUPTED);
55994 + goto out;
55995 + }
55996 +
55997 if (ip->i_df.if_flags & XFS_IFINLINE) {
55998 memcpy(link, ip->i_df.if_u1.if_data, pathlen);
55999 link[pathlen] = '\0';
56000 diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
56001 new file mode 100644
56002 index 0000000..50819f8
56003 --- /dev/null
56004 +++ b/grsecurity/Kconfig
56005 @@ -0,0 +1,1077 @@
56006 +#
56007 +# grsecurity configuration
56008 +#
56009 +
56010 +menu "Grsecurity"
56011 +
56012 +config GRKERNSEC
56013 + bool "Grsecurity"
56014 + select CRYPTO
56015 + select CRYPTO_SHA256
56016 + help
56017 + If you say Y here, you will be able to configure many features
56018 + that will enhance the security of your system. It is highly
56019 + recommended that you say Y here and read through the help
56020 + for each option so that you fully understand the features and
56021 + can evaluate their usefulness for your machine.
56022 +
56023 +choice
56024 + prompt "Security Level"
56025 + depends on GRKERNSEC
56026 + default GRKERNSEC_CUSTOM
56027 +
56028 +config GRKERNSEC_LOW
56029 + bool "Low"
56030 + select GRKERNSEC_LINK
56031 + select GRKERNSEC_FIFO
56032 + select GRKERNSEC_RANDNET
56033 + select GRKERNSEC_DMESG
56034 + select GRKERNSEC_CHROOT
56035 + select GRKERNSEC_CHROOT_CHDIR
56036 +
56037 + help
56038 + If you choose this option, several of the grsecurity options will
56039 + be enabled that will give you greater protection against a number
56040 + of attacks, while assuring that none of your software will have any
56041 + conflicts with the additional security measures. If you run a lot
56042 + of unusual software, or you are having problems with the higher
56043 + security levels, you should say Y here. With this option, the
56044 + following features are enabled:
56045 +
56046 + - Linking restrictions
56047 + - FIFO restrictions
56048 + - Restricted dmesg
56049 + - Enforced chdir("/") on chroot
56050 + - Runtime module disabling
56051 +
56052 +config GRKERNSEC_MEDIUM
56053 + bool "Medium"
56054 + select PAX
56055 + select PAX_EI_PAX
56056 + select PAX_PT_PAX_FLAGS
56057 + select PAX_HAVE_ACL_FLAGS
56058 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
56059 + select GRKERNSEC_CHROOT
56060 + select GRKERNSEC_CHROOT_SYSCTL
56061 + select GRKERNSEC_LINK
56062 + select GRKERNSEC_FIFO
56063 + select GRKERNSEC_DMESG
56064 + select GRKERNSEC_RANDNET
56065 + select GRKERNSEC_FORKFAIL
56066 + select GRKERNSEC_TIME
56067 + select GRKERNSEC_SIGNAL
56068 + select GRKERNSEC_CHROOT
56069 + select GRKERNSEC_CHROOT_UNIX
56070 + select GRKERNSEC_CHROOT_MOUNT
56071 + select GRKERNSEC_CHROOT_PIVOT
56072 + select GRKERNSEC_CHROOT_DOUBLE
56073 + select GRKERNSEC_CHROOT_CHDIR
56074 + select GRKERNSEC_CHROOT_MKNOD
56075 + select GRKERNSEC_PROC
56076 + select GRKERNSEC_PROC_USERGROUP
56077 + select PAX_RANDUSTACK
56078 + select PAX_ASLR
56079 + select PAX_RANDMMAP
56080 + select PAX_REFCOUNT if (X86 || SPARC64)
56081 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
56082 +
56083 + help
56084 + If you say Y here, several features in addition to those included
56085 + in the low additional security level will be enabled. These
56086 + features provide even more security to your system, though in rare
56087 + cases they may be incompatible with very old or poorly written
56088 + software. If you enable this option, make sure that your auth
56089 + service (identd) is running as gid 1001. With this option,
56090 + the following features (in addition to those provided in the
56091 + low additional security level) will be enabled:
56092 +
56093 + - Failed fork logging
56094 + - Time change logging
56095 + - Signal logging
56096 + - Deny mounts in chroot
56097 + - Deny double chrooting
56098 + - Deny sysctl writes in chroot
56099 + - Deny mknod in chroot
56100 + - Deny access to abstract AF_UNIX sockets out of chroot
56101 + - Deny pivot_root in chroot
56102 + - Denied reads/writes of /dev/kmem, /dev/mem, and /dev/port
56103 + - /proc restrictions with special GID set to 10 (usually wheel)
56104 + - Address Space Layout Randomization (ASLR)
56105 + - Prevent exploitation of most refcount overflows
56106 + - Bounds checking of copying between the kernel and userland
56107 +
56108 +config GRKERNSEC_HIGH
56109 + bool "High"
56110 + select GRKERNSEC_LINK
56111 + select GRKERNSEC_FIFO
56112 + select GRKERNSEC_DMESG
56113 + select GRKERNSEC_FORKFAIL
56114 + select GRKERNSEC_TIME
56115 + select GRKERNSEC_SIGNAL
56116 + select GRKERNSEC_CHROOT
56117 + select GRKERNSEC_CHROOT_SHMAT
56118 + select GRKERNSEC_CHROOT_UNIX
56119 + select GRKERNSEC_CHROOT_MOUNT
56120 + select GRKERNSEC_CHROOT_FCHDIR
56121 + select GRKERNSEC_CHROOT_PIVOT
56122 + select GRKERNSEC_CHROOT_DOUBLE
56123 + select GRKERNSEC_CHROOT_CHDIR
56124 + select GRKERNSEC_CHROOT_MKNOD
56125 + select GRKERNSEC_CHROOT_CAPS
56126 + select GRKERNSEC_CHROOT_SYSCTL
56127 + select GRKERNSEC_CHROOT_FINDTASK
56128 + select GRKERNSEC_SYSFS_RESTRICT
56129 + select GRKERNSEC_PROC
56130 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
56131 + select GRKERNSEC_HIDESYM
56132 + select GRKERNSEC_BRUTE
56133 + select GRKERNSEC_PROC_USERGROUP
56134 + select GRKERNSEC_KMEM
56135 + select GRKERNSEC_RESLOG
56136 + select GRKERNSEC_RANDNET
56137 + select GRKERNSEC_PROC_ADD
56138 + select GRKERNSEC_CHROOT_CHMOD
56139 + select GRKERNSEC_CHROOT_NICE
56140 + select GRKERNSEC_SETXID
56141 + select GRKERNSEC_AUDIT_MOUNT
56142 + select GRKERNSEC_MODHARDEN if (MODULES)
56143 + select GRKERNSEC_HARDEN_PTRACE
56144 + select GRKERNSEC_PTRACE_READEXEC
56145 + select GRKERNSEC_VM86 if (X86_32)
56146 + select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
56147 + select PAX
56148 + select PAX_RANDUSTACK
56149 + select PAX_ASLR
56150 + select PAX_RANDMMAP
56151 + select PAX_NOEXEC
56152 + select PAX_MPROTECT
56153 + select PAX_EI_PAX
56154 + select PAX_PT_PAX_FLAGS
56155 + select PAX_HAVE_ACL_FLAGS
56156 + select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
56157 + select PAX_MEMORY_UDEREF if (X86 && !XEN)
56158 + select PAX_RANDKSTACK if (X86_TSC && X86)
56159 + select PAX_SEGMEXEC if (X86_32)
56160 + select PAX_PAGEEXEC
56161 + select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
56162 + select PAX_EMUTRAMP if (PARISC)
56163 + select PAX_EMUSIGRT if (PARISC)
56164 + select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
56165 + select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
56166 + select PAX_REFCOUNT if (X86 || SPARC64)
56167 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
56168 + help
56169 + If you say Y here, many of the features of grsecurity will be
56170 + enabled, which will protect you against many kinds of attacks
56171 + against your system. The heightened security comes at a cost
56172 + of an increased chance of incompatibilities with rare software
56173 + on your machine. Since this security level enables PaX, you should
56174 + view <http://pax.grsecurity.net> and read about the PaX
56175 + project. While you are there, download chpax and run it on
56176 + binaries that cause problems with PaX. Also remember that
56177 + since the /proc restrictions are enabled, you must run your
56178 + identd as gid 1001. This security level enables the following
56179 + features in addition to those listed in the low and medium
56180 + security levels:
56181 +
56182 + - Additional /proc restrictions
56183 + - Chmod restrictions in chroot
56184 + - No signals, ptrace, or viewing of processes outside of chroot
56185 + - Capability restrictions in chroot
56186 + - Deny fchdir out of chroot
56187 + - Priority restrictions in chroot
56188 + - Segmentation-based implementation of PaX
56189 + - Mprotect restrictions
56190 + - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
56191 + - Kernel stack randomization
56192 + - Mount/unmount/remount logging
56193 + - Kernel symbol hiding
56194 + - Hardening of module auto-loading
56195 + - Ptrace restrictions
56196 + - Restricted vm86 mode
56197 + - Restricted sysfs/debugfs
56198 + - Active kernel exploit response
56199 +
56200 +config GRKERNSEC_CUSTOM
56201 + bool "Custom"
56202 + help
56203 + If you say Y here, you will be able to configure every grsecurity
56204 + option, which allows you to enable many more features that aren't
56205 + covered in the basic security levels. These additional features
56206 + include TPE, socket restrictions, and the sysctl system for
56207 + grsecurity. It is advised that you read through the help for
56208 + each option to determine its usefulness in your situation.
56209 +
56210 +endchoice
56211 +
56212 +menu "Memory Protections"
56213 +depends on GRKERNSEC
56214 +
56215 +config GRKERNSEC_KMEM
56216 + bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
56217 + select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
56218 + help
56219 + If you say Y here, /dev/kmem and /dev/mem won't be allowed to
56220 + be written to or read from to modify or leak the contents of the running
56221 + kernel. /dev/port will also not be allowed to be opened. If you have module
56222 + support disabled, enabling this will close up four ways that are
56223 + currently used to insert malicious code into the running kernel.
56224 + Even with all these features enabled, we still highly recommend that
56225 + you use the RBAC system, as it is still possible for an attacker to
56226 + modify the running kernel through privileged I/O granted by ioperm/iopl.
56227 + If you are not using XFree86, you may be able to stop this additional
56228 + case by enabling the 'Disable privileged I/O' option. Though nothing
56229 + legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
56230 + but only to video memory, which is the only writing we allow in this
56231 + case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
56232 + not be allowed to mprotect it with PROT_WRITE later.
56233 + It is highly recommended that you say Y here if you meet all the
56234 + conditions above.
56235 +
56236 +config GRKERNSEC_VM86
56237 + bool "Restrict VM86 mode"
56238 + depends on X86_32
56239 +
56240 + help
56241 + If you say Y here, only processes with CAP_SYS_RAWIO will be able to
56242 + make use of a special execution mode on 32bit x86 processors called
56243 + Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
56244 + video cards and will still work with this option enabled. The purpose
56245 + of the option is to prevent exploitation of emulation errors in
56246 + virtualization of vm86 mode like the one discovered in VMWare in 2009.
56247 + Nearly all users should be able to enable this option.
56248 +
56249 +config GRKERNSEC_IO
56250 + bool "Disable privileged I/O"
56251 + depends on X86
56252 + select RTC_CLASS
56253 + select RTC_INTF_DEV
56254 + select RTC_DRV_CMOS
56255 +
56256 + help
56257 + If you say Y here, all ioperm and iopl calls will return an error.
56258 + Ioperm and iopl can be used to modify the running kernel.
56259 + Unfortunately, some programs need this access to operate properly,
56260 + the most notable of which are XFree86 and hwclock. hwclock can be
56261 + remedied by having RTC support in the kernel, so real-time
56262 + clock support is enabled if this option is enabled, to ensure
56263 + that hwclock operates correctly. XFree86 still will not
56264 + operate correctly with this option enabled, so DO NOT CHOOSE Y
56265 + IF YOU USE XFree86. If you use XFree86 and you still want to
56266 + protect your kernel against modification, use the RBAC system.
56267 +
56268 +config GRKERNSEC_PROC_MEMMAP
56269 + bool "Harden ASLR against information leaks and entropy reduction"
56270 + default y if (PAX_NOEXEC || PAX_ASLR)
56271 + depends on PAX_NOEXEC || PAX_ASLR
56272 + help
56273 + If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
56274 + give no information about the addresses of its mappings if
56275 + PaX features that rely on random addresses are enabled on the task.
56276 + In addition to sanitizing this information and disabling other
56277 + dangerous sources of information, this option causes reads of sensitive
56278 + /proc/<pid> entries where the file descriptor was opened in a different
56279 + task than the one performing the read. Such attempts are logged.
56280 + This option also limits argv/env strings for suid/sgid binaries
56281 + to 512KB to prevent a complete exhaustion of the stack entropy provided
56282 + by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
56283 + binaries to prevent alternative mmap layouts from being abused.
56284 +
56285 + If you use PaX it is essential that you say Y here as it closes up
56286 + several holes that make full ASLR useless locally.
56287 +
56288 +config GRKERNSEC_BRUTE
56289 + bool "Deter exploit bruteforcing"
56290 + help
56291 + If you say Y here, attempts to bruteforce exploits against forking
56292 + daemons such as apache or sshd, as well as against suid/sgid binaries
56293 + will be deterred. When a child of a forking daemon is killed by PaX
56294 + or crashes due to an illegal instruction or other suspicious signal,
56295 + the parent process will be delayed 30 seconds upon every subsequent
56296 + fork until the administrator is able to assess the situation and
56297 + restart the daemon.
56298 + In the suid/sgid case, the attempt is logged, the user has all their
56299 + processes terminated, and they are prevented from executing any further
56300 + processes for 15 minutes.
56301 + It is recommended that you also enable signal logging in the auditing
56302 + section so that logs are generated when a process triggers a suspicious
56303 + signal.
56304 + If the sysctl option is enabled, a sysctl option with name
56305 + "deter_bruteforce" is created.
56306 +
56307 +config GRKERNSEC_MODHARDEN
56308 + bool "Harden module auto-loading"
56309 + depends on MODULES
56310 + help
56311 + If you say Y here, module auto-loading in response to use of some
56312 + feature implemented by an unloaded module will be restricted to
56313 + root users. Enabling this option helps defend against attacks
56314 + by unprivileged users who abuse the auto-loading behavior to
56315 + cause a vulnerable module to load that is then exploited.
56316 +
56317 + If this option prevents a legitimate use of auto-loading for a
56318 + non-root user, the administrator can execute modprobe manually
56319 + with the exact name of the module mentioned in the alert log.
56320 + Alternatively, the administrator can add the module to the list
56321 + of modules loaded at boot by modifying init scripts.
56322 +
56323 + Modification of init scripts will most likely be needed on
56324 + Ubuntu servers with encrypted home directory support enabled,
56325 + as the first non-root user logging in will cause the ecb(aes),
56326 + ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
56327 +
56328 +config GRKERNSEC_HIDESYM
56329 + bool "Hide kernel symbols"
56330 + help
56331 + If you say Y here, getting information on loaded modules, and
56332 + displaying all kernel symbols through a syscall will be restricted
56333 + to users with CAP_SYS_MODULE. For software compatibility reasons,
56334 + /proc/kallsyms will be restricted to the root user. The RBAC
56335 + system can hide that entry even from root.
56336 +
56337 + This option also prevents leaking of kernel addresses through
56338 + several /proc entries.
56339 +
56340 + Note that this option is only effective provided the following
56341 + conditions are met:
56342 + 1) The kernel using grsecurity is not precompiled by some distribution
56343 + 2) You have also enabled GRKERNSEC_DMESG
56344 + 3) You are using the RBAC system and hiding other files such as your
56345 + kernel image and System.map. Alternatively, enabling this option
56346 + causes the permissions on /boot, /lib/modules, and the kernel
56347 + source directory to change at compile time to prevent
56348 + reading by non-root users.
56349 + If the above conditions are met, this option will aid in providing a
56350 + useful protection against local kernel exploitation of overflows
56351 + and arbitrary read/write vulnerabilities.
56352 +
56353 +config GRKERNSEC_KERN_LOCKOUT
56354 + bool "Active kernel exploit response"
56355 + depends on X86 || ARM || PPC || SPARC
56356 + help
56357 + If you say Y here, when a PaX alert is triggered due to suspicious
56358 + activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
56359 + or an OOPs occurs due to bad memory accesses, instead of just
56360 + terminating the offending process (and potentially allowing
56361 + a subsequent exploit from the same user), we will take one of two
56362 + actions:
56363 + If the user was root, we will panic the system
56364 + If the user was non-root, we will log the attempt, terminate
56365 + all processes owned by the user, then prevent them from creating
56366 + any new processes until the system is restarted
56367 + This deters repeated kernel exploitation/bruteforcing attempts
56368 + and is useful for later forensics.
56369 +
56370 +endmenu
56371 +menu "Role Based Access Control Options"
56372 +depends on GRKERNSEC
56373 +
56374 +config GRKERNSEC_RBAC_DEBUG
56375 + bool
56376 +
56377 +config GRKERNSEC_NO_RBAC
56378 + bool "Disable RBAC system"
56379 + help
56380 + If you say Y here, the /dev/grsec device will be removed from the kernel,
56381 + preventing the RBAC system from being enabled. You should only say Y
56382 + here if you have no intention of using the RBAC system, so as to prevent
56383 + an attacker with root access from misusing the RBAC system to hide files
56384 + and processes when loadable module support and /dev/[k]mem have been
56385 + locked down.
56386 +
56387 +config GRKERNSEC_ACL_HIDEKERN
56388 + bool "Hide kernel processes"
56389 + help
56390 + If you say Y here, all kernel threads will be hidden to all
56391 + processes but those whose subject has the "view hidden processes"
56392 + flag.
56393 +
56394 +config GRKERNSEC_ACL_MAXTRIES
56395 + int "Maximum tries before password lockout"
56396 + default 3
56397 + help
56398 + This option enforces the maximum number of times a user can attempt
56399 + to authorize themselves with the grsecurity RBAC system before being
56400 + denied the ability to attempt authorization again for a specified time.
56401 + The lower the number, the harder it will be to brute-force a password.
56402 +
56403 +config GRKERNSEC_ACL_TIMEOUT
56404 + int "Time to wait after max password tries, in seconds"
56405 + default 30
56406 + help
56407 + This option specifies the time the user must wait after attempting to
56408 + authorize to the RBAC system with the maximum number of invalid
56409 + passwords. The higher the number, the harder it will be to brute-force
56410 + a password.
56411 +
56412 +endmenu
56413 +menu "Filesystem Protections"
56414 +depends on GRKERNSEC
56415 +
56416 +config GRKERNSEC_PROC
56417 + bool "Proc restrictions"
56418 + help
56419 + If you say Y here, the permissions of the /proc filesystem
56420 + will be altered to enhance system security and privacy. You MUST
56421 + choose either a user only restriction or a user and group restriction.
56422 + Depending upon the option you choose, you can either restrict users to
56423 + see only the processes they themselves run, or choose a group that can
56424 + view all processes and files normally restricted to root if you choose
56425 + the "restrict to user only" option. NOTE: If you're running identd or
56426 + ntpd as a non-root user, you will have to run it as the group you
56427 + specify here.
56428 +
56429 +config GRKERNSEC_PROC_USER
56430 + bool "Restrict /proc to user only"
56431 + depends on GRKERNSEC_PROC
56432 + help
56433 + If you say Y here, non-root users will only be able to view their own
56434 + processes, and restricts them from viewing network-related information,
56435 + and viewing kernel symbol and module information.
56436 +
56437 +config GRKERNSEC_PROC_USERGROUP
56438 + bool "Allow special group"
56439 + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
56440 + help
56441 + If you say Y here, you will be able to select a group that will be
56442 + able to view all processes and network-related information. If you've
56443 + enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
56444 + remain hidden. This option is useful if you want to run identd as
56445 + a non-root user.
56446 +
56447 +config GRKERNSEC_PROC_GID
56448 + int "GID for special group"
56449 + depends on GRKERNSEC_PROC_USERGROUP
56450 + default 1001
56451 +
56452 +config GRKERNSEC_PROC_ADD
56453 + bool "Additional restrictions"
56454 + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
56455 + help
56456 + If you say Y here, additional restrictions will be placed on
56457 + /proc that keep normal users from viewing device information and
56458 + slabinfo information that could be useful for exploits.
56459 +
56460 +config GRKERNSEC_LINK
56461 + bool "Linking restrictions"
56462 + help
56463 + If you say Y here, /tmp race exploits will be prevented, since users
56464 + will no longer be able to follow symlinks owned by other users in
56465 + world-writable +t directories (e.g. /tmp), unless the owner of the
56466 + symlink is the owner of the directory. Users will also not be
56467 + able to hardlink to files they do not own. If the sysctl option is
56468 + enabled, a sysctl option with name "linking_restrictions" is created.
56469 +
56470 +config GRKERNSEC_FIFO
56471 + bool "FIFO restrictions"
56472 + help
56473 + If you say Y here, users will not be able to write to FIFOs they don't
56474 + own in world-writable +t directories (e.g. /tmp), unless the owner of
56475 + the FIFO is the same owner of the directory it's held in. If the sysctl
56476 + option is enabled, a sysctl option with name "fifo_restrictions" is
56477 + created.
56478 +
56479 +config GRKERNSEC_SYSFS_RESTRICT
56480 + bool "Sysfs/debugfs restriction"
56481 + depends on SYSFS
56482 + help
56483 + If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
56484 + any filesystem normally mounted under it (e.g. debugfs) will be
56485 + mostly accessible only by root. These filesystems generally provide access
56486 + to hardware and debug information that isn't appropriate for unprivileged
56487 + users of the system. Sysfs and debugfs have also become a large source
56488 + of new vulnerabilities, ranging from infoleaks to local compromise.
56489 + There has been very little oversight with an eye toward security involved
56490 + in adding new exporters of information to these filesystems, so their
56491 + use is discouraged.
56492 + For reasons of compatibility, a few directories have been whitelisted
56493 + for access by non-root users:
56494 + /sys/fs/selinux
56495 + /sys/fs/fuse
56496 + /sys/devices/system/cpu
56497 +
56498 +config GRKERNSEC_ROFS
56499 + bool "Runtime read-only mount protection"
56500 + help
56501 + If you say Y here, a sysctl option with name "romount_protect" will
56502 + be created. By setting this option to 1 at runtime, filesystems
56503 + will be protected in the following ways:
56504 + * No new writable mounts will be allowed
56505 + * Existing read-only mounts won't be able to be remounted read/write
56506 + * Write operations will be denied on all block devices
56507 + This option acts independently of grsec_lock: once it is set to 1,
56508 + it cannot be turned off. Therefore, please be mindful of the resulting
56509 + behavior if this option is enabled in an init script on a read-only
56510 + filesystem. This feature is mainly intended for secure embedded systems.
56511 +
56512 +config GRKERNSEC_CHROOT
56513 + bool "Chroot jail restrictions"
56514 + help
56515 + If you say Y here, you will be able to choose several options that will
56516 + make breaking out of a chrooted jail much more difficult. If you
56517 + encounter no software incompatibilities with the following options, it
56518 + is recommended that you enable each one.
56519 +
56520 +config GRKERNSEC_CHROOT_MOUNT
56521 + bool "Deny mounts"
56522 + depends on GRKERNSEC_CHROOT
56523 + help
56524 + If you say Y here, processes inside a chroot will not be able to
56525 + mount or remount filesystems. If the sysctl option is enabled, a
56526 + sysctl option with name "chroot_deny_mount" is created.
56527 +
56528 +config GRKERNSEC_CHROOT_DOUBLE
56529 + bool "Deny double-chroots"
56530 + depends on GRKERNSEC_CHROOT
56531 + help
56532 + If you say Y here, processes inside a chroot will not be able to chroot
56533 + again outside the chroot. This is a widely used method of breaking
56534 + out of a chroot jail and should not be allowed. If the sysctl
56535 + option is enabled, a sysctl option with name
56536 + "chroot_deny_chroot" is created.
56537 +
56538 +config GRKERNSEC_CHROOT_PIVOT
56539 + bool "Deny pivot_root in chroot"
56540 + depends on GRKERNSEC_CHROOT
56541 + help
56542 + If you say Y here, processes inside a chroot will not be able to use
56543 + a function called pivot_root() that was introduced in Linux 2.3.41. It
56544 + works similar to chroot in that it changes the root filesystem. This
56545 + function could be misused in a chrooted process to attempt to break out
56546 + of the chroot, and therefore should not be allowed. If the sysctl
56547 + option is enabled, a sysctl option with name "chroot_deny_pivot" is
56548 + created.
56549 +
56550 +config GRKERNSEC_CHROOT_CHDIR
56551 + bool "Enforce chdir(\"/\") on all chroots"
56552 + depends on GRKERNSEC_CHROOT
56553 + help
56554 + If you say Y here, the current working directory of all newly-chrooted
56555 + applications will be set to the root directory of the chroot.
56556 + The man page on chroot(2) states:
56557 + Note that this call does not change the current working
56558 + directory, so that `.' can be outside the tree rooted at
56559 + `/'. In particular, the super-user can escape from a
56560 + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
56561 +
56562 + It is recommended that you say Y here, since it's not known to break
56563 + any software. If the sysctl option is enabled, a sysctl option with
56564 + name "chroot_enforce_chdir" is created.
56565 +
56566 +config GRKERNSEC_CHROOT_CHMOD
56567 + bool "Deny (f)chmod +s"
56568 + depends on GRKERNSEC_CHROOT
56569 + help
56570 + If you say Y here, processes inside a chroot will not be able to chmod
56571 + or fchmod files to make them have suid or sgid bits. This protects
56572 + against another published method of breaking a chroot. If the sysctl
56573 + option is enabled, a sysctl option with name "chroot_deny_chmod" is
56574 + created.
56575 +
56576 +config GRKERNSEC_CHROOT_FCHDIR
56577 + bool "Deny fchdir out of chroot"
56578 + depends on GRKERNSEC_CHROOT
56579 + help
56580 + If you say Y here, a well-known method of breaking chroots by fchdir'ing
56581 + to a file descriptor of the chrooting process that points to a directory
56582 + outside the filesystem will be stopped. If the sysctl option
56583 + is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
56584 +
56585 +config GRKERNSEC_CHROOT_MKNOD
56586 + bool "Deny mknod"
56587 + depends on GRKERNSEC_CHROOT
56588 + help
56589 + If you say Y here, processes inside a chroot will not be allowed to
56590 + mknod. The problem with using mknod inside a chroot is that it
56591 + would allow an attacker to create a device entry that is the same
56592 + as one on the physical root of your system, which could range from
56593 + anything from the console device to a device for your harddrive (which
56594 + they could then use to wipe the drive or steal data). It is recommended
56595 + that you say Y here, unless you run into software incompatibilities.
56596 + If the sysctl option is enabled, a sysctl option with name
56597 + "chroot_deny_mknod" is created.
56598 +
56599 +config GRKERNSEC_CHROOT_SHMAT
56600 + bool "Deny shmat() out of chroot"
56601 + depends on GRKERNSEC_CHROOT
56602 + help
56603 + If you say Y here, processes inside a chroot will not be able to attach
56604 + to shared memory segments that were created outside of the chroot jail.
56605 + It is recommended that you say Y here. If the sysctl option is enabled,
56606 + a sysctl option with name "chroot_deny_shmat" is created.
56607 +
56608 +config GRKERNSEC_CHROOT_UNIX
56609 + bool "Deny access to abstract AF_UNIX sockets out of chroot"
56610 + depends on GRKERNSEC_CHROOT
56611 + help
56612 + If you say Y here, processes inside a chroot will not be able to
56613 + connect to abstract (meaning not belonging to a filesystem) Unix
56614 + domain sockets that were bound outside of a chroot. It is recommended
56615 + that you say Y here. If the sysctl option is enabled, a sysctl option
56616 + with name "chroot_deny_unix" is created.
56617 +
56618 +config GRKERNSEC_CHROOT_FINDTASK
56619 + bool "Protect outside processes"
56620 + depends on GRKERNSEC_CHROOT
56621 + help
56622 + If you say Y here, processes inside a chroot will not be able to
56623 + kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
56624 + getsid, or view any process outside of the chroot. If the sysctl
56625 + option is enabled, a sysctl option with name "chroot_findtask" is
56626 + created.
56627 +
56628 +config GRKERNSEC_CHROOT_NICE
56629 + bool "Restrict priority changes"
56630 + depends on GRKERNSEC_CHROOT
56631 + help
56632 + If you say Y here, processes inside a chroot will not be able to raise
56633 + the priority of processes in the chroot, or alter the priority of
56634 + processes outside the chroot. This provides more security than simply
56635 + removing CAP_SYS_NICE from the process' capability set. If the
56636 + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
56637 + is created.
56638 +
56639 +config GRKERNSEC_CHROOT_SYSCTL
56640 + bool "Deny sysctl writes"
56641 + depends on GRKERNSEC_CHROOT
56642 + help
56643 + If you say Y here, an attacker in a chroot will not be able to
56644 + write to sysctl entries, either by sysctl(2) or through a /proc
56645 + interface. It is strongly recommended that you say Y here. If the
56646 + sysctl option is enabled, a sysctl option with name
56647 + "chroot_deny_sysctl" is created.
56648 +
56649 +config GRKERNSEC_CHROOT_CAPS
56650 + bool "Capability restrictions"
56651 + depends on GRKERNSEC_CHROOT
56652 + help
56653 + If you say Y here, the capabilities on all processes within a
56654 + chroot jail will be lowered to stop module insertion, raw i/o,
56655 + system and net admin tasks, rebooting the system, modifying immutable
56656 + files, modifying IPC owned by another, and changing the system time.
56657 + This is left an option because it can break some apps. Disable this
56658 + if your chrooted apps are having problems performing those kinds of
56659 + tasks. If the sysctl option is enabled, a sysctl option with
56660 + name "chroot_caps" is created.
56661 +
56662 +endmenu
56663 +menu "Kernel Auditing"
56664 +depends on GRKERNSEC
56665 +
56666 +config GRKERNSEC_AUDIT_GROUP
56667 + bool "Single group for auditing"
56668 + help
56669 + If you say Y here, the exec, chdir, and (un)mount logging features
56670 + will only operate on a group you specify. This option is recommended
56671 + if you only want to watch certain users instead of having a large
56672 + amount of logs from the entire system. If the sysctl option is enabled,
56673 + a sysctl option with name "audit_group" is created.
56674 +
56675 +config GRKERNSEC_AUDIT_GID
56676 + int "GID for auditing"
56677 + depends on GRKERNSEC_AUDIT_GROUP
56678 + default 1007
56679 +
56680 +config GRKERNSEC_EXECLOG
56681 + bool "Exec logging"
56682 + help
56683 + If you say Y here, all execve() calls will be logged (since the
56684 + other exec*() calls are frontends to execve(), all execution
56685 + will be logged). Useful for shell-servers that like to keep track
56686 + of their users. If the sysctl option is enabled, a sysctl option with
56687 + name "exec_logging" is created.
56688 + WARNING: This option when enabled will produce a LOT of logs, especially
56689 + on an active system.
56690 +
56691 +config GRKERNSEC_RESLOG
56692 + bool "Resource logging"
56693 + help
56694 + If you say Y here, all attempts to overstep resource limits will
56695 + be logged with the resource name, the requested size, and the current
56696 + limit. It is highly recommended that you say Y here. If the sysctl
56697 + option is enabled, a sysctl option with name "resource_logging" is
56698 + created. If the RBAC system is enabled, the sysctl value is ignored.
56699 +
56700 +config GRKERNSEC_CHROOT_EXECLOG
56701 + bool "Log execs within chroot"
56702 + help
56703 + If you say Y here, all executions inside a chroot jail will be logged
56704 + to syslog. This can cause a large amount of logs if certain
56705 + applications (eg. djb's daemontools) are installed on the system, and
56706 + is therefore left as an option. If the sysctl option is enabled, a
56707 + sysctl option with name "chroot_execlog" is created.
56708 +
56709 +config GRKERNSEC_AUDIT_PTRACE
56710 + bool "Ptrace logging"
56711 + help
56712 + If you say Y here, all attempts to attach to a process via ptrace
56713 + will be logged. If the sysctl option is enabled, a sysctl option
56714 + with name "audit_ptrace" is created.
56715 +
56716 +config GRKERNSEC_AUDIT_CHDIR
56717 + bool "Chdir logging"
56718 + help
56719 + If you say Y here, all chdir() calls will be logged. If the sysctl
56720 + option is enabled, a sysctl option with name "audit_chdir" is created.
56721 +
56722 +config GRKERNSEC_AUDIT_MOUNT
56723 + bool "(Un)Mount logging"
56724 + help
56725 + If you say Y here, all mounts and unmounts will be logged. If the
56726 + sysctl option is enabled, a sysctl option with name "audit_mount" is
56727 + created.
56728 +
56729 +config GRKERNSEC_SIGNAL
56730 + bool "Signal logging"
56731 + help
56732 + If you say Y here, certain important signals will be logged, such as
56733 + SIGSEGV, which will as a result inform you of when an error in a program
56734 + occurred, which in some cases could mean a possible exploit attempt.
56735 + If the sysctl option is enabled, a sysctl option with name
56736 + "signal_logging" is created.
56737 +
56738 +config GRKERNSEC_FORKFAIL
56739 + bool "Fork failure logging"
56740 + help
56741 + If you say Y here, all failed fork() attempts will be logged.
56742 + This could suggest a fork bomb, or someone attempting to overstep
56743 + their process limit. If the sysctl option is enabled, a sysctl option
56744 + with name "forkfail_logging" is created.
56745 +
56746 +config GRKERNSEC_TIME
56747 + bool "Time change logging"
56748 + help
56749 + If you say Y here, any changes of the system clock will be logged.
56750 + If the sysctl option is enabled, a sysctl option with name
56751 + "timechange_logging" is created.
56752 +
56753 +config GRKERNSEC_PROC_IPADDR
56754 + bool "/proc/<pid>/ipaddr support"
56755 + help
56756 + If you say Y here, a new entry will be added to each /proc/<pid>
56757 + directory that contains the IP address of the person using the task.
56758 + The IP is carried across local TCP and AF_UNIX stream sockets.
56759 + This information can be useful for IDS/IPSes to perform remote response
56760 + to a local attack. The entry is readable by only the owner of the
56761 + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
56762 + the RBAC system), and thus does not create privacy concerns.
56763 +
56764 +config GRKERNSEC_RWXMAP_LOG
56765 + bool 'Denied RWX mmap/mprotect logging'
56766 + depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
56767 + help
56768 + If you say Y here, calls to mmap() and mprotect() with explicit
56769 + usage of PROT_WRITE and PROT_EXEC together will be logged when
56770 + denied by the PAX_MPROTECT feature. If the sysctl option is
56771 + enabled, a sysctl option with name "rwxmap_logging" is created.
56772 +
56773 +config GRKERNSEC_AUDIT_TEXTREL
56774 + bool 'ELF text relocations logging (READ HELP)'
56775 + depends on PAX_MPROTECT
56776 + help
56777 + If you say Y here, text relocations will be logged with the filename
56778 + of the offending library or binary. The purpose of the feature is
56779 + to help Linux distribution developers get rid of libraries and
56780 + binaries that need text relocations which hinder the future progress
56781 + of PaX. Only Linux distribution developers should say Y here, and
56782 + never on a production machine, as this option creates an information
56783 + leak that could aid an attacker in defeating the randomization of
56784 + a single memory region. If the sysctl option is enabled, a sysctl
56785 + option with name "audit_textrel" is created.
56786 +
56787 +endmenu
56788 +
56789 +menu "Executable Protections"
56790 +depends on GRKERNSEC
56791 +
56792 +config GRKERNSEC_DMESG
56793 + bool "Dmesg(8) restriction"
56794 + help
56795 + If you say Y here, non-root users will not be able to use dmesg(8)
56796 + to view up to the last 4kb of messages in the kernel's log buffer.
56797 + The kernel's log buffer often contains kernel addresses and other
56798 + identifying information useful to an attacker in fingerprinting a
56799 + system for a targeted exploit.
56800 + If the sysctl option is enabled, a sysctl option with name "dmesg" is
56801 + created.
56802 +
56803 +config GRKERNSEC_HARDEN_PTRACE
56804 + bool "Deter ptrace-based process snooping"
56805 + help
56806 + If you say Y here, TTY sniffers and other malicious monitoring
56807 + programs implemented through ptrace will be defeated. If you
56808 + have been using the RBAC system, this option has already been
56809 + enabled for several years for all users, with the ability to make
56810 + fine-grained exceptions.
56811 +
56812 + This option only affects the ability of non-root users to ptrace
56813 + processes that are not a descendent of the ptracing process.
56814 + This means that strace ./binary and gdb ./binary will still work,
56815 + but attaching to arbitrary processes will not. If the sysctl
56816 + option is enabled, a sysctl option with name "harden_ptrace" is
56817 + created.
56818 +
56819 +config GRKERNSEC_PTRACE_READEXEC
56820 + bool "Require read access to ptrace sensitive binaries"
56821 + help
56822 + If you say Y here, unprivileged users will not be able to ptrace unreadable
56823 + binaries. This option is useful in environments that
56824 + remove the read bits (e.g. file mode 4711) from suid binaries to
56825 + prevent infoleaking of their contents. This option adds
56826 + consistency to the use of that file mode, as the binary could normally
56827 + be read out when run without privileges while ptracing.
56828 +
56829 + If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
56830 + is created.
56831 +
56832 +config GRKERNSEC_SETXID
56833 + bool "Enforce consistent multithreaded privileges"
56834 + help
56835 + If you say Y here, a change from a root uid to a non-root uid
56836 + in a multithreaded application will cause the resulting uids,
56837 + gids, supplementary groups, and capabilities in that thread
56838 + to be propagated to the other threads of the process. In most
56839 + cases this is unnecessary, as glibc will emulate this behavior
56840 + on behalf of the application. Other libcs do not act in the
56841 + same way, allowing the other threads of the process to continue
56842 + running with root privileges. If the sysctl option is enabled,
56843 + a sysctl option with name "consistent_setxid" is created.
56844 +
56845 +config GRKERNSEC_TPE
56846 + bool "Trusted Path Execution (TPE)"
56847 + help
56848 + If you say Y here, you will be able to choose a gid to add to the
56849 + supplementary groups of users you want to mark as "untrusted."
56850 + These users will not be able to execute any files that are not in
56851 + root-owned directories writable only by root. If the sysctl option
56852 + is enabled, a sysctl option with name "tpe" is created.
56853 +
56854 +config GRKERNSEC_TPE_ALL
56855 + bool "Partially restrict all non-root users"
56856 + depends on GRKERNSEC_TPE
56857 + help
56858 + If you say Y here, all non-root users will be covered under
56859 + a weaker TPE restriction. This is separate from, and in addition to,
56860 + the main TPE options that you have selected elsewhere. Thus, if a
56861 + "trusted" GID is chosen, this restriction applies to even that GID.
56862 + Under this restriction, all non-root users will only be allowed to
56863 + execute files in directories they own that are not group or
56864 + world-writable, or in directories owned by root and writable only by
56865 + root. If the sysctl option is enabled, a sysctl option with name
56866 + "tpe_restrict_all" is created.
56867 +
56868 +config GRKERNSEC_TPE_INVERT
56869 + bool "Invert GID option"
56870 + depends on GRKERNSEC_TPE
56871 + help
56872 + If you say Y here, the group you specify in the TPE configuration will
56873 + decide what group TPE restrictions will be *disabled* for. This
56874 + option is useful if you want TPE restrictions to be applied to most
56875 + users on the system. If the sysctl option is enabled, a sysctl option
56876 + with name "tpe_invert" is created. Unlike other sysctl options, this
56877 + entry will default to on for backward-compatibility.
56878 +
56879 +config GRKERNSEC_TPE_GID
56880 + int "GID for untrusted users"
56881 + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
56882 + default 1005
56883 + help
56884 + Setting this GID determines what group TPE restrictions will be
56885 + *enabled* for. If the sysctl option is enabled, a sysctl option
56886 + with name "tpe_gid" is created.
56887 +
56888 +config GRKERNSEC_TPE_GID
56889 + int "GID for trusted users"
56890 + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
56891 + default 1005
56892 + help
56893 + Setting this GID determines what group TPE restrictions will be
56894 + *disabled* for. If the sysctl option is enabled, a sysctl option
56895 + with name "tpe_gid" is created.
56896 +
56897 +endmenu
56898 +menu "Network Protections"
56899 +depends on GRKERNSEC
56900 +
56901 +config GRKERNSEC_RANDNET
56902 + bool "Larger entropy pools"
56903 + help
56904 + If you say Y here, the entropy pools used for many features of Linux
56905 + and grsecurity will be doubled in size. Since several grsecurity
56906 + features use additional randomness, it is recommended that you say Y
56907 + here. Saying Y here has a similar effect as modifying
56908 + /proc/sys/kernel/random/poolsize.
56909 +
56910 +config GRKERNSEC_BLACKHOLE
56911 + bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
56912 + depends on NET
56913 + help
56914 + If you say Y here, neither TCP resets nor ICMP
56915 + destination-unreachable packets will be sent in response to packets
56916 + sent to ports for which no associated listening process exists.
56917 + This feature supports both IPV4 and IPV6 and exempts the
56918 + loopback interface from blackholing. Enabling this feature
56919 + makes a host more resilient to DoS attacks and reduces network
56920 + visibility against scanners.
56921 +
56922 + The blackhole feature as-implemented is equivalent to the FreeBSD
56923 + blackhole feature, as it prevents RST responses to all packets, not
56924 + just SYNs. Under most application behavior this causes no
56925 + problems, but applications (like haproxy) may not close certain
56926 + connections in a way that cleanly terminates them on the remote
56927 + end, leaving the remote host in LAST_ACK state. Because of this
56928 + side-effect and to prevent intentional LAST_ACK DoSes, this
56929 + feature also adds automatic mitigation against such attacks.
56930 + The mitigation drastically reduces the amount of time a socket
56931 + can spend in LAST_ACK state. If you're using haproxy and not
56932 + all servers it connects to have this option enabled, consider
56933 + disabling this feature on the haproxy host.
56934 +
56935 + If the sysctl option is enabled, two sysctl options with names
56936 + "ip_blackhole" and "lastack_retries" will be created.
56937 + While "ip_blackhole" takes the standard zero/non-zero on/off
56938 + toggle, "lastack_retries" uses the same kinds of values as
56939 + "tcp_retries1" and "tcp_retries2". The default value of 4
56940 + prevents a socket from lasting more than 45 seconds in LAST_ACK
56941 + state.
56942 +
56943 +config GRKERNSEC_SOCKET
56944 + bool "Socket restrictions"
56945 + depends on NET
56946 + help
56947 + If you say Y here, you will be able to choose from several options.
56948 + If you assign a GID on your system and add it to the supplementary
56949 + groups of users you want to restrict socket access to, this patch
56950 + will perform up to three things, based on the option(s) you choose.
56951 +
56952 +config GRKERNSEC_SOCKET_ALL
56953 + bool "Deny any sockets to group"
56954 + depends on GRKERNSEC_SOCKET
56955 + help
56956 + If you say Y here, you will be able to choose a GID whose users will
56957 + be unable to connect to other hosts from your machine or run server
56958 + applications from your machine. If the sysctl option is enabled, a
56959 + sysctl option with name "socket_all" is created.
56960 +
56961 +config GRKERNSEC_SOCKET_ALL_GID
56962 + int "GID to deny all sockets for"
56963 + depends on GRKERNSEC_SOCKET_ALL
56964 + default 1004
56965 + help
56966 + Here you can choose the GID to disable socket access for. Remember to
56967 + add the users you want socket access disabled for to the GID
56968 + specified here. If the sysctl option is enabled, a sysctl option
56969 + with name "socket_all_gid" is created.
56970 +
56971 +config GRKERNSEC_SOCKET_CLIENT
56972 + bool "Deny client sockets to group"
56973 + depends on GRKERNSEC_SOCKET
56974 + help
56975 + If you say Y here, you will be able to choose a GID whose users will
56976 + be unable to connect to other hosts from your machine, but will be
56977 + able to run servers. If this option is enabled, all users in the group
56978 + you specify will have to use passive mode when initiating ftp transfers
56979 + from the shell on your machine. If the sysctl option is enabled, a
56980 + sysctl option with name "socket_client" is created.
56981 +
56982 +config GRKERNSEC_SOCKET_CLIENT_GID
56983 + int "GID to deny client sockets for"
56984 + depends on GRKERNSEC_SOCKET_CLIENT
56985 + default 1003
56986 + help
56987 + Here you can choose the GID to disable client socket access for.
56988 + Remember to add the users you want client socket access disabled for to
56989 + the GID specified here. If the sysctl option is enabled, a sysctl
56990 + option with name "socket_client_gid" is created.
56991 +
56992 +config GRKERNSEC_SOCKET_SERVER
56993 + bool "Deny server sockets to group"
56994 + depends on GRKERNSEC_SOCKET
56995 + help
56996 + If you say Y here, you will be able to choose a GID whose users will
56997 + be unable to run server applications from your machine. If the sysctl
56998 + option is enabled, a sysctl option with name "socket_server" is created.
56999 +
57000 +config GRKERNSEC_SOCKET_SERVER_GID
57001 + int "GID to deny server sockets for"
57002 + depends on GRKERNSEC_SOCKET_SERVER
57003 + default 1002
57004 + help
57005 + Here you can choose the GID to disable server socket access for.
57006 + Remember to add the users you want server socket access disabled for to
57007 + the GID specified here. If the sysctl option is enabled, a sysctl
57008 + option with name "socket_server_gid" is created.
57009 +
57010 +endmenu
57011 +menu "Sysctl support"
57012 +depends on GRKERNSEC && SYSCTL
57013 +
57014 +config GRKERNSEC_SYSCTL
57015 + bool "Sysctl support"
57016 + help
57017 + If you say Y here, you will be able to change the options that
57018 + grsecurity runs with at bootup, without having to recompile your
57019 + kernel. You can echo values to files in /proc/sys/kernel/grsecurity
57020 + to enable (1) or disable (0) various features. All the sysctl entries
57021 + are mutable until the "grsec_lock" entry is set to a non-zero value.
57022 + All features enabled in the kernel configuration are disabled at boot
57023 + if you do not say Y to the "Turn on features by default" option.
57024 + All options should be set at startup, and the grsec_lock entry should
57025 + be set to a non-zero value after all the options are set.
57026 + *THIS IS EXTREMELY IMPORTANT*
57027 +
57028 +config GRKERNSEC_SYSCTL_DISTRO
57029 + bool "Extra sysctl support for distro makers (READ HELP)"
57030 + depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
57031 + help
57032 + If you say Y here, additional sysctl options will be created
57033 + for features that affect processes running as root. Therefore,
57034 + it is critical when using this option that the grsec_lock entry be
57035 + enabled after boot. Only distros with prebuilt kernel packages
57036 + with this option enabled that can ensure grsec_lock is enabled
57037 + after boot should use this option.
57038 + *Failure to set grsec_lock after boot makes all grsec features
57039 + this option covers useless*
57040 +
57041 + Currently this option creates the following sysctl entries:
57042 + "Disable Privileged I/O": "disable_priv_io"
57043 +
57044 +config GRKERNSEC_SYSCTL_ON
57045 + bool "Turn on features by default"
57046 + depends on GRKERNSEC_SYSCTL
57047 + help
57048 + If you say Y here, instead of having all features enabled in the
57049 + kernel configuration disabled at boot time, the features will be
57050 + enabled at boot time. It is recommended you say Y here unless
57051 + there is some reason you would want all sysctl-tunable features to
57052 + be disabled by default. As mentioned elsewhere, it is important
57053 + to enable the grsec_lock entry once you have finished modifying
57054 + the sysctl entries.
57055 +
57056 +endmenu
57057 +menu "Logging Options"
57058 +depends on GRKERNSEC
57059 +
57060 +config GRKERNSEC_FLOODTIME
57061 + int "Seconds in between log messages (minimum)"
57062 + default 10
57063 + help
57064 + This option allows you to enforce the number of seconds between
57065 + grsecurity log messages. The default should be suitable for most
57066 + people, however, if you choose to change it, choose a value small enough
57067 + to allow informative logs to be produced, but large enough to
57068 + prevent flooding.
57069 +
57070 +config GRKERNSEC_FLOODBURST
57071 + int "Number of messages in a burst (maximum)"
57072 + default 6
57073 + help
57074 + This option allows you to choose the maximum number of messages allowed
57075 + within the flood time interval you chose in a separate option. The
57076 + default should be suitable for most people, however if you find that
57077 + many of your logs are being interpreted as flooding, you may want to
57078 + raise this value.
57079 +
57080 +endmenu
57081 +
57082 +endmenu
57083 diff --git a/grsecurity/Makefile b/grsecurity/Makefile
57084 new file mode 100644
57085 index 0000000..1b9afa9
57086 --- /dev/null
57087 +++ b/grsecurity/Makefile
57088 @@ -0,0 +1,38 @@
57089 +# grsecurity's ACL system was originally written in 2001 by Michael Dalton
57090 +# during 2001-2009 it has been completely redesigned by Brad Spengler
57091 +# into an RBAC system
57092 +#
57093 +# All code in this directory and various hooks inserted throughout the kernel
57094 +# are copyright Brad Spengler - Open Source Security, Inc., and released
57095 +# under the GPL v2 or higher
57096 +
57097 +KBUILD_CFLAGS += -Werror
57098 +
57099 +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
57100 + grsec_mount.o grsec_sig.o grsec_sysctl.o \
57101 + grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
57102 +
57103 +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
57104 + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
57105 + gracl_learn.o grsec_log.o
57106 +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
57107 +
57108 +ifdef CONFIG_NET
57109 +obj-y += grsec_sock.o
57110 +obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
57111 +endif
57112 +
57113 +ifndef CONFIG_GRKERNSEC
57114 +obj-y += grsec_disabled.o
57115 +endif
57116 +
57117 +ifdef CONFIG_GRKERNSEC_HIDESYM
57118 +extra-y := grsec_hidesym.o
57119 +$(obj)/grsec_hidesym.o:
57120 + @-chmod -f 500 /boot
57121 + @-chmod -f 500 /lib/modules
57122 + @-chmod -f 500 /lib64/modules
57123 + @-chmod -f 500 /lib32/modules
57124 + @-chmod -f 700 .
57125 + @echo ' grsec: protected kernel image paths'
57126 +endif
57127 diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
57128 new file mode 100644
57129 index 0000000..dc4812b
57130 --- /dev/null
57131 +++ b/grsecurity/gracl.c
57132 @@ -0,0 +1,4148 @@
57133 +#include <linux/kernel.h>
57134 +#include <linux/module.h>
57135 +#include <linux/sched.h>
57136 +#include <linux/mm.h>
57137 +#include <linux/file.h>
57138 +#include <linux/fs.h>
57139 +#include <linux/namei.h>
57140 +#include <linux/mount.h>
57141 +#include <linux/tty.h>
57142 +#include <linux/proc_fs.h>
57143 +#include <linux/smp_lock.h>
57144 +#include <linux/slab.h>
57145 +#include <linux/vmalloc.h>
57146 +#include <linux/types.h>
57147 +#include <linux/sysctl.h>
57148 +#include <linux/netdevice.h>
57149 +#include <linux/ptrace.h>
57150 +#include <linux/gracl.h>
57151 +#include <linux/gralloc.h>
57152 +#include <linux/security.h>
57153 +#include <linux/grinternal.h>
57154 +#include <linux/pid_namespace.h>
57155 +#include <linux/fdtable.h>
57156 +#include <linux/percpu.h>
57157 +
57158 +#include <asm/uaccess.h>
57159 +#include <asm/errno.h>
57160 +#include <asm/mman.h>
57161 +
57162 +static struct acl_role_db acl_role_set;
57163 +static struct name_db name_set;
57164 +static struct inodev_db inodev_set;
57165 +
57166 +/* for keeping track of userspace pointers used for subjects, so we
57167 + can share references in the kernel as well
57168 +*/
57169 +
57170 +static struct dentry *real_root;
57171 +static struct vfsmount *real_root_mnt;
57172 +
57173 +static struct acl_subj_map_db subj_map_set;
57174 +
57175 +static struct acl_role_label *default_role;
57176 +
57177 +static struct acl_role_label *role_list;
57178 +
57179 +static u16 acl_sp_role_value;
57180 +
57181 +extern char *gr_shared_page[4];
57182 +static DEFINE_MUTEX(gr_dev_mutex);
57183 +DEFINE_RWLOCK(gr_inode_lock);
57184 +
57185 +struct gr_arg *gr_usermode;
57186 +
57187 +static unsigned int gr_status __read_only = GR_STATUS_INIT;
57188 +
57189 +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
57190 +extern void gr_clear_learn_entries(void);
57191 +
57192 +#ifdef CONFIG_GRKERNSEC_RESLOG
57193 +extern void gr_log_resource(const struct task_struct *task,
57194 + const int res, const unsigned long wanted, const int gt);
57195 +#endif
57196 +
57197 +unsigned char *gr_system_salt;
57198 +unsigned char *gr_system_sum;
57199 +
57200 +static struct sprole_pw **acl_special_roles = NULL;
57201 +static __u16 num_sprole_pws = 0;
57202 +
57203 +static struct acl_role_label *kernel_role = NULL;
57204 +
57205 +static unsigned int gr_auth_attempts = 0;
57206 +static unsigned long gr_auth_expires = 0UL;
57207 +
57208 +#ifdef CONFIG_NET
57209 +extern struct vfsmount *sock_mnt;
57210 +#endif
57211 +extern struct vfsmount *pipe_mnt;
57212 +extern struct vfsmount *shm_mnt;
57213 +#ifdef CONFIG_HUGETLBFS
57214 +extern struct vfsmount *hugetlbfs_vfsmount;
57215 +#endif
57216 +
57217 +static struct acl_object_label *fakefs_obj_rw;
57218 +static struct acl_object_label *fakefs_obj_rwx;
57219 +
57220 +extern int gr_init_uidset(void);
57221 +extern void gr_free_uidset(void);
57222 +extern void gr_remove_uid(uid_t uid);
57223 +extern int gr_find_uid(uid_t uid);
57224 +
57225 +__inline__ int
57226 +gr_acl_is_enabled(void)
57227 +{
57228 + return (gr_status & GR_READY);
57229 +}
57230 +
57231 +#ifdef CONFIG_BTRFS_FS
57232 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
57233 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
57234 +#endif
57235 +
57236 +static inline dev_t __get_dev(const struct dentry *dentry)
57237 +{
57238 +#ifdef CONFIG_BTRFS_FS
57239 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
57240 + return get_btrfs_dev_from_inode(dentry->d_inode);
57241 + else
57242 +#endif
57243 + return dentry->d_inode->i_sb->s_dev;
57244 +}
57245 +
57246 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
57247 +{
57248 + return __get_dev(dentry);
57249 +}
57250 +
57251 +static char gr_task_roletype_to_char(struct task_struct *task)
57252 +{
57253 + switch (task->role->roletype &
57254 + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
57255 + GR_ROLE_SPECIAL)) {
57256 + case GR_ROLE_DEFAULT:
57257 + return 'D';
57258 + case GR_ROLE_USER:
57259 + return 'U';
57260 + case GR_ROLE_GROUP:
57261 + return 'G';
57262 + case GR_ROLE_SPECIAL:
57263 + return 'S';
57264 + }
57265 +
57266 + return 'X';
57267 +}
57268 +
57269 +char gr_roletype_to_char(void)
57270 +{
57271 + return gr_task_roletype_to_char(current);
57272 +}
57273 +
57274 +__inline__ int
57275 +gr_acl_tpe_check(void)
57276 +{
57277 + if (unlikely(!(gr_status & GR_READY)))
57278 + return 0;
57279 + if (current->role->roletype & GR_ROLE_TPE)
57280 + return 1;
57281 + else
57282 + return 0;
57283 +}
57284 +
57285 +int
57286 +gr_handle_rawio(const struct inode *inode)
57287 +{
57288 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
57289 + if (inode && S_ISBLK(inode->i_mode) &&
57290 + grsec_enable_chroot_caps && proc_is_chrooted(current) &&
57291 + !capable(CAP_SYS_RAWIO))
57292 + return 1;
57293 +#endif
57294 + return 0;
57295 +}
57296 +
57297 +static int
57298 +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
57299 +{
57300 + if (likely(lena != lenb))
57301 + return 0;
57302 +
57303 + return !memcmp(a, b, lena);
57304 +}
57305 +
57306 +static int prepend(char **buffer, int *buflen, const char *str, int namelen)
57307 +{
57308 + *buflen -= namelen;
57309 + if (*buflen < 0)
57310 + return -ENAMETOOLONG;
57311 + *buffer -= namelen;
57312 + memcpy(*buffer, str, namelen);
57313 + return 0;
57314 +}
57315 +
57316 +/* this must be called with vfsmount_lock and dcache_lock held */
57317 +
57318 +static char * __our_d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
57319 + struct dentry *root, struct vfsmount *rootmnt,
57320 + char *buffer, int buflen)
57321 +{
57322 + char * end = buffer+buflen;
57323 + char * retval;
57324 + int namelen;
57325 +
57326 + *--end = '\0';
57327 + buflen--;
57328 +
57329 + if (buflen < 1)
57330 + goto Elong;
57331 + /* Get '/' right */
57332 + retval = end-1;
57333 + *retval = '/';
57334 +
57335 + for (;;) {
57336 + struct dentry * parent;
57337 +
57338 + if (dentry == root && vfsmnt == rootmnt)
57339 + break;
57340 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
57341 + /* Global root? */
57342 + if (vfsmnt->mnt_parent == vfsmnt)
57343 + goto global_root;
57344 + dentry = vfsmnt->mnt_mountpoint;
57345 + vfsmnt = vfsmnt->mnt_parent;
57346 + continue;
57347 + }
57348 + parent = dentry->d_parent;
57349 + prefetch(parent);
57350 + namelen = dentry->d_name.len;
57351 + buflen -= namelen + 1;
57352 + if (buflen < 0)
57353 + goto Elong;
57354 + end -= namelen;
57355 + memcpy(end, dentry->d_name.name, namelen);
57356 + *--end = '/';
57357 + retval = end;
57358 + dentry = parent;
57359 + }
57360 +
57361 +out:
57362 + return retval;
57363 +
57364 +global_root:
57365 + namelen = dentry->d_name.len;
57366 + buflen -= namelen;
57367 + if (buflen < 0)
57368 + goto Elong;
57369 + retval -= namelen-1; /* hit the slash */
57370 + memcpy(retval, dentry->d_name.name, namelen);
57371 + goto out;
57372 +Elong:
57373 + retval = ERR_PTR(-ENAMETOOLONG);
57374 + goto out;
57375 +}
57376 +
57377 +static char *
57378 +gen_full_path(struct dentry *dentry, struct vfsmount *vfsmnt,
57379 + struct dentry *root, struct vfsmount *rootmnt, char *buf, int buflen)
57380 +{
57381 + char *retval;
57382 +
57383 + retval = __our_d_path(dentry, vfsmnt, root, rootmnt, buf, buflen);
57384 + if (unlikely(IS_ERR(retval)))
57385 + retval = strcpy(buf, "<path too long>");
57386 + else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
57387 + retval[1] = '\0';
57388 +
57389 + return retval;
57390 +}
57391 +
57392 +static char *
57393 +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
57394 + char *buf, int buflen)
57395 +{
57396 + char *res;
57397 +
57398 + /* we can use real_root, real_root_mnt, because this is only called
57399 + by the RBAC system */
57400 + res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, real_root, real_root_mnt, buf, buflen);
57401 +
57402 + return res;
57403 +}
57404 +
57405 +static char *
57406 +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
57407 + char *buf, int buflen)
57408 +{
57409 + char *res;
57410 + struct dentry *root;
57411 + struct vfsmount *rootmnt;
57412 + struct task_struct *reaper = &init_task;
57413 +
57414 + /* we can't use real_root, real_root_mnt, because they belong only to the RBAC system */
57415 + read_lock(&reaper->fs->lock);
57416 + root = dget(reaper->fs->root.dentry);
57417 + rootmnt = mntget(reaper->fs->root.mnt);
57418 + read_unlock(&reaper->fs->lock);
57419 +
57420 + spin_lock(&dcache_lock);
57421 + spin_lock(&vfsmount_lock);
57422 + res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, root, rootmnt, buf, buflen);
57423 + spin_unlock(&vfsmount_lock);
57424 + spin_unlock(&dcache_lock);
57425 +
57426 + dput(root);
57427 + mntput(rootmnt);
57428 + return res;
57429 +}
57430 +
57431 +static char *
57432 +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
57433 +{
57434 + char *ret;
57435 + spin_lock(&dcache_lock);
57436 + spin_lock(&vfsmount_lock);
57437 + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
57438 + PAGE_SIZE);
57439 + spin_unlock(&vfsmount_lock);
57440 + spin_unlock(&dcache_lock);
57441 + return ret;
57442 +}
57443 +
57444 +static char *
57445 +gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
57446 +{
57447 + char *ret;
57448 + char *buf;
57449 + int buflen;
57450 +
57451 + spin_lock(&dcache_lock);
57452 + spin_lock(&vfsmount_lock);
57453 + buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
57454 + ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
57455 + buflen = (int)(ret - buf);
57456 + if (buflen >= 5)
57457 + prepend(&ret, &buflen, "/proc", 5);
57458 + else
57459 + ret = strcpy(buf, "<path too long>");
57460 + spin_unlock(&vfsmount_lock);
57461 + spin_unlock(&dcache_lock);
57462 + return ret;
57463 +}
57464 +
57465 +char *
57466 +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
57467 +{
57468 + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
57469 + PAGE_SIZE);
57470 +}
57471 +
57472 +char *
57473 +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
57474 +{
57475 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
57476 + PAGE_SIZE);
57477 +}
57478 +
57479 +char *
57480 +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
57481 +{
57482 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
57483 + PAGE_SIZE);
57484 +}
57485 +
57486 +char *
57487 +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
57488 +{
57489 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
57490 + PAGE_SIZE);
57491 +}
57492 +
57493 +char *
57494 +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
57495 +{
57496 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
57497 + PAGE_SIZE);
57498 +}
57499 +
57500 +__inline__ __u32
57501 +to_gr_audit(const __u32 reqmode)
57502 +{
57503 + /* masks off auditable permission flags, then shifts them to create
57504 + auditing flags, and adds the special case of append auditing if
57505 + we're requesting write */
57506 + return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
57507 +}
57508 +
57509 +struct acl_subject_label *
57510 +lookup_subject_map(const struct acl_subject_label *userp)
57511 +{
57512 + unsigned int index = shash(userp, subj_map_set.s_size);
57513 + struct subject_map *match;
57514 +
57515 + match = subj_map_set.s_hash[index];
57516 +
57517 + while (match && match->user != userp)
57518 + match = match->next;
57519 +
57520 + if (match != NULL)
57521 + return match->kernel;
57522 + else
57523 + return NULL;
57524 +}
57525 +
57526 +static void
57527 +insert_subj_map_entry(struct subject_map *subjmap)
57528 +{
57529 + unsigned int index = shash(subjmap->user, subj_map_set.s_size);
57530 + struct subject_map **curr;
57531 +
57532 + subjmap->prev = NULL;
57533 +
57534 + curr = &subj_map_set.s_hash[index];
57535 + if (*curr != NULL)
57536 + (*curr)->prev = subjmap;
57537 +
57538 + subjmap->next = *curr;
57539 + *curr = subjmap;
57540 +
57541 + return;
57542 +}
57543 +
57544 +static struct acl_role_label *
57545 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
57546 + const gid_t gid)
57547 +{
57548 + unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
57549 + struct acl_role_label *match;
57550 + struct role_allowed_ip *ipp;
57551 + unsigned int x;
57552 + u32 curr_ip = task->signal->curr_ip;
57553 +
57554 + task->signal->saved_ip = curr_ip;
57555 +
57556 + match = acl_role_set.r_hash[index];
57557 +
57558 + while (match) {
57559 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
57560 + for (x = 0; x < match->domain_child_num; x++) {
57561 + if (match->domain_children[x] == uid)
57562 + goto found;
57563 + }
57564 + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
57565 + break;
57566 + match = match->next;
57567 + }
57568 +found:
57569 + if (match == NULL) {
57570 + try_group:
57571 + index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
57572 + match = acl_role_set.r_hash[index];
57573 +
57574 + while (match) {
57575 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
57576 + for (x = 0; x < match->domain_child_num; x++) {
57577 + if (match->domain_children[x] == gid)
57578 + goto found2;
57579 + }
57580 + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
57581 + break;
57582 + match = match->next;
57583 + }
57584 +found2:
57585 + if (match == NULL)
57586 + match = default_role;
57587 + if (match->allowed_ips == NULL)
57588 + return match;
57589 + else {
57590 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
57591 + if (likely
57592 + ((ntohl(curr_ip) & ipp->netmask) ==
57593 + (ntohl(ipp->addr) & ipp->netmask)))
57594 + return match;
57595 + }
57596 + match = default_role;
57597 + }
57598 + } else if (match->allowed_ips == NULL) {
57599 + return match;
57600 + } else {
57601 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
57602 + if (likely
57603 + ((ntohl(curr_ip) & ipp->netmask) ==
57604 + (ntohl(ipp->addr) & ipp->netmask)))
57605 + return match;
57606 + }
57607 + goto try_group;
57608 + }
57609 +
57610 + return match;
57611 +}
57612 +
57613 +struct acl_subject_label *
57614 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
57615 + const struct acl_role_label *role)
57616 +{
57617 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
57618 + struct acl_subject_label *match;
57619 +
57620 + match = role->subj_hash[index];
57621 +
57622 + while (match && (match->inode != ino || match->device != dev ||
57623 + (match->mode & GR_DELETED))) {
57624 + match = match->next;
57625 + }
57626 +
57627 + if (match && !(match->mode & GR_DELETED))
57628 + return match;
57629 + else
57630 + return NULL;
57631 +}
57632 +
57633 +struct acl_subject_label *
57634 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
57635 + const struct acl_role_label *role)
57636 +{
57637 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
57638 + struct acl_subject_label *match;
57639 +
57640 + match = role->subj_hash[index];
57641 +
57642 + while (match && (match->inode != ino || match->device != dev ||
57643 + !(match->mode & GR_DELETED))) {
57644 + match = match->next;
57645 + }
57646 +
57647 + if (match && (match->mode & GR_DELETED))
57648 + return match;
57649 + else
57650 + return NULL;
57651 +}
57652 +
57653 +static struct acl_object_label *
57654 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
57655 + const struct acl_subject_label *subj)
57656 +{
57657 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
57658 + struct acl_object_label *match;
57659 +
57660 + match = subj->obj_hash[index];
57661 +
57662 + while (match && (match->inode != ino || match->device != dev ||
57663 + (match->mode & GR_DELETED))) {
57664 + match = match->next;
57665 + }
57666 +
57667 + if (match && !(match->mode & GR_DELETED))
57668 + return match;
57669 + else
57670 + return NULL;
57671 +}
57672 +
57673 +static struct acl_object_label *
57674 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
57675 + const struct acl_subject_label *subj)
57676 +{
57677 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
57678 + struct acl_object_label *match;
57679 +
57680 + match = subj->obj_hash[index];
57681 +
57682 + while (match && (match->inode != ino || match->device != dev ||
57683 + !(match->mode & GR_DELETED))) {
57684 + match = match->next;
57685 + }
57686 +
57687 + if (match && (match->mode & GR_DELETED))
57688 + return match;
57689 +
57690 + match = subj->obj_hash[index];
57691 +
57692 + while (match && (match->inode != ino || match->device != dev ||
57693 + (match->mode & GR_DELETED))) {
57694 + match = match->next;
57695 + }
57696 +
57697 + if (match && !(match->mode & GR_DELETED))
57698 + return match;
57699 + else
57700 + return NULL;
57701 +}
57702 +
57703 +static struct name_entry *
57704 +lookup_name_entry(const char *name)
57705 +{
57706 + unsigned int len = strlen(name);
57707 + unsigned int key = full_name_hash(name, len);
57708 + unsigned int index = key % name_set.n_size;
57709 + struct name_entry *match;
57710 +
57711 + match = name_set.n_hash[index];
57712 +
57713 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
57714 + match = match->next;
57715 +
57716 + return match;
57717 +}
57718 +
57719 +static struct name_entry *
57720 +lookup_name_entry_create(const char *name)
57721 +{
57722 + unsigned int len = strlen(name);
57723 + unsigned int key = full_name_hash(name, len);
57724 + unsigned int index = key % name_set.n_size;
57725 + struct name_entry *match;
57726 +
57727 + match = name_set.n_hash[index];
57728 +
57729 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
57730 + !match->deleted))
57731 + match = match->next;
57732 +
57733 + if (match && match->deleted)
57734 + return match;
57735 +
57736 + match = name_set.n_hash[index];
57737 +
57738 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
57739 + match->deleted))
57740 + match = match->next;
57741 +
57742 + if (match && !match->deleted)
57743 + return match;
57744 + else
57745 + return NULL;
57746 +}
57747 +
57748 +static struct inodev_entry *
57749 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
57750 +{
57751 + unsigned int index = fhash(ino, dev, inodev_set.i_size);
57752 + struct inodev_entry *match;
57753 +
57754 + match = inodev_set.i_hash[index];
57755 +
57756 + while (match && (match->nentry->inode != ino || match->nentry->device != dev))
57757 + match = match->next;
57758 +
57759 + return match;
57760 +}
57761 +
57762 +static void
57763 +insert_inodev_entry(struct inodev_entry *entry)
57764 +{
57765 + unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
57766 + inodev_set.i_size);
57767 + struct inodev_entry **curr;
57768 +
57769 + entry->prev = NULL;
57770 +
57771 + curr = &inodev_set.i_hash[index];
57772 + if (*curr != NULL)
57773 + (*curr)->prev = entry;
57774 +
57775 + entry->next = *curr;
57776 + *curr = entry;
57777 +
57778 + return;
57779 +}
57780 +
57781 +static void
57782 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
57783 +{
57784 + unsigned int index =
57785 + rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
57786 + struct acl_role_label **curr;
57787 + struct acl_role_label *tmp;
57788 +
57789 + curr = &acl_role_set.r_hash[index];
57790 +
57791 + /* if role was already inserted due to domains and already has
57792 + a role in the same bucket as it attached, then we need to
57793 + combine these two buckets
57794 + */
57795 + if (role->next) {
57796 + tmp = role->next;
57797 + while (tmp->next)
57798 + tmp = tmp->next;
57799 + tmp->next = *curr;
57800 + } else
57801 + role->next = *curr;
57802 + *curr = role;
57803 +
57804 + return;
57805 +}
57806 +
57807 +static void
57808 +insert_acl_role_label(struct acl_role_label *role)
57809 +{
57810 + int i;
57811 +
57812 + if (role_list == NULL) {
57813 + role_list = role;
57814 + role->prev = NULL;
57815 + } else {
57816 + role->prev = role_list;
57817 + role_list = role;
57818 + }
57819 +
57820 + /* used for hash chains */
57821 + role->next = NULL;
57822 +
57823 + if (role->roletype & GR_ROLE_DOMAIN) {
57824 + for (i = 0; i < role->domain_child_num; i++)
57825 + __insert_acl_role_label(role, role->domain_children[i]);
57826 + } else
57827 + __insert_acl_role_label(role, role->uidgid);
57828 +}
57829 +
57830 +static int
57831 +insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
57832 +{
57833 + struct name_entry **curr, *nentry;
57834 + struct inodev_entry *ientry;
57835 + unsigned int len = strlen(name);
57836 + unsigned int key = full_name_hash(name, len);
57837 + unsigned int index = key % name_set.n_size;
57838 +
57839 + curr = &name_set.n_hash[index];
57840 +
57841 + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
57842 + curr = &((*curr)->next);
57843 +
57844 + if (*curr != NULL)
57845 + return 1;
57846 +
57847 + nentry = acl_alloc(sizeof (struct name_entry));
57848 + if (nentry == NULL)
57849 + return 0;
57850 + ientry = acl_alloc(sizeof (struct inodev_entry));
57851 + if (ientry == NULL)
57852 + return 0;
57853 + ientry->nentry = nentry;
57854 +
57855 + nentry->key = key;
57856 + nentry->name = name;
57857 + nentry->inode = inode;
57858 + nentry->device = device;
57859 + nentry->len = len;
57860 + nentry->deleted = deleted;
57861 +
57862 + nentry->prev = NULL;
57863 + curr = &name_set.n_hash[index];
57864 + if (*curr != NULL)
57865 + (*curr)->prev = nentry;
57866 + nentry->next = *curr;
57867 + *curr = nentry;
57868 +
57869 + /* insert us into the table searchable by inode/dev */
57870 + insert_inodev_entry(ientry);
57871 +
57872 + return 1;
57873 +}
57874 +
57875 +static void
57876 +insert_acl_obj_label(struct acl_object_label *obj,
57877 + struct acl_subject_label *subj)
57878 +{
57879 + unsigned int index =
57880 + fhash(obj->inode, obj->device, subj->obj_hash_size);
57881 + struct acl_object_label **curr;
57882 +
57883 +
57884 + obj->prev = NULL;
57885 +
57886 + curr = &subj->obj_hash[index];
57887 + if (*curr != NULL)
57888 + (*curr)->prev = obj;
57889 +
57890 + obj->next = *curr;
57891 + *curr = obj;
57892 +
57893 + return;
57894 +}
57895 +
57896 +static void
57897 +insert_acl_subj_label(struct acl_subject_label *obj,
57898 + struct acl_role_label *role)
57899 +{
57900 + unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
57901 + struct acl_subject_label **curr;
57902 +
57903 + obj->prev = NULL;
57904 +
57905 + curr = &role->subj_hash[index];
57906 + if (*curr != NULL)
57907 + (*curr)->prev = obj;
57908 +
57909 + obj->next = *curr;
57910 + *curr = obj;
57911 +
57912 + return;
57913 +}
57914 +
57915 +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
57916 +
57917 +static void *
57918 +create_table(__u32 * len, int elementsize)
57919 +{
57920 + unsigned int table_sizes[] = {
57921 + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
57922 + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
57923 + 4194301, 8388593, 16777213, 33554393, 67108859
57924 + };
57925 + void *newtable = NULL;
57926 + unsigned int pwr = 0;
57927 +
57928 + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
57929 + table_sizes[pwr] <= *len)
57930 + pwr++;
57931 +
57932 + if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
57933 + return newtable;
57934 +
57935 + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
57936 + newtable =
57937 + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
57938 + else
57939 + newtable = vmalloc(table_sizes[pwr] * elementsize);
57940 +
57941 + *len = table_sizes[pwr];
57942 +
57943 + return newtable;
57944 +}
57945 +
57946 +static int
57947 +init_variables(const struct gr_arg *arg)
57948 +{
57949 + struct task_struct *reaper = &init_task;
57950 + unsigned int stacksize;
57951 +
57952 + subj_map_set.s_size = arg->role_db.num_subjects;
57953 + acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
57954 + name_set.n_size = arg->role_db.num_objects;
57955 + inodev_set.i_size = arg->role_db.num_objects;
57956 +
57957 + if (!subj_map_set.s_size || !acl_role_set.r_size ||
57958 + !name_set.n_size || !inodev_set.i_size)
57959 + return 1;
57960 +
57961 + if (!gr_init_uidset())
57962 + return 1;
57963 +
57964 + /* set up the stack that holds allocation info */
57965 +
57966 + stacksize = arg->role_db.num_pointers + 5;
57967 +
57968 + if (!acl_alloc_stack_init(stacksize))
57969 + return 1;
57970 +
57971 + /* grab reference for the real root dentry and vfsmount */
57972 + read_lock(&reaper->fs->lock);
57973 + real_root = dget(reaper->fs->root.dentry);
57974 + real_root_mnt = mntget(reaper->fs->root.mnt);
57975 + read_unlock(&reaper->fs->lock);
57976 +
57977 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
57978 + printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root), real_root->d_inode->i_ino);
57979 +#endif
57980 +
57981 + fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
57982 + if (fakefs_obj_rw == NULL)
57983 + return 1;
57984 + fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
57985 +
57986 + fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
57987 + if (fakefs_obj_rwx == NULL)
57988 + return 1;
57989 + fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
57990 +
57991 + subj_map_set.s_hash =
57992 + (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
57993 + acl_role_set.r_hash =
57994 + (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
57995 + name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
57996 + inodev_set.i_hash =
57997 + (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
57998 +
57999 + if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
58000 + !name_set.n_hash || !inodev_set.i_hash)
58001 + return 1;
58002 +
58003 + memset(subj_map_set.s_hash, 0,
58004 + sizeof(struct subject_map *) * subj_map_set.s_size);
58005 + memset(acl_role_set.r_hash, 0,
58006 + sizeof (struct acl_role_label *) * acl_role_set.r_size);
58007 + memset(name_set.n_hash, 0,
58008 + sizeof (struct name_entry *) * name_set.n_size);
58009 + memset(inodev_set.i_hash, 0,
58010 + sizeof (struct inodev_entry *) * inodev_set.i_size);
58011 +
58012 + return 0;
58013 +}
58014 +
58015 +/* free information not needed after startup
58016 + currently contains user->kernel pointer mappings for subjects
58017 +*/
58018 +
58019 +static void
58020 +free_init_variables(void)
58021 +{
58022 + __u32 i;
58023 +
58024 + if (subj_map_set.s_hash) {
58025 + for (i = 0; i < subj_map_set.s_size; i++) {
58026 + if (subj_map_set.s_hash[i]) {
58027 + kfree(subj_map_set.s_hash[i]);
58028 + subj_map_set.s_hash[i] = NULL;
58029 + }
58030 + }
58031 +
58032 + if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
58033 + PAGE_SIZE)
58034 + kfree(subj_map_set.s_hash);
58035 + else
58036 + vfree(subj_map_set.s_hash);
58037 + }
58038 +
58039 + return;
58040 +}
58041 +
58042 +static void
58043 +free_variables(void)
58044 +{
58045 + struct acl_subject_label *s;
58046 + struct acl_role_label *r;
58047 + struct task_struct *task, *task2;
58048 + unsigned int x;
58049 +
58050 + gr_clear_learn_entries();
58051 +
58052 + read_lock(&tasklist_lock);
58053 + do_each_thread(task2, task) {
58054 + task->acl_sp_role = 0;
58055 + task->acl_role_id = 0;
58056 + task->acl = NULL;
58057 + task->role = NULL;
58058 + } while_each_thread(task2, task);
58059 + read_unlock(&tasklist_lock);
58060 +
58061 + /* release the reference to the real root dentry and vfsmount */
58062 + if (real_root)
58063 + dput(real_root);
58064 + real_root = NULL;
58065 + if (real_root_mnt)
58066 + mntput(real_root_mnt);
58067 + real_root_mnt = NULL;
58068 +
58069 + /* free all object hash tables */
58070 +
58071 + FOR_EACH_ROLE_START(r)
58072 + if (r->subj_hash == NULL)
58073 + goto next_role;
58074 + FOR_EACH_SUBJECT_START(r, s, x)
58075 + if (s->obj_hash == NULL)
58076 + break;
58077 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
58078 + kfree(s->obj_hash);
58079 + else
58080 + vfree(s->obj_hash);
58081 + FOR_EACH_SUBJECT_END(s, x)
58082 + FOR_EACH_NESTED_SUBJECT_START(r, s)
58083 + if (s->obj_hash == NULL)
58084 + break;
58085 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
58086 + kfree(s->obj_hash);
58087 + else
58088 + vfree(s->obj_hash);
58089 + FOR_EACH_NESTED_SUBJECT_END(s)
58090 + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
58091 + kfree(r->subj_hash);
58092 + else
58093 + vfree(r->subj_hash);
58094 + r->subj_hash = NULL;
58095 +next_role:
58096 + FOR_EACH_ROLE_END(r)
58097 +
58098 + acl_free_all();
58099 +
58100 + if (acl_role_set.r_hash) {
58101 + if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
58102 + PAGE_SIZE)
58103 + kfree(acl_role_set.r_hash);
58104 + else
58105 + vfree(acl_role_set.r_hash);
58106 + }
58107 + if (name_set.n_hash) {
58108 + if ((name_set.n_size * sizeof (struct name_entry *)) <=
58109 + PAGE_SIZE)
58110 + kfree(name_set.n_hash);
58111 + else
58112 + vfree(name_set.n_hash);
58113 + }
58114 +
58115 + if (inodev_set.i_hash) {
58116 + if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
58117 + PAGE_SIZE)
58118 + kfree(inodev_set.i_hash);
58119 + else
58120 + vfree(inodev_set.i_hash);
58121 + }
58122 +
58123 + gr_free_uidset();
58124 +
58125 + memset(&name_set, 0, sizeof (struct name_db));
58126 + memset(&inodev_set, 0, sizeof (struct inodev_db));
58127 + memset(&acl_role_set, 0, sizeof (struct acl_role_db));
58128 + memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
58129 +
58130 + default_role = NULL;
58131 + role_list = NULL;
58132 +
58133 + return;
58134 +}
58135 +
58136 +static __u32
58137 +count_user_objs(struct acl_object_label *userp)
58138 +{
58139 + struct acl_object_label o_tmp;
58140 + __u32 num = 0;
58141 +
58142 + while (userp) {
58143 + if (copy_from_user(&o_tmp, userp,
58144 + sizeof (struct acl_object_label)))
58145 + break;
58146 +
58147 + userp = o_tmp.prev;
58148 + num++;
58149 + }
58150 +
58151 + return num;
58152 +}
58153 +
58154 +static struct acl_subject_label *
58155 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
58156 +
58157 +static int
58158 +copy_user_glob(struct acl_object_label *obj)
58159 +{
58160 + struct acl_object_label *g_tmp, **guser;
58161 + unsigned int len;
58162 + char *tmp;
58163 +
58164 + if (obj->globbed == NULL)
58165 + return 0;
58166 +
58167 + guser = &obj->globbed;
58168 + while (*guser) {
58169 + g_tmp = (struct acl_object_label *)
58170 + acl_alloc(sizeof (struct acl_object_label));
58171 + if (g_tmp == NULL)
58172 + return -ENOMEM;
58173 +
58174 + if (copy_from_user(g_tmp, *guser,
58175 + sizeof (struct acl_object_label)))
58176 + return -EFAULT;
58177 +
58178 + len = strnlen_user(g_tmp->filename, PATH_MAX);
58179 +
58180 + if (!len || len >= PATH_MAX)
58181 + return -EINVAL;
58182 +
58183 + if ((tmp = (char *) acl_alloc(len)) == NULL)
58184 + return -ENOMEM;
58185 +
58186 + if (copy_from_user(tmp, g_tmp->filename, len))
58187 + return -EFAULT;
58188 + tmp[len-1] = '\0';
58189 + g_tmp->filename = tmp;
58190 +
58191 + *guser = g_tmp;
58192 + guser = &(g_tmp->next);
58193 + }
58194 +
58195 + return 0;
58196 +}
58197 +
58198 +static int
58199 +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
58200 + struct acl_role_label *role)
58201 +{
58202 + struct acl_object_label *o_tmp;
58203 + unsigned int len;
58204 + int ret;
58205 + char *tmp;
58206 +
58207 + while (userp) {
58208 + if ((o_tmp = (struct acl_object_label *)
58209 + acl_alloc(sizeof (struct acl_object_label))) == NULL)
58210 + return -ENOMEM;
58211 +
58212 + if (copy_from_user(o_tmp, userp,
58213 + sizeof (struct acl_object_label)))
58214 + return -EFAULT;
58215 +
58216 + userp = o_tmp->prev;
58217 +
58218 + len = strnlen_user(o_tmp->filename, PATH_MAX);
58219 +
58220 + if (!len || len >= PATH_MAX)
58221 + return -EINVAL;
58222 +
58223 + if ((tmp = (char *) acl_alloc(len)) == NULL)
58224 + return -ENOMEM;
58225 +
58226 + if (copy_from_user(tmp, o_tmp->filename, len))
58227 + return -EFAULT;
58228 + tmp[len-1] = '\0';
58229 + o_tmp->filename = tmp;
58230 +
58231 + insert_acl_obj_label(o_tmp, subj);
58232 + if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
58233 + o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
58234 + return -ENOMEM;
58235 +
58236 + ret = copy_user_glob(o_tmp);
58237 + if (ret)
58238 + return ret;
58239 +
58240 + if (o_tmp->nested) {
58241 + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
58242 + if (IS_ERR(o_tmp->nested))
58243 + return PTR_ERR(o_tmp->nested);
58244 +
58245 + /* insert into nested subject list */
58246 + o_tmp->nested->next = role->hash->first;
58247 + role->hash->first = o_tmp->nested;
58248 + }
58249 + }
58250 +
58251 + return 0;
58252 +}
58253 +
58254 +static __u32
58255 +count_user_subjs(struct acl_subject_label *userp)
58256 +{
58257 + struct acl_subject_label s_tmp;
58258 + __u32 num = 0;
58259 +
58260 + while (userp) {
58261 + if (copy_from_user(&s_tmp, userp,
58262 + sizeof (struct acl_subject_label)))
58263 + break;
58264 +
58265 + userp = s_tmp.prev;
58266 + /* do not count nested subjects against this count, since
58267 + they are not included in the hash table, but are
58268 + attached to objects. We have already counted
58269 + the subjects in userspace for the allocation
58270 + stack
58271 + */
58272 + if (!(s_tmp.mode & GR_NESTED))
58273 + num++;
58274 + }
58275 +
58276 + return num;
58277 +}
58278 +
58279 +static int
58280 +copy_user_allowedips(struct acl_role_label *rolep)
58281 +{
58282 + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
58283 +
58284 + ruserip = rolep->allowed_ips;
58285 +
58286 + while (ruserip) {
58287 + rlast = rtmp;
58288 +
58289 + if ((rtmp = (struct role_allowed_ip *)
58290 + acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
58291 + return -ENOMEM;
58292 +
58293 + if (copy_from_user(rtmp, ruserip,
58294 + sizeof (struct role_allowed_ip)))
58295 + return -EFAULT;
58296 +
58297 + ruserip = rtmp->prev;
58298 +
58299 + if (!rlast) {
58300 + rtmp->prev = NULL;
58301 + rolep->allowed_ips = rtmp;
58302 + } else {
58303 + rlast->next = rtmp;
58304 + rtmp->prev = rlast;
58305 + }
58306 +
58307 + if (!ruserip)
58308 + rtmp->next = NULL;
58309 + }
58310 +
58311 + return 0;
58312 +}
58313 +
58314 +static int
58315 +copy_user_transitions(struct acl_role_label *rolep)
58316 +{
58317 + struct role_transition *rusertp, *rtmp = NULL, *rlast;
58318 +
58319 + unsigned int len;
58320 + char *tmp;
58321 +
58322 + rusertp = rolep->transitions;
58323 +
58324 + while (rusertp) {
58325 + rlast = rtmp;
58326 +
58327 + if ((rtmp = (struct role_transition *)
58328 + acl_alloc(sizeof (struct role_transition))) == NULL)
58329 + return -ENOMEM;
58330 +
58331 + if (copy_from_user(rtmp, rusertp,
58332 + sizeof (struct role_transition)))
58333 + return -EFAULT;
58334 +
58335 + rusertp = rtmp->prev;
58336 +
58337 + len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
58338 +
58339 + if (!len || len >= GR_SPROLE_LEN)
58340 + return -EINVAL;
58341 +
58342 + if ((tmp = (char *) acl_alloc(len)) == NULL)
58343 + return -ENOMEM;
58344 +
58345 + if (copy_from_user(tmp, rtmp->rolename, len))
58346 + return -EFAULT;
58347 + tmp[len-1] = '\0';
58348 + rtmp->rolename = tmp;
58349 +
58350 + if (!rlast) {
58351 + rtmp->prev = NULL;
58352 + rolep->transitions = rtmp;
58353 + } else {
58354 + rlast->next = rtmp;
58355 + rtmp->prev = rlast;
58356 + }
58357 +
58358 + if (!rusertp)
58359 + rtmp->next = NULL;
58360 + }
58361 +
58362 + return 0;
58363 +}
58364 +
58365 +static struct acl_subject_label *
58366 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
58367 +{
58368 + struct acl_subject_label *s_tmp = NULL, *s_tmp2;
58369 + unsigned int len;
58370 + char *tmp;
58371 + __u32 num_objs;
58372 + struct acl_ip_label **i_tmp, *i_utmp2;
58373 + struct gr_hash_struct ghash;
58374 + struct subject_map *subjmap;
58375 + unsigned int i_num;
58376 + int err;
58377 +
58378 + s_tmp = lookup_subject_map(userp);
58379 +
58380 + /* we've already copied this subject into the kernel, just return
58381 + the reference to it, and don't copy it over again
58382 + */
58383 + if (s_tmp)
58384 + return(s_tmp);
58385 +
58386 + if ((s_tmp = (struct acl_subject_label *)
58387 + acl_alloc(sizeof (struct acl_subject_label))) == NULL)
58388 + return ERR_PTR(-ENOMEM);
58389 +
58390 + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
58391 + if (subjmap == NULL)
58392 + return ERR_PTR(-ENOMEM);
58393 +
58394 + subjmap->user = userp;
58395 + subjmap->kernel = s_tmp;
58396 + insert_subj_map_entry(subjmap);
58397 +
58398 + if (copy_from_user(s_tmp, userp,
58399 + sizeof (struct acl_subject_label)))
58400 + return ERR_PTR(-EFAULT);
58401 +
58402 + len = strnlen_user(s_tmp->filename, PATH_MAX);
58403 +
58404 + if (!len || len >= PATH_MAX)
58405 + return ERR_PTR(-EINVAL);
58406 +
58407 + if ((tmp = (char *) acl_alloc(len)) == NULL)
58408 + return ERR_PTR(-ENOMEM);
58409 +
58410 + if (copy_from_user(tmp, s_tmp->filename, len))
58411 + return ERR_PTR(-EFAULT);
58412 + tmp[len-1] = '\0';
58413 + s_tmp->filename = tmp;
58414 +
58415 + if (!strcmp(s_tmp->filename, "/"))
58416 + role->root_label = s_tmp;
58417 +
58418 + if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
58419 + return ERR_PTR(-EFAULT);
58420 +
58421 + /* copy user and group transition tables */
58422 +
58423 + if (s_tmp->user_trans_num) {
58424 + uid_t *uidlist;
58425 +
58426 + uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
58427 + if (uidlist == NULL)
58428 + return ERR_PTR(-ENOMEM);
58429 + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
58430 + return ERR_PTR(-EFAULT);
58431 +
58432 + s_tmp->user_transitions = uidlist;
58433 + }
58434 +
58435 + if (s_tmp->group_trans_num) {
58436 + gid_t *gidlist;
58437 +
58438 + gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
58439 + if (gidlist == NULL)
58440 + return ERR_PTR(-ENOMEM);
58441 + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
58442 + return ERR_PTR(-EFAULT);
58443 +
58444 + s_tmp->group_transitions = gidlist;
58445 + }
58446 +
58447 + /* set up object hash table */
58448 + num_objs = count_user_objs(ghash.first);
58449 +
58450 + s_tmp->obj_hash_size = num_objs;
58451 + s_tmp->obj_hash =
58452 + (struct acl_object_label **)
58453 + create_table(&(s_tmp->obj_hash_size), sizeof(void *));
58454 +
58455 + if (!s_tmp->obj_hash)
58456 + return ERR_PTR(-ENOMEM);
58457 +
58458 + memset(s_tmp->obj_hash, 0,
58459 + s_tmp->obj_hash_size *
58460 + sizeof (struct acl_object_label *));
58461 +
58462 + /* add in objects */
58463 + err = copy_user_objs(ghash.first, s_tmp, role);
58464 +
58465 + if (err)
58466 + return ERR_PTR(err);
58467 +
58468 + /* set pointer for parent subject */
58469 + if (s_tmp->parent_subject) {
58470 + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
58471 +
58472 + if (IS_ERR(s_tmp2))
58473 + return s_tmp2;
58474 +
58475 + s_tmp->parent_subject = s_tmp2;
58476 + }
58477 +
58478 + /* add in ip acls */
58479 +
58480 + if (!s_tmp->ip_num) {
58481 + s_tmp->ips = NULL;
58482 + goto insert;
58483 + }
58484 +
58485 + i_tmp =
58486 + (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
58487 + sizeof (struct acl_ip_label *));
58488 +
58489 + if (!i_tmp)
58490 + return ERR_PTR(-ENOMEM);
58491 +
58492 + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
58493 + *(i_tmp + i_num) =
58494 + (struct acl_ip_label *)
58495 + acl_alloc(sizeof (struct acl_ip_label));
58496 + if (!*(i_tmp + i_num))
58497 + return ERR_PTR(-ENOMEM);
58498 +
58499 + if (copy_from_user
58500 + (&i_utmp2, s_tmp->ips + i_num,
58501 + sizeof (struct acl_ip_label *)))
58502 + return ERR_PTR(-EFAULT);
58503 +
58504 + if (copy_from_user
58505 + (*(i_tmp + i_num), i_utmp2,
58506 + sizeof (struct acl_ip_label)))
58507 + return ERR_PTR(-EFAULT);
58508 +
58509 + if ((*(i_tmp + i_num))->iface == NULL)
58510 + continue;
58511 +
58512 + len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
58513 + if (!len || len >= IFNAMSIZ)
58514 + return ERR_PTR(-EINVAL);
58515 + tmp = acl_alloc(len);
58516 + if (tmp == NULL)
58517 + return ERR_PTR(-ENOMEM);
58518 + if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
58519 + return ERR_PTR(-EFAULT);
58520 + (*(i_tmp + i_num))->iface = tmp;
58521 + }
58522 +
58523 + s_tmp->ips = i_tmp;
58524 +
58525 +insert:
58526 + if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
58527 + s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
58528 + return ERR_PTR(-ENOMEM);
58529 +
58530 + return s_tmp;
58531 +}
58532 +
58533 +static int
58534 +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
58535 +{
58536 + struct acl_subject_label s_pre;
58537 + struct acl_subject_label * ret;
58538 + int err;
58539 +
58540 + while (userp) {
58541 + if (copy_from_user(&s_pre, userp,
58542 + sizeof (struct acl_subject_label)))
58543 + return -EFAULT;
58544 +
58545 + /* do not add nested subjects here, add
58546 + while parsing objects
58547 + */
58548 +
58549 + if (s_pre.mode & GR_NESTED) {
58550 + userp = s_pre.prev;
58551 + continue;
58552 + }
58553 +
58554 + ret = do_copy_user_subj(userp, role);
58555 +
58556 + err = PTR_ERR(ret);
58557 + if (IS_ERR(ret))
58558 + return err;
58559 +
58560 + insert_acl_subj_label(ret, role);
58561 +
58562 + userp = s_pre.prev;
58563 + }
58564 +
58565 + return 0;
58566 +}
58567 +
58568 +static int
58569 +copy_user_acl(struct gr_arg *arg)
58570 +{
58571 + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
58572 + struct sprole_pw *sptmp;
58573 + struct gr_hash_struct *ghash;
58574 + uid_t *domainlist;
58575 + unsigned int r_num;
58576 + unsigned int len;
58577 + char *tmp;
58578 + int err = 0;
58579 + __u16 i;
58580 + __u32 num_subjs;
58581 +
58582 + /* we need a default and kernel role */
58583 + if (arg->role_db.num_roles < 2)
58584 + return -EINVAL;
58585 +
58586 + /* copy special role authentication info from userspace */
58587 +
58588 + num_sprole_pws = arg->num_sprole_pws;
58589 + acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
58590 +
58591 + if (!acl_special_roles) {
58592 + err = -ENOMEM;
58593 + goto cleanup;
58594 + }
58595 +
58596 + for (i = 0; i < num_sprole_pws; i++) {
58597 + sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
58598 + if (!sptmp) {
58599 + err = -ENOMEM;
58600 + goto cleanup;
58601 + }
58602 + if (copy_from_user(sptmp, arg->sprole_pws + i,
58603 + sizeof (struct sprole_pw))) {
58604 + err = -EFAULT;
58605 + goto cleanup;
58606 + }
58607 +
58608 + len =
58609 + strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
58610 +
58611 + if (!len || len >= GR_SPROLE_LEN) {
58612 + err = -EINVAL;
58613 + goto cleanup;
58614 + }
58615 +
58616 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
58617 + err = -ENOMEM;
58618 + goto cleanup;
58619 + }
58620 +
58621 + if (copy_from_user(tmp, sptmp->rolename, len)) {
58622 + err = -EFAULT;
58623 + goto cleanup;
58624 + }
58625 + tmp[len-1] = '\0';
58626 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
58627 + printk(KERN_ALERT "Copying special role %s\n", tmp);
58628 +#endif
58629 + sptmp->rolename = tmp;
58630 + acl_special_roles[i] = sptmp;
58631 + }
58632 +
58633 + r_utmp = (struct acl_role_label **) arg->role_db.r_table;
58634 +
58635 + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
58636 + r_tmp = acl_alloc(sizeof (struct acl_role_label));
58637 +
58638 + if (!r_tmp) {
58639 + err = -ENOMEM;
58640 + goto cleanup;
58641 + }
58642 +
58643 + if (copy_from_user(&r_utmp2, r_utmp + r_num,
58644 + sizeof (struct acl_role_label *))) {
58645 + err = -EFAULT;
58646 + goto cleanup;
58647 + }
58648 +
58649 + if (copy_from_user(r_tmp, r_utmp2,
58650 + sizeof (struct acl_role_label))) {
58651 + err = -EFAULT;
58652 + goto cleanup;
58653 + }
58654 +
58655 + len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
58656 +
58657 + if (!len || len >= PATH_MAX) {
58658 + err = -EINVAL;
58659 + goto cleanup;
58660 + }
58661 +
58662 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
58663 + err = -ENOMEM;
58664 + goto cleanup;
58665 + }
58666 + if (copy_from_user(tmp, r_tmp->rolename, len)) {
58667 + err = -EFAULT;
58668 + goto cleanup;
58669 + }
58670 + tmp[len-1] = '\0';
58671 + r_tmp->rolename = tmp;
58672 +
58673 + if (!strcmp(r_tmp->rolename, "default")
58674 + && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
58675 + default_role = r_tmp;
58676 + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
58677 + kernel_role = r_tmp;
58678 + }
58679 +
58680 + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
58681 + err = -ENOMEM;
58682 + goto cleanup;
58683 + }
58684 + if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
58685 + err = -EFAULT;
58686 + goto cleanup;
58687 + }
58688 +
58689 + r_tmp->hash = ghash;
58690 +
58691 + num_subjs = count_user_subjs(r_tmp->hash->first);
58692 +
58693 + r_tmp->subj_hash_size = num_subjs;
58694 + r_tmp->subj_hash =
58695 + (struct acl_subject_label **)
58696 + create_table(&(r_tmp->subj_hash_size), sizeof(void *));
58697 +
58698 + if (!r_tmp->subj_hash) {
58699 + err = -ENOMEM;
58700 + goto cleanup;
58701 + }
58702 +
58703 + err = copy_user_allowedips(r_tmp);
58704 + if (err)
58705 + goto cleanup;
58706 +
58707 + /* copy domain info */
58708 + if (r_tmp->domain_children != NULL) {
58709 + domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
58710 + if (domainlist == NULL) {
58711 + err = -ENOMEM;
58712 + goto cleanup;
58713 + }
58714 + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
58715 + err = -EFAULT;
58716 + goto cleanup;
58717 + }
58718 + r_tmp->domain_children = domainlist;
58719 + }
58720 +
58721 + err = copy_user_transitions(r_tmp);
58722 + if (err)
58723 + goto cleanup;
58724 +
58725 + memset(r_tmp->subj_hash, 0,
58726 + r_tmp->subj_hash_size *
58727 + sizeof (struct acl_subject_label *));
58728 +
58729 + err = copy_user_subjs(r_tmp->hash->first, r_tmp);
58730 +
58731 + if (err)
58732 + goto cleanup;
58733 +
58734 + /* set nested subject list to null */
58735 + r_tmp->hash->first = NULL;
58736 +
58737 + insert_acl_role_label(r_tmp);
58738 + }
58739 +
58740 + goto return_err;
58741 + cleanup:
58742 + free_variables();
58743 + return_err:
58744 + return err;
58745 +
58746 +}
58747 +
58748 +static int
58749 +gracl_init(struct gr_arg *args)
58750 +{
58751 + int error = 0;
58752 +
58753 + memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
58754 + memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
58755 +
58756 + if (init_variables(args)) {
58757 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
58758 + error = -ENOMEM;
58759 + free_variables();
58760 + goto out;
58761 + }
58762 +
58763 + error = copy_user_acl(args);
58764 + free_init_variables();
58765 + if (error) {
58766 + free_variables();
58767 + goto out;
58768 + }
58769 +
58770 + if ((error = gr_set_acls(0))) {
58771 + free_variables();
58772 + goto out;
58773 + }
58774 +
58775 + pax_open_kernel();
58776 + gr_status |= GR_READY;
58777 + pax_close_kernel();
58778 +
58779 + out:
58780 + return error;
58781 +}
58782 +
58783 +/* derived from glibc fnmatch() 0: match, 1: no match*/
58784 +
58785 +static int
58786 +glob_match(const char *p, const char *n)
58787 +{
58788 + char c;
58789 +
58790 + while ((c = *p++) != '\0') {
58791 + switch (c) {
58792 + case '?':
58793 + if (*n == '\0')
58794 + return 1;
58795 + else if (*n == '/')
58796 + return 1;
58797 + break;
58798 + case '\\':
58799 + if (*n != c)
58800 + return 1;
58801 + break;
58802 + case '*':
58803 + for (c = *p++; c == '?' || c == '*'; c = *p++) {
58804 + if (*n == '/')
58805 + return 1;
58806 + else if (c == '?') {
58807 + if (*n == '\0')
58808 + return 1;
58809 + else
58810 + ++n;
58811 + }
58812 + }
58813 + if (c == '\0') {
58814 + return 0;
58815 + } else {
58816 + const char *endp;
58817 +
58818 + if ((endp = strchr(n, '/')) == NULL)
58819 + endp = n + strlen(n);
58820 +
58821 + if (c == '[') {
58822 + for (--p; n < endp; ++n)
58823 + if (!glob_match(p, n))
58824 + return 0;
58825 + } else if (c == '/') {
58826 + while (*n != '\0' && *n != '/')
58827 + ++n;
58828 + if (*n == '/' && !glob_match(p, n + 1))
58829 + return 0;
58830 + } else {
58831 + for (--p; n < endp; ++n)
58832 + if (*n == c && !glob_match(p, n))
58833 + return 0;
58834 + }
58835 +
58836 + return 1;
58837 + }
58838 + case '[':
58839 + {
58840 + int not;
58841 + char cold;
58842 +
58843 + if (*n == '\0' || *n == '/')
58844 + return 1;
58845 +
58846 + not = (*p == '!' || *p == '^');
58847 + if (not)
58848 + ++p;
58849 +
58850 + c = *p++;
58851 + for (;;) {
58852 + unsigned char fn = (unsigned char)*n;
58853 +
58854 + if (c == '\0')
58855 + return 1;
58856 + else {
58857 + if (c == fn)
58858 + goto matched;
58859 + cold = c;
58860 + c = *p++;
58861 +
58862 + if (c == '-' && *p != ']') {
58863 + unsigned char cend = *p++;
58864 +
58865 + if (cend == '\0')
58866 + return 1;
58867 +
58868 + if (cold <= fn && fn <= cend)
58869 + goto matched;
58870 +
58871 + c = *p++;
58872 + }
58873 + }
58874 +
58875 + if (c == ']')
58876 + break;
58877 + }
58878 + if (!not)
58879 + return 1;
58880 + break;
58881 + matched:
58882 + while (c != ']') {
58883 + if (c == '\0')
58884 + return 1;
58885 +
58886 + c = *p++;
58887 + }
58888 + if (not)
58889 + return 1;
58890 + }
58891 + break;
58892 + default:
58893 + if (c != *n)
58894 + return 1;
58895 + }
58896 +
58897 + ++n;
58898 + }
58899 +
58900 + if (*n == '\0')
58901 + return 0;
58902 +
58903 + if (*n == '/')
58904 + return 0;
58905 +
58906 + return 1;
58907 +}
58908 +
58909 +static struct acl_object_label *
58910 +chk_glob_label(struct acl_object_label *globbed,
58911 + const struct dentry *dentry, const struct vfsmount *mnt, char **path)
58912 +{
58913 + struct acl_object_label *tmp;
58914 +
58915 + if (*path == NULL)
58916 + *path = gr_to_filename_nolock(dentry, mnt);
58917 +
58918 + tmp = globbed;
58919 +
58920 + while (tmp) {
58921 + if (!glob_match(tmp->filename, *path))
58922 + return tmp;
58923 + tmp = tmp->next;
58924 + }
58925 +
58926 + return NULL;
58927 +}
58928 +
58929 +static struct acl_object_label *
58930 +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
58931 + const ino_t curr_ino, const dev_t curr_dev,
58932 + const struct acl_subject_label *subj, char **path, const int checkglob)
58933 +{
58934 + struct acl_subject_label *tmpsubj;
58935 + struct acl_object_label *retval;
58936 + struct acl_object_label *retval2;
58937 +
58938 + tmpsubj = (struct acl_subject_label *) subj;
58939 + read_lock(&gr_inode_lock);
58940 + do {
58941 + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
58942 + if (retval) {
58943 + if (checkglob && retval->globbed) {
58944 + retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
58945 + if (retval2)
58946 + retval = retval2;
58947 + }
58948 + break;
58949 + }
58950 + } while ((tmpsubj = tmpsubj->parent_subject));
58951 + read_unlock(&gr_inode_lock);
58952 +
58953 + return retval;
58954 +}
58955 +
58956 +static __inline__ struct acl_object_label *
58957 +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
58958 + const struct dentry *curr_dentry,
58959 + const struct acl_subject_label *subj, char **path, const int checkglob)
58960 +{
58961 + int newglob = checkglob;
58962 +
58963 + /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
58964 + as we don't want a / * rule to match instead of the / object
58965 + don't do this for create lookups that call this function though, since they're looking up
58966 + on the parent and thus need globbing checks on all paths
58967 + */
58968 + if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
58969 + newglob = GR_NO_GLOB;
58970 +
58971 + return __full_lookup(orig_dentry, orig_mnt,
58972 + curr_dentry->d_inode->i_ino,
58973 + __get_dev(curr_dentry), subj, path, newglob);
58974 +}
58975 +
58976 +static struct acl_object_label *
58977 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
58978 + const struct acl_subject_label *subj, char *path, const int checkglob)
58979 +{
58980 + struct dentry *dentry = (struct dentry *) l_dentry;
58981 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
58982 + struct acl_object_label *retval;
58983 +
58984 + spin_lock(&dcache_lock);
58985 + spin_lock(&vfsmount_lock);
58986 +
58987 + if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
58988 +#ifdef CONFIG_NET
58989 + mnt == sock_mnt ||
58990 +#endif
58991 +#ifdef CONFIG_HUGETLBFS
58992 + (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
58993 +#endif
58994 + /* ignore Eric Biederman */
58995 + IS_PRIVATE(l_dentry->d_inode))) {
58996 + retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
58997 + goto out;
58998 + }
58999 +
59000 + for (;;) {
59001 + if (dentry == real_root && mnt == real_root_mnt)
59002 + break;
59003 +
59004 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
59005 + if (mnt->mnt_parent == mnt)
59006 + break;
59007 +
59008 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
59009 + if (retval != NULL)
59010 + goto out;
59011 +
59012 + dentry = mnt->mnt_mountpoint;
59013 + mnt = mnt->mnt_parent;
59014 + continue;
59015 + }
59016 +
59017 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
59018 + if (retval != NULL)
59019 + goto out;
59020 +
59021 + dentry = dentry->d_parent;
59022 + }
59023 +
59024 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
59025 +
59026 + if (retval == NULL)
59027 + retval = full_lookup(l_dentry, l_mnt, real_root, subj, &path, checkglob);
59028 +out:
59029 + spin_unlock(&vfsmount_lock);
59030 + spin_unlock(&dcache_lock);
59031 +
59032 + BUG_ON(retval == NULL);
59033 +
59034 + return retval;
59035 +}
59036 +
59037 +static __inline__ struct acl_object_label *
59038 +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
59039 + const struct acl_subject_label *subj)
59040 +{
59041 + char *path = NULL;
59042 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
59043 +}
59044 +
59045 +static __inline__ struct acl_object_label *
59046 +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
59047 + const struct acl_subject_label *subj)
59048 +{
59049 + char *path = NULL;
59050 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
59051 +}
59052 +
59053 +static __inline__ struct acl_object_label *
59054 +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
59055 + const struct acl_subject_label *subj, char *path)
59056 +{
59057 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
59058 +}
59059 +
59060 +static struct acl_subject_label *
59061 +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
59062 + const struct acl_role_label *role)
59063 +{
59064 + struct dentry *dentry = (struct dentry *) l_dentry;
59065 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
59066 + struct acl_subject_label *retval;
59067 +
59068 + spin_lock(&dcache_lock);
59069 + spin_lock(&vfsmount_lock);
59070 +
59071 + for (;;) {
59072 + if (dentry == real_root && mnt == real_root_mnt)
59073 + break;
59074 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
59075 + if (mnt->mnt_parent == mnt)
59076 + break;
59077 +
59078 + read_lock(&gr_inode_lock);
59079 + retval =
59080 + lookup_acl_subj_label(dentry->d_inode->i_ino,
59081 + __get_dev(dentry), role);
59082 + read_unlock(&gr_inode_lock);
59083 + if (retval != NULL)
59084 + goto out;
59085 +
59086 + dentry = mnt->mnt_mountpoint;
59087 + mnt = mnt->mnt_parent;
59088 + continue;
59089 + }
59090 +
59091 + read_lock(&gr_inode_lock);
59092 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
59093 + __get_dev(dentry), role);
59094 + read_unlock(&gr_inode_lock);
59095 + if (retval != NULL)
59096 + goto out;
59097 +
59098 + dentry = dentry->d_parent;
59099 + }
59100 +
59101 + read_lock(&gr_inode_lock);
59102 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
59103 + __get_dev(dentry), role);
59104 + read_unlock(&gr_inode_lock);
59105 +
59106 + if (unlikely(retval == NULL)) {
59107 + read_lock(&gr_inode_lock);
59108 + retval = lookup_acl_subj_label(real_root->d_inode->i_ino,
59109 + __get_dev(real_root), role);
59110 + read_unlock(&gr_inode_lock);
59111 + }
59112 +out:
59113 + spin_unlock(&vfsmount_lock);
59114 + spin_unlock(&dcache_lock);
59115 +
59116 + BUG_ON(retval == NULL);
59117 +
59118 + return retval;
59119 +}
59120 +
59121 +static void
59122 +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
59123 +{
59124 + struct task_struct *task = current;
59125 + const struct cred *cred = current_cred();
59126 +
59127 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
59128 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
59129 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
59130 + 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
59131 +
59132 + return;
59133 +}
59134 +
59135 +static void
59136 +gr_log_learn_sysctl(const char *path, const __u32 mode)
59137 +{
59138 + struct task_struct *task = current;
59139 + const struct cred *cred = current_cred();
59140 +
59141 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
59142 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
59143 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
59144 + 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
59145 +
59146 + return;
59147 +}
59148 +
59149 +static void
59150 +gr_log_learn_id_change(const char type, const unsigned int real,
59151 + const unsigned int effective, const unsigned int fs)
59152 +{
59153 + struct task_struct *task = current;
59154 + const struct cred *cred = current_cred();
59155 +
59156 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
59157 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
59158 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
59159 + type, real, effective, fs, &task->signal->saved_ip);
59160 +
59161 + return;
59162 +}
59163 +
59164 +__u32
59165 +gr_search_file(const struct dentry * dentry, const __u32 mode,
59166 + const struct vfsmount * mnt)
59167 +{
59168 + __u32 retval = mode;
59169 + struct acl_subject_label *curracl;
59170 + struct acl_object_label *currobj;
59171 +
59172 + if (unlikely(!(gr_status & GR_READY)))
59173 + return (mode & ~GR_AUDITS);
59174 +
59175 + curracl = current->acl;
59176 +
59177 + currobj = chk_obj_label(dentry, mnt, curracl);
59178 + retval = currobj->mode & mode;
59179 +
59180 + /* if we're opening a specified transfer file for writing
59181 + (e.g. /dev/initctl), then transfer our role to init
59182 + */
59183 + if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
59184 + current->role->roletype & GR_ROLE_PERSIST)) {
59185 + struct task_struct *task = init_pid_ns.child_reaper;
59186 +
59187 + if (task->role != current->role) {
59188 + task->acl_sp_role = 0;
59189 + task->acl_role_id = current->acl_role_id;
59190 + task->role = current->role;
59191 + rcu_read_lock();
59192 + read_lock(&grsec_exec_file_lock);
59193 + gr_apply_subject_to_task(task);
59194 + read_unlock(&grsec_exec_file_lock);
59195 + rcu_read_unlock();
59196 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
59197 + }
59198 + }
59199 +
59200 + if (unlikely
59201 + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
59202 + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
59203 + __u32 new_mode = mode;
59204 +
59205 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
59206 +
59207 + retval = new_mode;
59208 +
59209 + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
59210 + new_mode |= GR_INHERIT;
59211 +
59212 + if (!(mode & GR_NOLEARN))
59213 + gr_log_learn(dentry, mnt, new_mode);
59214 + }
59215 +
59216 + return retval;
59217 +}
59218 +
59219 +struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
59220 + const struct dentry *parent,
59221 + const struct vfsmount *mnt)
59222 +{
59223 + struct name_entry *match;
59224 + struct acl_object_label *matchpo;
59225 + struct acl_subject_label *curracl;
59226 + char *path;
59227 +
59228 + if (unlikely(!(gr_status & GR_READY)))
59229 + return NULL;
59230 +
59231 + preempt_disable();
59232 + path = gr_to_filename_rbac(new_dentry, mnt);
59233 + match = lookup_name_entry_create(path);
59234 +
59235 + curracl = current->acl;
59236 +
59237 + if (match) {
59238 + read_lock(&gr_inode_lock);
59239 + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
59240 + read_unlock(&gr_inode_lock);
59241 +
59242 + if (matchpo) {
59243 + preempt_enable();
59244 + return matchpo;
59245 + }
59246 + }
59247 +
59248 + // lookup parent
59249 +
59250 + matchpo = chk_obj_create_label(parent, mnt, curracl, path);
59251 +
59252 + preempt_enable();
59253 + return matchpo;
59254 +}
59255 +
59256 +__u32
59257 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
59258 + const struct vfsmount * mnt, const __u32 mode)
59259 +{
59260 + struct acl_object_label *matchpo;
59261 + __u32 retval;
59262 +
59263 + if (unlikely(!(gr_status & GR_READY)))
59264 + return (mode & ~GR_AUDITS);
59265 +
59266 + matchpo = gr_get_create_object(new_dentry, parent, mnt);
59267 +
59268 + retval = matchpo->mode & mode;
59269 +
59270 + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
59271 + && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
59272 + __u32 new_mode = mode;
59273 +
59274 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
59275 +
59276 + gr_log_learn(new_dentry, mnt, new_mode);
59277 + return new_mode;
59278 + }
59279 +
59280 + return retval;
59281 +}
59282 +
59283 +__u32
59284 +gr_check_link(const struct dentry * new_dentry,
59285 + const struct dentry * parent_dentry,
59286 + const struct vfsmount * parent_mnt,
59287 + const struct dentry * old_dentry, const struct vfsmount * old_mnt)
59288 +{
59289 + struct acl_object_label *obj;
59290 + __u32 oldmode, newmode;
59291 + __u32 needmode;
59292 + __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
59293 + GR_DELETE | GR_INHERIT;
59294 +
59295 + if (unlikely(!(gr_status & GR_READY)))
59296 + return (GR_CREATE | GR_LINK);
59297 +
59298 + obj = chk_obj_label(old_dentry, old_mnt, current->acl);
59299 + oldmode = obj->mode;
59300 +
59301 + obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
59302 + newmode = obj->mode;
59303 +
59304 + needmode = newmode & checkmodes;
59305 +
59306 + // old name for hardlink must have at least the permissions of the new name
59307 + if ((oldmode & needmode) != needmode)
59308 + goto bad;
59309 +
59310 + // if old name had restrictions/auditing, make sure the new name does as well
59311 + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
59312 +
59313 + // don't allow hardlinking of suid/sgid files without permission
59314 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
59315 + needmode |= GR_SETID;
59316 +
59317 + if ((newmode & needmode) != needmode)
59318 + goto bad;
59319 +
59320 + // enforce minimum permissions
59321 + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
59322 + return newmode;
59323 +bad:
59324 + needmode = oldmode;
59325 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
59326 + needmode |= GR_SETID;
59327 +
59328 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
59329 + gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
59330 + return (GR_CREATE | GR_LINK);
59331 + } else if (newmode & GR_SUPPRESS)
59332 + return GR_SUPPRESS;
59333 + else
59334 + return 0;
59335 +}
59336 +
59337 +int
59338 +gr_check_hidden_task(const struct task_struct *task)
59339 +{
59340 + if (unlikely(!(gr_status & GR_READY)))
59341 + return 0;
59342 +
59343 + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
59344 + return 1;
59345 +
59346 + return 0;
59347 +}
59348 +
59349 +int
59350 +gr_check_protected_task(const struct task_struct *task)
59351 +{
59352 + if (unlikely(!(gr_status & GR_READY) || !task))
59353 + return 0;
59354 +
59355 + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
59356 + task->acl != current->acl)
59357 + return 1;
59358 +
59359 + return 0;
59360 +}
59361 +
59362 +int
59363 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
59364 +{
59365 + struct task_struct *p;
59366 + int ret = 0;
59367 +
59368 + if (unlikely(!(gr_status & GR_READY) || !pid))
59369 + return ret;
59370 +
59371 + read_lock(&tasklist_lock);
59372 + do_each_pid_task(pid, type, p) {
59373 + if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
59374 + p->acl != current->acl) {
59375 + ret = 1;
59376 + goto out;
59377 + }
59378 + } while_each_pid_task(pid, type, p);
59379 +out:
59380 + read_unlock(&tasklist_lock);
59381 +
59382 + return ret;
59383 +}
59384 +
59385 +void
59386 +gr_copy_label(struct task_struct *tsk)
59387 +{
59388 + /* plain copying of fields is already done by dup_task_struct */
59389 + tsk->signal->used_accept = 0;
59390 + tsk->acl_sp_role = 0;
59391 + //tsk->acl_role_id = current->acl_role_id;
59392 + //tsk->acl = current->acl;
59393 + //tsk->role = current->role;
59394 + tsk->signal->curr_ip = current->signal->curr_ip;
59395 + tsk->signal->saved_ip = current->signal->saved_ip;
59396 + if (current->exec_file)
59397 + get_file(current->exec_file);
59398 + //tsk->exec_file = current->exec_file;
59399 + //tsk->is_writable = current->is_writable;
59400 + if (unlikely(current->signal->used_accept)) {
59401 + current->signal->curr_ip = 0;
59402 + current->signal->saved_ip = 0;
59403 + }
59404 +
59405 + return;
59406 +}
59407 +
59408 +static void
59409 +gr_set_proc_res(struct task_struct *task)
59410 +{
59411 + struct acl_subject_label *proc;
59412 + unsigned short i;
59413 +
59414 + proc = task->acl;
59415 +
59416 + if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
59417 + return;
59418 +
59419 + for (i = 0; i < RLIM_NLIMITS; i++) {
59420 + if (!(proc->resmask & (1 << i)))
59421 + continue;
59422 +
59423 + task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
59424 + task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
59425 + }
59426 +
59427 + return;
59428 +}
59429 +
59430 +extern int __gr_process_user_ban(struct user_struct *user);
59431 +
59432 +int
59433 +gr_check_user_change(int real, int effective, int fs)
59434 +{
59435 + unsigned int i;
59436 + __u16 num;
59437 + uid_t *uidlist;
59438 + int curuid;
59439 + int realok = 0;
59440 + int effectiveok = 0;
59441 + int fsok = 0;
59442 +
59443 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
59444 + struct user_struct *user;
59445 +
59446 + if (real == -1)
59447 + goto skipit;
59448 +
59449 + user = find_user(real);
59450 + if (user == NULL)
59451 + goto skipit;
59452 +
59453 + if (__gr_process_user_ban(user)) {
59454 + /* for find_user */
59455 + free_uid(user);
59456 + return 1;
59457 + }
59458 +
59459 + /* for find_user */
59460 + free_uid(user);
59461 +
59462 +skipit:
59463 +#endif
59464 +
59465 + if (unlikely(!(gr_status & GR_READY)))
59466 + return 0;
59467 +
59468 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
59469 + gr_log_learn_id_change('u', real, effective, fs);
59470 +
59471 + num = current->acl->user_trans_num;
59472 + uidlist = current->acl->user_transitions;
59473 +
59474 + if (uidlist == NULL)
59475 + return 0;
59476 +
59477 + if (real == -1)
59478 + realok = 1;
59479 + if (effective == -1)
59480 + effectiveok = 1;
59481 + if (fs == -1)
59482 + fsok = 1;
59483 +
59484 + if (current->acl->user_trans_type & GR_ID_ALLOW) {
59485 + for (i = 0; i < num; i++) {
59486 + curuid = (int)uidlist[i];
59487 + if (real == curuid)
59488 + realok = 1;
59489 + if (effective == curuid)
59490 + effectiveok = 1;
59491 + if (fs == curuid)
59492 + fsok = 1;
59493 + }
59494 + } else if (current->acl->user_trans_type & GR_ID_DENY) {
59495 + for (i = 0; i < num; i++) {
59496 + curuid = (int)uidlist[i];
59497 + if (real == curuid)
59498 + break;
59499 + if (effective == curuid)
59500 + break;
59501 + if (fs == curuid)
59502 + break;
59503 + }
59504 + /* not in deny list */
59505 + if (i == num) {
59506 + realok = 1;
59507 + effectiveok = 1;
59508 + fsok = 1;
59509 + }
59510 + }
59511 +
59512 + if (realok && effectiveok && fsok)
59513 + return 0;
59514 + else {
59515 + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
59516 + return 1;
59517 + }
59518 +}
59519 +
59520 +int
59521 +gr_check_group_change(int real, int effective, int fs)
59522 +{
59523 + unsigned int i;
59524 + __u16 num;
59525 + gid_t *gidlist;
59526 + int curgid;
59527 + int realok = 0;
59528 + int effectiveok = 0;
59529 + int fsok = 0;
59530 +
59531 + if (unlikely(!(gr_status & GR_READY)))
59532 + return 0;
59533 +
59534 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
59535 + gr_log_learn_id_change('g', real, effective, fs);
59536 +
59537 + num = current->acl->group_trans_num;
59538 + gidlist = current->acl->group_transitions;
59539 +
59540 + if (gidlist == NULL)
59541 + return 0;
59542 +
59543 + if (real == -1)
59544 + realok = 1;
59545 + if (effective == -1)
59546 + effectiveok = 1;
59547 + if (fs == -1)
59548 + fsok = 1;
59549 +
59550 + if (current->acl->group_trans_type & GR_ID_ALLOW) {
59551 + for (i = 0; i < num; i++) {
59552 + curgid = (int)gidlist[i];
59553 + if (real == curgid)
59554 + realok = 1;
59555 + if (effective == curgid)
59556 + effectiveok = 1;
59557 + if (fs == curgid)
59558 + fsok = 1;
59559 + }
59560 + } else if (current->acl->group_trans_type & GR_ID_DENY) {
59561 + for (i = 0; i < num; i++) {
59562 + curgid = (int)gidlist[i];
59563 + if (real == curgid)
59564 + break;
59565 + if (effective == curgid)
59566 + break;
59567 + if (fs == curgid)
59568 + break;
59569 + }
59570 + /* not in deny list */
59571 + if (i == num) {
59572 + realok = 1;
59573 + effectiveok = 1;
59574 + fsok = 1;
59575 + }
59576 + }
59577 +
59578 + if (realok && effectiveok && fsok)
59579 + return 0;
59580 + else {
59581 + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
59582 + return 1;
59583 + }
59584 +}
59585 +
59586 +extern int gr_acl_is_capable(const int cap);
59587 +
59588 +void
59589 +gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
59590 +{
59591 + struct acl_role_label *role = task->role;
59592 + struct acl_subject_label *subj = NULL;
59593 + struct acl_object_label *obj;
59594 + struct file *filp;
59595 +
59596 + if (unlikely(!(gr_status & GR_READY)))
59597 + return;
59598 +
59599 + filp = task->exec_file;
59600 +
59601 + /* kernel process, we'll give them the kernel role */
59602 + if (unlikely(!filp)) {
59603 + task->role = kernel_role;
59604 + task->acl = kernel_role->root_label;
59605 + return;
59606 + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
59607 + role = lookup_acl_role_label(task, uid, gid);
59608 +
59609 + /* don't change the role if we're not a privileged process */
59610 + if (role && task->role != role &&
59611 + (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
59612 + ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
59613 + return;
59614 +
59615 + /* perform subject lookup in possibly new role
59616 + we can use this result below in the case where role == task->role
59617 + */
59618 + subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
59619 +
59620 + /* if we changed uid/gid, but result in the same role
59621 + and are using inheritance, don't lose the inherited subject
59622 + if current subject is other than what normal lookup
59623 + would result in, we arrived via inheritance, don't
59624 + lose subject
59625 + */
59626 + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
59627 + (subj == task->acl)))
59628 + task->acl = subj;
59629 +
59630 + task->role = role;
59631 +
59632 + task->is_writable = 0;
59633 +
59634 + /* ignore additional mmap checks for processes that are writable
59635 + by the default ACL */
59636 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
59637 + if (unlikely(obj->mode & GR_WRITE))
59638 + task->is_writable = 1;
59639 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
59640 + if (unlikely(obj->mode & GR_WRITE))
59641 + task->is_writable = 1;
59642 +
59643 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
59644 + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
59645 +#endif
59646 +
59647 + gr_set_proc_res(task);
59648 +
59649 + return;
59650 +}
59651 +
59652 +int
59653 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
59654 + const int unsafe_flags)
59655 +{
59656 + struct task_struct *task = current;
59657 + struct acl_subject_label *newacl;
59658 + struct acl_object_label *obj;
59659 + __u32 retmode;
59660 +
59661 + if (unlikely(!(gr_status & GR_READY)))
59662 + return 0;
59663 +
59664 + newacl = chk_subj_label(dentry, mnt, task->role);
59665 +
59666 + task_lock(task);
59667 + if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
59668 + !(task->role->roletype & GR_ROLE_GOD) &&
59669 + !gr_search_file(dentry, GR_PTRACERD, mnt) &&
59670 + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
59671 + task_unlock(task);
59672 + if (unsafe_flags & LSM_UNSAFE_SHARE)
59673 + gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
59674 + else
59675 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
59676 + return -EACCES;
59677 + }
59678 + task_unlock(task);
59679 +
59680 + obj = chk_obj_label(dentry, mnt, task->acl);
59681 + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
59682 +
59683 + if (!(task->acl->mode & GR_INHERITLEARN) &&
59684 + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
59685 + if (obj->nested)
59686 + task->acl = obj->nested;
59687 + else
59688 + task->acl = newacl;
59689 + } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
59690 + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
59691 +
59692 + task->is_writable = 0;
59693 +
59694 + /* ignore additional mmap checks for processes that are writable
59695 + by the default ACL */
59696 + obj = chk_obj_label(dentry, mnt, default_role->root_label);
59697 + if (unlikely(obj->mode & GR_WRITE))
59698 + task->is_writable = 1;
59699 + obj = chk_obj_label(dentry, mnt, task->role->root_label);
59700 + if (unlikely(obj->mode & GR_WRITE))
59701 + task->is_writable = 1;
59702 +
59703 + gr_set_proc_res(task);
59704 +
59705 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
59706 + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
59707 +#endif
59708 + return 0;
59709 +}
59710 +
59711 +/* always called with valid inodev ptr */
59712 +static void
59713 +do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
59714 +{
59715 + struct acl_object_label *matchpo;
59716 + struct acl_subject_label *matchps;
59717 + struct acl_subject_label *subj;
59718 + struct acl_role_label *role;
59719 + unsigned int x;
59720 +
59721 + FOR_EACH_ROLE_START(role)
59722 + FOR_EACH_SUBJECT_START(role, subj, x)
59723 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
59724 + matchpo->mode |= GR_DELETED;
59725 + FOR_EACH_SUBJECT_END(subj,x)
59726 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
59727 + if (subj->inode == ino && subj->device == dev)
59728 + subj->mode |= GR_DELETED;
59729 + FOR_EACH_NESTED_SUBJECT_END(subj)
59730 + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
59731 + matchps->mode |= GR_DELETED;
59732 + FOR_EACH_ROLE_END(role)
59733 +
59734 + inodev->nentry->deleted = 1;
59735 +
59736 + return;
59737 +}
59738 +
59739 +void
59740 +gr_handle_delete(const ino_t ino, const dev_t dev)
59741 +{
59742 + struct inodev_entry *inodev;
59743 +
59744 + if (unlikely(!(gr_status & GR_READY)))
59745 + return;
59746 +
59747 + write_lock(&gr_inode_lock);
59748 + inodev = lookup_inodev_entry(ino, dev);
59749 + if (inodev != NULL)
59750 + do_handle_delete(inodev, ino, dev);
59751 + write_unlock(&gr_inode_lock);
59752 +
59753 + return;
59754 +}
59755 +
59756 +static void
59757 +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
59758 + const ino_t newinode, const dev_t newdevice,
59759 + struct acl_subject_label *subj)
59760 +{
59761 + unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
59762 + struct acl_object_label *match;
59763 +
59764 + match = subj->obj_hash[index];
59765 +
59766 + while (match && (match->inode != oldinode ||
59767 + match->device != olddevice ||
59768 + !(match->mode & GR_DELETED)))
59769 + match = match->next;
59770 +
59771 + if (match && (match->inode == oldinode)
59772 + && (match->device == olddevice)
59773 + && (match->mode & GR_DELETED)) {
59774 + if (match->prev == NULL) {
59775 + subj->obj_hash[index] = match->next;
59776 + if (match->next != NULL)
59777 + match->next->prev = NULL;
59778 + } else {
59779 + match->prev->next = match->next;
59780 + if (match->next != NULL)
59781 + match->next->prev = match->prev;
59782 + }
59783 + match->prev = NULL;
59784 + match->next = NULL;
59785 + match->inode = newinode;
59786 + match->device = newdevice;
59787 + match->mode &= ~GR_DELETED;
59788 +
59789 + insert_acl_obj_label(match, subj);
59790 + }
59791 +
59792 + return;
59793 +}
59794 +
59795 +static void
59796 +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
59797 + const ino_t newinode, const dev_t newdevice,
59798 + struct acl_role_label *role)
59799 +{
59800 + unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
59801 + struct acl_subject_label *match;
59802 +
59803 + match = role->subj_hash[index];
59804 +
59805 + while (match && (match->inode != oldinode ||
59806 + match->device != olddevice ||
59807 + !(match->mode & GR_DELETED)))
59808 + match = match->next;
59809 +
59810 + if (match && (match->inode == oldinode)
59811 + && (match->device == olddevice)
59812 + && (match->mode & GR_DELETED)) {
59813 + if (match->prev == NULL) {
59814 + role->subj_hash[index] = match->next;
59815 + if (match->next != NULL)
59816 + match->next->prev = NULL;
59817 + } else {
59818 + match->prev->next = match->next;
59819 + if (match->next != NULL)
59820 + match->next->prev = match->prev;
59821 + }
59822 + match->prev = NULL;
59823 + match->next = NULL;
59824 + match->inode = newinode;
59825 + match->device = newdevice;
59826 + match->mode &= ~GR_DELETED;
59827 +
59828 + insert_acl_subj_label(match, role);
59829 + }
59830 +
59831 + return;
59832 +}
59833 +
59834 +static void
59835 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
59836 + const ino_t newinode, const dev_t newdevice)
59837 +{
59838 + unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
59839 + struct inodev_entry *match;
59840 +
59841 + match = inodev_set.i_hash[index];
59842 +
59843 + while (match && (match->nentry->inode != oldinode ||
59844 + match->nentry->device != olddevice || !match->nentry->deleted))
59845 + match = match->next;
59846 +
59847 + if (match && (match->nentry->inode == oldinode)
59848 + && (match->nentry->device == olddevice) &&
59849 + match->nentry->deleted) {
59850 + if (match->prev == NULL) {
59851 + inodev_set.i_hash[index] = match->next;
59852 + if (match->next != NULL)
59853 + match->next->prev = NULL;
59854 + } else {
59855 + match->prev->next = match->next;
59856 + if (match->next != NULL)
59857 + match->next->prev = match->prev;
59858 + }
59859 + match->prev = NULL;
59860 + match->next = NULL;
59861 + match->nentry->inode = newinode;
59862 + match->nentry->device = newdevice;
59863 + match->nentry->deleted = 0;
59864 +
59865 + insert_inodev_entry(match);
59866 + }
59867 +
59868 + return;
59869 +}
59870 +
59871 +static void
59872 +__do_handle_create(const struct name_entry *matchn, ino_t inode, dev_t dev)
59873 +{
59874 + struct acl_subject_label *subj;
59875 + struct acl_role_label *role;
59876 + unsigned int x;
59877 +
59878 + FOR_EACH_ROLE_START(role)
59879 + update_acl_subj_label(matchn->inode, matchn->device,
59880 + inode, dev, role);
59881 +
59882 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
59883 + if ((subj->inode == inode) && (subj->device == dev)) {
59884 + subj->inode = inode;
59885 + subj->device = dev;
59886 + }
59887 + FOR_EACH_NESTED_SUBJECT_END(subj)
59888 + FOR_EACH_SUBJECT_START(role, subj, x)
59889 + update_acl_obj_label(matchn->inode, matchn->device,
59890 + inode, dev, subj);
59891 + FOR_EACH_SUBJECT_END(subj,x)
59892 + FOR_EACH_ROLE_END(role)
59893 +
59894 + update_inodev_entry(matchn->inode, matchn->device, inode, dev);
59895 +
59896 + return;
59897 +}
59898 +
59899 +static void
59900 +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
59901 + const struct vfsmount *mnt)
59902 +{
59903 + ino_t ino = dentry->d_inode->i_ino;
59904 + dev_t dev = __get_dev(dentry);
59905 +
59906 + __do_handle_create(matchn, ino, dev);
59907 +
59908 + return;
59909 +}
59910 +
59911 +void
59912 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
59913 +{
59914 + struct name_entry *matchn;
59915 +
59916 + if (unlikely(!(gr_status & GR_READY)))
59917 + return;
59918 +
59919 + preempt_disable();
59920 + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
59921 +
59922 + if (unlikely((unsigned long)matchn)) {
59923 + write_lock(&gr_inode_lock);
59924 + do_handle_create(matchn, dentry, mnt);
59925 + write_unlock(&gr_inode_lock);
59926 + }
59927 + preempt_enable();
59928 +
59929 + return;
59930 +}
59931 +
59932 +void
59933 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
59934 +{
59935 + struct name_entry *matchn;
59936 +
59937 + if (unlikely(!(gr_status & GR_READY)))
59938 + return;
59939 +
59940 + preempt_disable();
59941 + matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
59942 +
59943 + if (unlikely((unsigned long)matchn)) {
59944 + write_lock(&gr_inode_lock);
59945 + __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
59946 + write_unlock(&gr_inode_lock);
59947 + }
59948 + preempt_enable();
59949 +
59950 + return;
59951 +}
59952 +
59953 +void
59954 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
59955 + struct dentry *old_dentry,
59956 + struct dentry *new_dentry,
59957 + struct vfsmount *mnt, const __u8 replace)
59958 +{
59959 + struct name_entry *matchn;
59960 + struct inodev_entry *inodev;
59961 + struct inode *inode = new_dentry->d_inode;
59962 + ino_t oldinode = old_dentry->d_inode->i_ino;
59963 + dev_t olddev = __get_dev(old_dentry);
59964 +
59965 + /* vfs_rename swaps the name and parent link for old_dentry and
59966 + new_dentry
59967 + at this point, old_dentry has the new name, parent link, and inode
59968 + for the renamed file
59969 + if a file is being replaced by a rename, new_dentry has the inode
59970 + and name for the replaced file
59971 + */
59972 +
59973 + if (unlikely(!(gr_status & GR_READY)))
59974 + return;
59975 +
59976 + preempt_disable();
59977 + matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
59978 +
59979 + /* we wouldn't have to check d_inode if it weren't for
59980 + NFS silly-renaming
59981 + */
59982 +
59983 + write_lock(&gr_inode_lock);
59984 + if (unlikely(replace && inode)) {
59985 + ino_t newinode = inode->i_ino;
59986 + dev_t newdev = __get_dev(new_dentry);
59987 + inodev = lookup_inodev_entry(newinode, newdev);
59988 + if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
59989 + do_handle_delete(inodev, newinode, newdev);
59990 + }
59991 +
59992 + inodev = lookup_inodev_entry(oldinode, olddev);
59993 + if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
59994 + do_handle_delete(inodev, oldinode, olddev);
59995 +
59996 + if (unlikely((unsigned long)matchn))
59997 + do_handle_create(matchn, old_dentry, mnt);
59998 +
59999 + write_unlock(&gr_inode_lock);
60000 + preempt_enable();
60001 +
60002 + return;
60003 +}
60004 +
60005 +static int
60006 +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
60007 + unsigned char **sum)
60008 +{
60009 + struct acl_role_label *r;
60010 + struct role_allowed_ip *ipp;
60011 + struct role_transition *trans;
60012 + unsigned int i;
60013 + int found = 0;
60014 + u32 curr_ip = current->signal->curr_ip;
60015 +
60016 + current->signal->saved_ip = curr_ip;
60017 +
60018 + /* check transition table */
60019 +
60020 + for (trans = current->role->transitions; trans; trans = trans->next) {
60021 + if (!strcmp(rolename, trans->rolename)) {
60022 + found = 1;
60023 + break;
60024 + }
60025 + }
60026 +
60027 + if (!found)
60028 + return 0;
60029 +
60030 + /* handle special roles that do not require authentication
60031 + and check ip */
60032 +
60033 + FOR_EACH_ROLE_START(r)
60034 + if (!strcmp(rolename, r->rolename) &&
60035 + (r->roletype & GR_ROLE_SPECIAL)) {
60036 + found = 0;
60037 + if (r->allowed_ips != NULL) {
60038 + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
60039 + if ((ntohl(curr_ip) & ipp->netmask) ==
60040 + (ntohl(ipp->addr) & ipp->netmask))
60041 + found = 1;
60042 + }
60043 + } else
60044 + found = 2;
60045 + if (!found)
60046 + return 0;
60047 +
60048 + if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
60049 + ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
60050 + *salt = NULL;
60051 + *sum = NULL;
60052 + return 1;
60053 + }
60054 + }
60055 + FOR_EACH_ROLE_END(r)
60056 +
60057 + for (i = 0; i < num_sprole_pws; i++) {
60058 + if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
60059 + *salt = acl_special_roles[i]->salt;
60060 + *sum = acl_special_roles[i]->sum;
60061 + return 1;
60062 + }
60063 + }
60064 +
60065 + return 0;
60066 +}
60067 +
60068 +static void
60069 +assign_special_role(char *rolename)
60070 +{
60071 + struct acl_object_label *obj;
60072 + struct acl_role_label *r;
60073 + struct acl_role_label *assigned = NULL;
60074 + struct task_struct *tsk;
60075 + struct file *filp;
60076 +
60077 + FOR_EACH_ROLE_START(r)
60078 + if (!strcmp(rolename, r->rolename) &&
60079 + (r->roletype & GR_ROLE_SPECIAL)) {
60080 + assigned = r;
60081 + break;
60082 + }
60083 + FOR_EACH_ROLE_END(r)
60084 +
60085 + if (!assigned)
60086 + return;
60087 +
60088 + read_lock(&tasklist_lock);
60089 + read_lock(&grsec_exec_file_lock);
60090 +
60091 + tsk = current->real_parent;
60092 + if (tsk == NULL)
60093 + goto out_unlock;
60094 +
60095 + filp = tsk->exec_file;
60096 + if (filp == NULL)
60097 + goto out_unlock;
60098 +
60099 + tsk->is_writable = 0;
60100 +
60101 + tsk->acl_sp_role = 1;
60102 + tsk->acl_role_id = ++acl_sp_role_value;
60103 + tsk->role = assigned;
60104 + tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
60105 +
60106 + /* ignore additional mmap checks for processes that are writable
60107 + by the default ACL */
60108 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
60109 + if (unlikely(obj->mode & GR_WRITE))
60110 + tsk->is_writable = 1;
60111 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
60112 + if (unlikely(obj->mode & GR_WRITE))
60113 + tsk->is_writable = 1;
60114 +
60115 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
60116 + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
60117 +#endif
60118 +
60119 +out_unlock:
60120 + read_unlock(&grsec_exec_file_lock);
60121 + read_unlock(&tasklist_lock);
60122 + return;
60123 +}
60124 +
60125 +int gr_check_secure_terminal(struct task_struct *task)
60126 +{
60127 + struct task_struct *p, *p2, *p3;
60128 + struct files_struct *files;
60129 + struct fdtable *fdt;
60130 + struct file *our_file = NULL, *file;
60131 + int i;
60132 +
60133 + if (task->signal->tty == NULL)
60134 + return 1;
60135 +
60136 + files = get_files_struct(task);
60137 + if (files != NULL) {
60138 + rcu_read_lock();
60139 + fdt = files_fdtable(files);
60140 + for (i=0; i < fdt->max_fds; i++) {
60141 + file = fcheck_files(files, i);
60142 + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
60143 + get_file(file);
60144 + our_file = file;
60145 + }
60146 + }
60147 + rcu_read_unlock();
60148 + put_files_struct(files);
60149 + }
60150 +
60151 + if (our_file == NULL)
60152 + return 1;
60153 +
60154 + read_lock(&tasklist_lock);
60155 + do_each_thread(p2, p) {
60156 + files = get_files_struct(p);
60157 + if (files == NULL ||
60158 + (p->signal && p->signal->tty == task->signal->tty)) {
60159 + if (files != NULL)
60160 + put_files_struct(files);
60161 + continue;
60162 + }
60163 + rcu_read_lock();
60164 + fdt = files_fdtable(files);
60165 + for (i=0; i < fdt->max_fds; i++) {
60166 + file = fcheck_files(files, i);
60167 + if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
60168 + file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
60169 + p3 = task;
60170 + while (p3->pid > 0) {
60171 + if (p3 == p)
60172 + break;
60173 + p3 = p3->real_parent;
60174 + }
60175 + if (p3 == p)
60176 + break;
60177 + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
60178 + gr_handle_alertkill(p);
60179 + rcu_read_unlock();
60180 + put_files_struct(files);
60181 + read_unlock(&tasklist_lock);
60182 + fput(our_file);
60183 + return 0;
60184 + }
60185 + }
60186 + rcu_read_unlock();
60187 + put_files_struct(files);
60188 + } while_each_thread(p2, p);
60189 + read_unlock(&tasklist_lock);
60190 +
60191 + fput(our_file);
60192 + return 1;
60193 +}
60194 +
60195 +ssize_t
60196 +write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
60197 +{
60198 + struct gr_arg_wrapper uwrap;
60199 + unsigned char *sprole_salt = NULL;
60200 + unsigned char *sprole_sum = NULL;
60201 + int error = sizeof (struct gr_arg_wrapper);
60202 + int error2 = 0;
60203 +
60204 + mutex_lock(&gr_dev_mutex);
60205 +
60206 + if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
60207 + error = -EPERM;
60208 + goto out;
60209 + }
60210 +
60211 + if (count != sizeof (struct gr_arg_wrapper)) {
60212 + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
60213 + error = -EINVAL;
60214 + goto out;
60215 + }
60216 +
60217 +
60218 + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
60219 + gr_auth_expires = 0;
60220 + gr_auth_attempts = 0;
60221 + }
60222 +
60223 + if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
60224 + error = -EFAULT;
60225 + goto out;
60226 + }
60227 +
60228 + if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
60229 + error = -EINVAL;
60230 + goto out;
60231 + }
60232 +
60233 + if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
60234 + error = -EFAULT;
60235 + goto out;
60236 + }
60237 +
60238 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
60239 + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
60240 + time_after(gr_auth_expires, get_seconds())) {
60241 + error = -EBUSY;
60242 + goto out;
60243 + }
60244 +
60245 + /* if non-root trying to do anything other than use a special role,
60246 + do not attempt authentication, do not count towards authentication
60247 + locking
60248 + */
60249 +
60250 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
60251 + gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
60252 + current_uid()) {
60253 + error = -EPERM;
60254 + goto out;
60255 + }
60256 +
60257 + /* ensure pw and special role name are null terminated */
60258 +
60259 + gr_usermode->pw[GR_PW_LEN - 1] = '\0';
60260 + gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
60261 +
60262 + /* Okay.
60263 + * We have our enough of the argument structure..(we have yet
60264 + * to copy_from_user the tables themselves) . Copy the tables
60265 + * only if we need them, i.e. for loading operations. */
60266 +
60267 + switch (gr_usermode->mode) {
60268 + case GR_STATUS:
60269 + if (gr_status & GR_READY) {
60270 + error = 1;
60271 + if (!gr_check_secure_terminal(current))
60272 + error = 3;
60273 + } else
60274 + error = 2;
60275 + goto out;
60276 + case GR_SHUTDOWN:
60277 + if ((gr_status & GR_READY)
60278 + && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
60279 + pax_open_kernel();
60280 + gr_status &= ~GR_READY;
60281 + pax_close_kernel();
60282 +
60283 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
60284 + free_variables();
60285 + memset(gr_usermode, 0, sizeof (struct gr_arg));
60286 + memset(gr_system_salt, 0, GR_SALT_LEN);
60287 + memset(gr_system_sum, 0, GR_SHA_LEN);
60288 + } else if (gr_status & GR_READY) {
60289 + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
60290 + error = -EPERM;
60291 + } else {
60292 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
60293 + error = -EAGAIN;
60294 + }
60295 + break;
60296 + case GR_ENABLE:
60297 + if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
60298 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
60299 + else {
60300 + if (gr_status & GR_READY)
60301 + error = -EAGAIN;
60302 + else
60303 + error = error2;
60304 + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
60305 + }
60306 + break;
60307 + case GR_RELOAD:
60308 + if (!(gr_status & GR_READY)) {
60309 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
60310 + error = -EAGAIN;
60311 + } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
60312 + lock_kernel();
60313 +
60314 + pax_open_kernel();
60315 + gr_status &= ~GR_READY;
60316 + pax_close_kernel();
60317 +
60318 + free_variables();
60319 + if (!(error2 = gracl_init(gr_usermode))) {
60320 + unlock_kernel();
60321 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
60322 + } else {
60323 + unlock_kernel();
60324 + error = error2;
60325 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
60326 + }
60327 + } else {
60328 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
60329 + error = -EPERM;
60330 + }
60331 + break;
60332 + case GR_SEGVMOD:
60333 + if (unlikely(!(gr_status & GR_READY))) {
60334 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
60335 + error = -EAGAIN;
60336 + break;
60337 + }
60338 +
60339 + if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
60340 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
60341 + if (gr_usermode->segv_device && gr_usermode->segv_inode) {
60342 + struct acl_subject_label *segvacl;
60343 + segvacl =
60344 + lookup_acl_subj_label(gr_usermode->segv_inode,
60345 + gr_usermode->segv_device,
60346 + current->role);
60347 + if (segvacl) {
60348 + segvacl->crashes = 0;
60349 + segvacl->expires = 0;
60350 + }
60351 + } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
60352 + gr_remove_uid(gr_usermode->segv_uid);
60353 + }
60354 + } else {
60355 + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
60356 + error = -EPERM;
60357 + }
60358 + break;
60359 + case GR_SPROLE:
60360 + case GR_SPROLEPAM:
60361 + if (unlikely(!(gr_status & GR_READY))) {
60362 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
60363 + error = -EAGAIN;
60364 + break;
60365 + }
60366 +
60367 + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
60368 + current->role->expires = 0;
60369 + current->role->auth_attempts = 0;
60370 + }
60371 +
60372 + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
60373 + time_after(current->role->expires, get_seconds())) {
60374 + error = -EBUSY;
60375 + goto out;
60376 + }
60377 +
60378 + if (lookup_special_role_auth
60379 + (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
60380 + && ((!sprole_salt && !sprole_sum)
60381 + || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
60382 + char *p = "";
60383 + assign_special_role(gr_usermode->sp_role);
60384 + read_lock(&tasklist_lock);
60385 + if (current->real_parent)
60386 + p = current->real_parent->role->rolename;
60387 + read_unlock(&tasklist_lock);
60388 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
60389 + p, acl_sp_role_value);
60390 + } else {
60391 + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
60392 + error = -EPERM;
60393 + if(!(current->role->auth_attempts++))
60394 + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
60395 +
60396 + goto out;
60397 + }
60398 + break;
60399 + case GR_UNSPROLE:
60400 + if (unlikely(!(gr_status & GR_READY))) {
60401 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
60402 + error = -EAGAIN;
60403 + break;
60404 + }
60405 +
60406 + if (current->role->roletype & GR_ROLE_SPECIAL) {
60407 + char *p = "";
60408 + int i = 0;
60409 +
60410 + read_lock(&tasklist_lock);
60411 + if (current->real_parent) {
60412 + p = current->real_parent->role->rolename;
60413 + i = current->real_parent->acl_role_id;
60414 + }
60415 + read_unlock(&tasklist_lock);
60416 +
60417 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
60418 + gr_set_acls(1);
60419 + } else {
60420 + error = -EPERM;
60421 + goto out;
60422 + }
60423 + break;
60424 + default:
60425 + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
60426 + error = -EINVAL;
60427 + break;
60428 + }
60429 +
60430 + if (error != -EPERM)
60431 + goto out;
60432 +
60433 + if(!(gr_auth_attempts++))
60434 + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
60435 +
60436 + out:
60437 + mutex_unlock(&gr_dev_mutex);
60438 + return error;
60439 +}
60440 +
60441 +/* must be called with
60442 + rcu_read_lock();
60443 + read_lock(&tasklist_lock);
60444 + read_lock(&grsec_exec_file_lock);
60445 +*/
60446 +int gr_apply_subject_to_task(struct task_struct *task)
60447 +{
60448 + struct acl_object_label *obj;
60449 + char *tmpname;
60450 + struct acl_subject_label *tmpsubj;
60451 + struct file *filp;
60452 + struct name_entry *nmatch;
60453 +
60454 + filp = task->exec_file;
60455 + if (filp == NULL)
60456 + return 0;
60457 +
60458 + /* the following is to apply the correct subject
60459 + on binaries running when the RBAC system
60460 + is enabled, when the binaries have been
60461 + replaced or deleted since their execution
60462 + -----
60463 + when the RBAC system starts, the inode/dev
60464 + from exec_file will be one the RBAC system
60465 + is unaware of. It only knows the inode/dev
60466 + of the present file on disk, or the absence
60467 + of it.
60468 + */
60469 + preempt_disable();
60470 + tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
60471 +
60472 + nmatch = lookup_name_entry(tmpname);
60473 + preempt_enable();
60474 + tmpsubj = NULL;
60475 + if (nmatch) {
60476 + if (nmatch->deleted)
60477 + tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
60478 + else
60479 + tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
60480 + if (tmpsubj != NULL)
60481 + task->acl = tmpsubj;
60482 + }
60483 + if (tmpsubj == NULL)
60484 + task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
60485 + task->role);
60486 + if (task->acl) {
60487 + task->is_writable = 0;
60488 + /* ignore additional mmap checks for processes that are writable
60489 + by the default ACL */
60490 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
60491 + if (unlikely(obj->mode & GR_WRITE))
60492 + task->is_writable = 1;
60493 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
60494 + if (unlikely(obj->mode & GR_WRITE))
60495 + task->is_writable = 1;
60496 +
60497 + gr_set_proc_res(task);
60498 +
60499 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
60500 + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
60501 +#endif
60502 + } else {
60503 + return 1;
60504 + }
60505 +
60506 + return 0;
60507 +}
60508 +
60509 +int
60510 +gr_set_acls(const int type)
60511 +{
60512 + struct task_struct *task, *task2;
60513 + struct acl_role_label *role = current->role;
60514 + __u16 acl_role_id = current->acl_role_id;
60515 + const struct cred *cred;
60516 + int ret;
60517 +
60518 + rcu_read_lock();
60519 + read_lock(&tasklist_lock);
60520 + read_lock(&grsec_exec_file_lock);
60521 + do_each_thread(task2, task) {
60522 + /* check to see if we're called from the exit handler,
60523 + if so, only replace ACLs that have inherited the admin
60524 + ACL */
60525 +
60526 + if (type && (task->role != role ||
60527 + task->acl_role_id != acl_role_id))
60528 + continue;
60529 +
60530 + task->acl_role_id = 0;
60531 + task->acl_sp_role = 0;
60532 +
60533 + if (task->exec_file) {
60534 + cred = __task_cred(task);
60535 + task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
60536 +
60537 + ret = gr_apply_subject_to_task(task);
60538 + if (ret) {
60539 + read_unlock(&grsec_exec_file_lock);
60540 + read_unlock(&tasklist_lock);
60541 + rcu_read_unlock();
60542 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
60543 + return ret;
60544 + }
60545 + } else {
60546 + // it's a kernel process
60547 + task->role = kernel_role;
60548 + task->acl = kernel_role->root_label;
60549 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
60550 + task->acl->mode &= ~GR_PROCFIND;
60551 +#endif
60552 + }
60553 + } while_each_thread(task2, task);
60554 + read_unlock(&grsec_exec_file_lock);
60555 + read_unlock(&tasklist_lock);
60556 + rcu_read_unlock();
60557 +
60558 + return 0;
60559 +}
60560 +
60561 +void
60562 +gr_learn_resource(const struct task_struct *task,
60563 + const int res, const unsigned long wanted, const int gt)
60564 +{
60565 + struct acl_subject_label *acl;
60566 + const struct cred *cred;
60567 +
60568 + if (unlikely((gr_status & GR_READY) &&
60569 + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
60570 + goto skip_reslog;
60571 +
60572 +#ifdef CONFIG_GRKERNSEC_RESLOG
60573 + gr_log_resource(task, res, wanted, gt);
60574 +#endif
60575 + skip_reslog:
60576 +
60577 + if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
60578 + return;
60579 +
60580 + acl = task->acl;
60581 +
60582 + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
60583 + !(acl->resmask & (1 << (unsigned short) res))))
60584 + return;
60585 +
60586 + if (wanted >= acl->res[res].rlim_cur) {
60587 + unsigned long res_add;
60588 +
60589 + res_add = wanted;
60590 + switch (res) {
60591 + case RLIMIT_CPU:
60592 + res_add += GR_RLIM_CPU_BUMP;
60593 + break;
60594 + case RLIMIT_FSIZE:
60595 + res_add += GR_RLIM_FSIZE_BUMP;
60596 + break;
60597 + case RLIMIT_DATA:
60598 + res_add += GR_RLIM_DATA_BUMP;
60599 + break;
60600 + case RLIMIT_STACK:
60601 + res_add += GR_RLIM_STACK_BUMP;
60602 + break;
60603 + case RLIMIT_CORE:
60604 + res_add += GR_RLIM_CORE_BUMP;
60605 + break;
60606 + case RLIMIT_RSS:
60607 + res_add += GR_RLIM_RSS_BUMP;
60608 + break;
60609 + case RLIMIT_NPROC:
60610 + res_add += GR_RLIM_NPROC_BUMP;
60611 + break;
60612 + case RLIMIT_NOFILE:
60613 + res_add += GR_RLIM_NOFILE_BUMP;
60614 + break;
60615 + case RLIMIT_MEMLOCK:
60616 + res_add += GR_RLIM_MEMLOCK_BUMP;
60617 + break;
60618 + case RLIMIT_AS:
60619 + res_add += GR_RLIM_AS_BUMP;
60620 + break;
60621 + case RLIMIT_LOCKS:
60622 + res_add += GR_RLIM_LOCKS_BUMP;
60623 + break;
60624 + case RLIMIT_SIGPENDING:
60625 + res_add += GR_RLIM_SIGPENDING_BUMP;
60626 + break;
60627 + case RLIMIT_MSGQUEUE:
60628 + res_add += GR_RLIM_MSGQUEUE_BUMP;
60629 + break;
60630 + case RLIMIT_NICE:
60631 + res_add += GR_RLIM_NICE_BUMP;
60632 + break;
60633 + case RLIMIT_RTPRIO:
60634 + res_add += GR_RLIM_RTPRIO_BUMP;
60635 + break;
60636 + case RLIMIT_RTTIME:
60637 + res_add += GR_RLIM_RTTIME_BUMP;
60638 + break;
60639 + }
60640 +
60641 + acl->res[res].rlim_cur = res_add;
60642 +
60643 + if (wanted > acl->res[res].rlim_max)
60644 + acl->res[res].rlim_max = res_add;
60645 +
60646 + /* only log the subject filename, since resource logging is supported for
60647 + single-subject learning only */
60648 + rcu_read_lock();
60649 + cred = __task_cred(task);
60650 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
60651 + task->role->roletype, cred->uid, cred->gid, acl->filename,
60652 + acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
60653 + "", (unsigned long) res, &task->signal->saved_ip);
60654 + rcu_read_unlock();
60655 + }
60656 +
60657 + return;
60658 +}
60659 +
60660 +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
60661 +void
60662 +pax_set_initial_flags(struct linux_binprm *bprm)
60663 +{
60664 + struct task_struct *task = current;
60665 + struct acl_subject_label *proc;
60666 + unsigned long flags;
60667 +
60668 + if (unlikely(!(gr_status & GR_READY)))
60669 + return;
60670 +
60671 + flags = pax_get_flags(task);
60672 +
60673 + proc = task->acl;
60674 +
60675 + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
60676 + flags &= ~MF_PAX_PAGEEXEC;
60677 + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
60678 + flags &= ~MF_PAX_SEGMEXEC;
60679 + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
60680 + flags &= ~MF_PAX_RANDMMAP;
60681 + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
60682 + flags &= ~MF_PAX_EMUTRAMP;
60683 + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
60684 + flags &= ~MF_PAX_MPROTECT;
60685 +
60686 + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
60687 + flags |= MF_PAX_PAGEEXEC;
60688 + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
60689 + flags |= MF_PAX_SEGMEXEC;
60690 + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
60691 + flags |= MF_PAX_RANDMMAP;
60692 + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
60693 + flags |= MF_PAX_EMUTRAMP;
60694 + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
60695 + flags |= MF_PAX_MPROTECT;
60696 +
60697 + pax_set_flags(task, flags);
60698 +
60699 + return;
60700 +}
60701 +#endif
60702 +
60703 +#ifdef CONFIG_SYSCTL
60704 +/* Eric Biederman likes breaking userland ABI and every inode-based security
60705 + system to save 35kb of memory */
60706 +
60707 +/* we modify the passed in filename, but adjust it back before returning */
60708 +static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
60709 +{
60710 + struct name_entry *nmatch;
60711 + char *p, *lastp = NULL;
60712 + struct acl_object_label *obj = NULL, *tmp;
60713 + struct acl_subject_label *tmpsubj;
60714 + char c = '\0';
60715 +
60716 + read_lock(&gr_inode_lock);
60717 +
60718 + p = name + len - 1;
60719 + do {
60720 + nmatch = lookup_name_entry(name);
60721 + if (lastp != NULL)
60722 + *lastp = c;
60723 +
60724 + if (nmatch == NULL)
60725 + goto next_component;
60726 + tmpsubj = current->acl;
60727 + do {
60728 + obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
60729 + if (obj != NULL) {
60730 + tmp = obj->globbed;
60731 + while (tmp) {
60732 + if (!glob_match(tmp->filename, name)) {
60733 + obj = tmp;
60734 + goto found_obj;
60735 + }
60736 + tmp = tmp->next;
60737 + }
60738 + goto found_obj;
60739 + }
60740 + } while ((tmpsubj = tmpsubj->parent_subject));
60741 +next_component:
60742 + /* end case */
60743 + if (p == name)
60744 + break;
60745 +
60746 + while (*p != '/')
60747 + p--;
60748 + if (p == name)
60749 + lastp = p + 1;
60750 + else {
60751 + lastp = p;
60752 + p--;
60753 + }
60754 + c = *lastp;
60755 + *lastp = '\0';
60756 + } while (1);
60757 +found_obj:
60758 + read_unlock(&gr_inode_lock);
60759 + /* obj returned will always be non-null */
60760 + return obj;
60761 +}
60762 +
60763 +/* returns 0 when allowing, non-zero on error
60764 + op of 0 is used for readdir, so we don't log the names of hidden files
60765 +*/
60766 +__u32
60767 +gr_handle_sysctl(const struct ctl_table *table, const int op)
60768 +{
60769 + ctl_table *tmp;
60770 + const char *proc_sys = "/proc/sys";
60771 + char *path;
60772 + struct acl_object_label *obj;
60773 + unsigned short len = 0, pos = 0, depth = 0, i;
60774 + __u32 err = 0;
60775 + __u32 mode = 0;
60776 +
60777 + if (unlikely(!(gr_status & GR_READY)))
60778 + return 0;
60779 +
60780 + /* for now, ignore operations on non-sysctl entries if it's not a
60781 + readdir*/
60782 + if (table->child != NULL && op != 0)
60783 + return 0;
60784 +
60785 + mode |= GR_FIND;
60786 + /* it's only a read if it's an entry, read on dirs is for readdir */
60787 + if (op & MAY_READ)
60788 + mode |= GR_READ;
60789 + if (op & MAY_WRITE)
60790 + mode |= GR_WRITE;
60791 +
60792 + preempt_disable();
60793 +
60794 + path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
60795 +
60796 + /* it's only a read/write if it's an actual entry, not a dir
60797 + (which are opened for readdir)
60798 + */
60799 +
60800 + /* convert the requested sysctl entry into a pathname */
60801 +
60802 + for (tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
60803 + len += strlen(tmp->procname);
60804 + len++;
60805 + depth++;
60806 + }
60807 +
60808 + if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
60809 + /* deny */
60810 + goto out;
60811 + }
60812 +
60813 + memset(path, 0, PAGE_SIZE);
60814 +
60815 + memcpy(path, proc_sys, strlen(proc_sys));
60816 +
60817 + pos += strlen(proc_sys);
60818 +
60819 + for (; depth > 0; depth--) {
60820 + path[pos] = '/';
60821 + pos++;
60822 + for (i = 1, tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
60823 + if (depth == i) {
60824 + memcpy(path + pos, tmp->procname,
60825 + strlen(tmp->procname));
60826 + pos += strlen(tmp->procname);
60827 + }
60828 + i++;
60829 + }
60830 + }
60831 +
60832 + obj = gr_lookup_by_name(path, pos);
60833 + err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
60834 +
60835 + if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
60836 + ((err & mode) != mode))) {
60837 + __u32 new_mode = mode;
60838 +
60839 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
60840 +
60841 + err = 0;
60842 + gr_log_learn_sysctl(path, new_mode);
60843 + } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
60844 + gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
60845 + err = -ENOENT;
60846 + } else if (!(err & GR_FIND)) {
60847 + err = -ENOENT;
60848 + } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
60849 + gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
60850 + path, (mode & GR_READ) ? " reading" : "",
60851 + (mode & GR_WRITE) ? " writing" : "");
60852 + err = -EACCES;
60853 + } else if ((err & mode) != mode) {
60854 + err = -EACCES;
60855 + } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
60856 + gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
60857 + path, (mode & GR_READ) ? " reading" : "",
60858 + (mode & GR_WRITE) ? " writing" : "");
60859 + err = 0;
60860 + } else
60861 + err = 0;
60862 +
60863 + out:
60864 + preempt_enable();
60865 +
60866 + return err;
60867 +}
60868 +#endif
60869 +
60870 +int
60871 +gr_handle_proc_ptrace(struct task_struct *task)
60872 +{
60873 + struct file *filp;
60874 + struct task_struct *tmp = task;
60875 + struct task_struct *curtemp = current;
60876 + __u32 retmode;
60877 +
60878 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
60879 + if (unlikely(!(gr_status & GR_READY)))
60880 + return 0;
60881 +#endif
60882 +
60883 + read_lock(&tasklist_lock);
60884 + read_lock(&grsec_exec_file_lock);
60885 + filp = task->exec_file;
60886 +
60887 + while (tmp->pid > 0) {
60888 + if (tmp == curtemp)
60889 + break;
60890 + tmp = tmp->real_parent;
60891 + }
60892 +
60893 + if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
60894 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
60895 + read_unlock(&grsec_exec_file_lock);
60896 + read_unlock(&tasklist_lock);
60897 + return 1;
60898 + }
60899 +
60900 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
60901 + if (!(gr_status & GR_READY)) {
60902 + read_unlock(&grsec_exec_file_lock);
60903 + read_unlock(&tasklist_lock);
60904 + return 0;
60905 + }
60906 +#endif
60907 +
60908 + retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
60909 + read_unlock(&grsec_exec_file_lock);
60910 + read_unlock(&tasklist_lock);
60911 +
60912 + if (retmode & GR_NOPTRACE)
60913 + return 1;
60914 +
60915 + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
60916 + && (current->acl != task->acl || (current->acl != current->role->root_label
60917 + && current->pid != task->pid)))
60918 + return 1;
60919 +
60920 + return 0;
60921 +}
60922 +
60923 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
60924 +{
60925 + if (unlikely(!(gr_status & GR_READY)))
60926 + return;
60927 +
60928 + if (!(current->role->roletype & GR_ROLE_GOD))
60929 + return;
60930 +
60931 + seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
60932 + p->role->rolename, gr_task_roletype_to_char(p),
60933 + p->acl->filename);
60934 +}
60935 +
60936 +int
60937 +gr_handle_ptrace(struct task_struct *task, const long request)
60938 +{
60939 + struct task_struct *tmp = task;
60940 + struct task_struct *curtemp = current;
60941 + __u32 retmode;
60942 +
60943 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
60944 + if (unlikely(!(gr_status & GR_READY)))
60945 + return 0;
60946 +#endif
60947 +
60948 + read_lock(&tasklist_lock);
60949 + while (tmp->pid > 0) {
60950 + if (tmp == curtemp)
60951 + break;
60952 + tmp = tmp->real_parent;
60953 + }
60954 +
60955 + if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
60956 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
60957 + read_unlock(&tasklist_lock);
60958 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
60959 + return 1;
60960 + }
60961 + read_unlock(&tasklist_lock);
60962 +
60963 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
60964 + if (!(gr_status & GR_READY))
60965 + return 0;
60966 +#endif
60967 +
60968 + read_lock(&grsec_exec_file_lock);
60969 + if (unlikely(!task->exec_file)) {
60970 + read_unlock(&grsec_exec_file_lock);
60971 + return 0;
60972 + }
60973 +
60974 + retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
60975 + read_unlock(&grsec_exec_file_lock);
60976 +
60977 + if (retmode & GR_NOPTRACE) {
60978 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
60979 + return 1;
60980 + }
60981 +
60982 + if (retmode & GR_PTRACERD) {
60983 + switch (request) {
60984 + case PTRACE_POKETEXT:
60985 + case PTRACE_POKEDATA:
60986 + case PTRACE_POKEUSR:
60987 +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
60988 + case PTRACE_SETREGS:
60989 + case PTRACE_SETFPREGS:
60990 +#endif
60991 +#ifdef CONFIG_X86
60992 + case PTRACE_SETFPXREGS:
60993 +#endif
60994 +#ifdef CONFIG_ALTIVEC
60995 + case PTRACE_SETVRREGS:
60996 +#endif
60997 + return 1;
60998 + default:
60999 + return 0;
61000 + }
61001 + } else if (!(current->acl->mode & GR_POVERRIDE) &&
61002 + !(current->role->roletype & GR_ROLE_GOD) &&
61003 + (current->acl != task->acl)) {
61004 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
61005 + return 1;
61006 + }
61007 +
61008 + return 0;
61009 +}
61010 +
61011 +static int is_writable_mmap(const struct file *filp)
61012 +{
61013 + struct task_struct *task = current;
61014 + struct acl_object_label *obj, *obj2;
61015 +
61016 + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
61017 + !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
61018 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
61019 + obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
61020 + task->role->root_label);
61021 + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
61022 + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
61023 + return 1;
61024 + }
61025 + }
61026 + return 0;
61027 +}
61028 +
61029 +int
61030 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
61031 +{
61032 + __u32 mode;
61033 +
61034 + if (unlikely(!file || !(prot & PROT_EXEC)))
61035 + return 1;
61036 +
61037 + if (is_writable_mmap(file))
61038 + return 0;
61039 +
61040 + mode =
61041 + gr_search_file(file->f_path.dentry,
61042 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
61043 + file->f_path.mnt);
61044 +
61045 + if (!gr_tpe_allow(file))
61046 + return 0;
61047 +
61048 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
61049 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
61050 + return 0;
61051 + } else if (unlikely(!(mode & GR_EXEC))) {
61052 + return 0;
61053 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
61054 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
61055 + return 1;
61056 + }
61057 +
61058 + return 1;
61059 +}
61060 +
61061 +int
61062 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
61063 +{
61064 + __u32 mode;
61065 +
61066 + if (unlikely(!file || !(prot & PROT_EXEC)))
61067 + return 1;
61068 +
61069 + if (is_writable_mmap(file))
61070 + return 0;
61071 +
61072 + mode =
61073 + gr_search_file(file->f_path.dentry,
61074 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
61075 + file->f_path.mnt);
61076 +
61077 + if (!gr_tpe_allow(file))
61078 + return 0;
61079 +
61080 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
61081 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
61082 + return 0;
61083 + } else if (unlikely(!(mode & GR_EXEC))) {
61084 + return 0;
61085 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
61086 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
61087 + return 1;
61088 + }
61089 +
61090 + return 1;
61091 +}
61092 +
61093 +void
61094 +gr_acl_handle_psacct(struct task_struct *task, const long code)
61095 +{
61096 + unsigned long runtime;
61097 + unsigned long cputime;
61098 + unsigned int wday, cday;
61099 + __u8 whr, chr;
61100 + __u8 wmin, cmin;
61101 + __u8 wsec, csec;
61102 + struct timespec timeval;
61103 +
61104 + if (unlikely(!(gr_status & GR_READY) || !task->acl ||
61105 + !(task->acl->mode & GR_PROCACCT)))
61106 + return;
61107 +
61108 + do_posix_clock_monotonic_gettime(&timeval);
61109 + runtime = timeval.tv_sec - task->start_time.tv_sec;
61110 + wday = runtime / (3600 * 24);
61111 + runtime -= wday * (3600 * 24);
61112 + whr = runtime / 3600;
61113 + runtime -= whr * 3600;
61114 + wmin = runtime / 60;
61115 + runtime -= wmin * 60;
61116 + wsec = runtime;
61117 +
61118 + cputime = (task->utime + task->stime) / HZ;
61119 + cday = cputime / (3600 * 24);
61120 + cputime -= cday * (3600 * 24);
61121 + chr = cputime / 3600;
61122 + cputime -= chr * 3600;
61123 + cmin = cputime / 60;
61124 + cputime -= cmin * 60;
61125 + csec = cputime;
61126 +
61127 + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
61128 +
61129 + return;
61130 +}
61131 +
61132 +void gr_set_kernel_label(struct task_struct *task)
61133 +{
61134 + if (gr_status & GR_READY) {
61135 + task->role = kernel_role;
61136 + task->acl = kernel_role->root_label;
61137 + }
61138 + return;
61139 +}
61140 +
61141 +#ifdef CONFIG_TASKSTATS
61142 +int gr_is_taskstats_denied(int pid)
61143 +{
61144 + struct task_struct *task;
61145 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
61146 + const struct cred *cred;
61147 +#endif
61148 + int ret = 0;
61149 +
61150 + /* restrict taskstats viewing to un-chrooted root users
61151 + who have the 'view' subject flag if the RBAC system is enabled
61152 + */
61153 +
61154 + rcu_read_lock();
61155 + read_lock(&tasklist_lock);
61156 + task = find_task_by_vpid(pid);
61157 + if (task) {
61158 +#ifdef CONFIG_GRKERNSEC_CHROOT
61159 + if (proc_is_chrooted(task))
61160 + ret = -EACCES;
61161 +#endif
61162 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
61163 + cred = __task_cred(task);
61164 +#ifdef CONFIG_GRKERNSEC_PROC_USER
61165 + if (cred->uid != 0)
61166 + ret = -EACCES;
61167 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
61168 + if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
61169 + ret = -EACCES;
61170 +#endif
61171 +#endif
61172 + if (gr_status & GR_READY) {
61173 + if (!(task->acl->mode & GR_VIEW))
61174 + ret = -EACCES;
61175 + }
61176 + } else
61177 + ret = -ENOENT;
61178 +
61179 + read_unlock(&tasklist_lock);
61180 + rcu_read_unlock();
61181 +
61182 + return ret;
61183 +}
61184 +#endif
61185 +
61186 +/* AUXV entries are filled via a descendant of search_binary_handler
61187 + after we've already applied the subject for the target
61188 +*/
61189 +int gr_acl_enable_at_secure(void)
61190 +{
61191 + if (unlikely(!(gr_status & GR_READY)))
61192 + return 0;
61193 +
61194 + if (current->acl->mode & GR_ATSECURE)
61195 + return 1;
61196 +
61197 + return 0;
61198 +}
61199 +
61200 +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
61201 +{
61202 + struct task_struct *task = current;
61203 + struct dentry *dentry = file->f_path.dentry;
61204 + struct vfsmount *mnt = file->f_path.mnt;
61205 + struct acl_object_label *obj, *tmp;
61206 + struct acl_subject_label *subj;
61207 + unsigned int bufsize;
61208 + int is_not_root;
61209 + char *path;
61210 + dev_t dev = __get_dev(dentry);
61211 +
61212 + if (unlikely(!(gr_status & GR_READY)))
61213 + return 1;
61214 +
61215 + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
61216 + return 1;
61217 +
61218 + /* ignore Eric Biederman */
61219 + if (IS_PRIVATE(dentry->d_inode))
61220 + return 1;
61221 +
61222 + subj = task->acl;
61223 + do {
61224 + obj = lookup_acl_obj_label(ino, dev, subj);
61225 + if (obj != NULL)
61226 + return (obj->mode & GR_FIND) ? 1 : 0;
61227 + } while ((subj = subj->parent_subject));
61228 +
61229 + /* this is purely an optimization since we're looking for an object
61230 + for the directory we're doing a readdir on
61231 + if it's possible for any globbed object to match the entry we're
61232 + filling into the directory, then the object we find here will be
61233 + an anchor point with attached globbed objects
61234 + */
61235 + obj = chk_obj_label_noglob(dentry, mnt, task->acl);
61236 + if (obj->globbed == NULL)
61237 + return (obj->mode & GR_FIND) ? 1 : 0;
61238 +
61239 + is_not_root = ((obj->filename[0] == '/') &&
61240 + (obj->filename[1] == '\0')) ? 0 : 1;
61241 + bufsize = PAGE_SIZE - namelen - is_not_root;
61242 +
61243 + /* check bufsize > PAGE_SIZE || bufsize == 0 */
61244 + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
61245 + return 1;
61246 +
61247 + preempt_disable();
61248 + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
61249 + bufsize);
61250 +
61251 + bufsize = strlen(path);
61252 +
61253 + /* if base is "/", don't append an additional slash */
61254 + if (is_not_root)
61255 + *(path + bufsize) = '/';
61256 + memcpy(path + bufsize + is_not_root, name, namelen);
61257 + *(path + bufsize + namelen + is_not_root) = '\0';
61258 +
61259 + tmp = obj->globbed;
61260 + while (tmp) {
61261 + if (!glob_match(tmp->filename, path)) {
61262 + preempt_enable();
61263 + return (tmp->mode & GR_FIND) ? 1 : 0;
61264 + }
61265 + tmp = tmp->next;
61266 + }
61267 + preempt_enable();
61268 + return (obj->mode & GR_FIND) ? 1 : 0;
61269 +}
61270 +
61271 +#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
61272 +EXPORT_SYMBOL(gr_acl_is_enabled);
61273 +#endif
61274 +EXPORT_SYMBOL(gr_learn_resource);
61275 +EXPORT_SYMBOL(gr_set_kernel_label);
61276 +#ifdef CONFIG_SECURITY
61277 +EXPORT_SYMBOL(gr_check_user_change);
61278 +EXPORT_SYMBOL(gr_check_group_change);
61279 +#endif
61280 +
61281 diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
61282 new file mode 100644
61283 index 0000000..34fefda
61284 --- /dev/null
61285 +++ b/grsecurity/gracl_alloc.c
61286 @@ -0,0 +1,105 @@
61287 +#include <linux/kernel.h>
61288 +#include <linux/mm.h>
61289 +#include <linux/slab.h>
61290 +#include <linux/vmalloc.h>
61291 +#include <linux/gracl.h>
61292 +#include <linux/grsecurity.h>
61293 +
61294 +static unsigned long alloc_stack_next = 1;
61295 +static unsigned long alloc_stack_size = 1;
61296 +static void **alloc_stack;
61297 +
61298 +static __inline__ int
61299 +alloc_pop(void)
61300 +{
61301 + if (alloc_stack_next == 1)
61302 + return 0;
61303 +
61304 + kfree(alloc_stack[alloc_stack_next - 2]);
61305 +
61306 + alloc_stack_next--;
61307 +
61308 + return 1;
61309 +}
61310 +
61311 +static __inline__ int
61312 +alloc_push(void *buf)
61313 +{
61314 + if (alloc_stack_next >= alloc_stack_size)
61315 + return 1;
61316 +
61317 + alloc_stack[alloc_stack_next - 1] = buf;
61318 +
61319 + alloc_stack_next++;
61320 +
61321 + return 0;
61322 +}
61323 +
61324 +void *
61325 +acl_alloc(unsigned long len)
61326 +{
61327 + void *ret = NULL;
61328 +
61329 + if (!len || len > PAGE_SIZE)
61330 + goto out;
61331 +
61332 + ret = kmalloc(len, GFP_KERNEL);
61333 +
61334 + if (ret) {
61335 + if (alloc_push(ret)) {
61336 + kfree(ret);
61337 + ret = NULL;
61338 + }
61339 + }
61340 +
61341 +out:
61342 + return ret;
61343 +}
61344 +
61345 +void *
61346 +acl_alloc_num(unsigned long num, unsigned long len)
61347 +{
61348 + if (!len || (num > (PAGE_SIZE / len)))
61349 + return NULL;
61350 +
61351 + return acl_alloc(num * len);
61352 +}
61353 +
61354 +void
61355 +acl_free_all(void)
61356 +{
61357 + if (gr_acl_is_enabled() || !alloc_stack)
61358 + return;
61359 +
61360 + while (alloc_pop()) ;
61361 +
61362 + if (alloc_stack) {
61363 + if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
61364 + kfree(alloc_stack);
61365 + else
61366 + vfree(alloc_stack);
61367 + }
61368 +
61369 + alloc_stack = NULL;
61370 + alloc_stack_size = 1;
61371 + alloc_stack_next = 1;
61372 +
61373 + return;
61374 +}
61375 +
61376 +int
61377 +acl_alloc_stack_init(unsigned long size)
61378 +{
61379 + if ((size * sizeof (void *)) <= PAGE_SIZE)
61380 + alloc_stack =
61381 + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
61382 + else
61383 + alloc_stack = (void **) vmalloc(size * sizeof (void *));
61384 +
61385 + alloc_stack_size = size;
61386 +
61387 + if (!alloc_stack)
61388 + return 0;
61389 + else
61390 + return 1;
61391 +}
61392 diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
61393 new file mode 100644
61394 index 0000000..955ddfb
61395 --- /dev/null
61396 +++ b/grsecurity/gracl_cap.c
61397 @@ -0,0 +1,101 @@
61398 +#include <linux/kernel.h>
61399 +#include <linux/module.h>
61400 +#include <linux/sched.h>
61401 +#include <linux/gracl.h>
61402 +#include <linux/grsecurity.h>
61403 +#include <linux/grinternal.h>
61404 +
61405 +extern const char *captab_log[];
61406 +extern int captab_log_entries;
61407 +
61408 +int
61409 +gr_acl_is_capable(const int cap)
61410 +{
61411 + struct task_struct *task = current;
61412 + const struct cred *cred = current_cred();
61413 + struct acl_subject_label *curracl;
61414 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
61415 + kernel_cap_t cap_audit = __cap_empty_set;
61416 +
61417 + if (!gr_acl_is_enabled())
61418 + return 1;
61419 +
61420 + curracl = task->acl;
61421 +
61422 + cap_drop = curracl->cap_lower;
61423 + cap_mask = curracl->cap_mask;
61424 + cap_audit = curracl->cap_invert_audit;
61425 +
61426 + while ((curracl = curracl->parent_subject)) {
61427 + /* if the cap isn't specified in the current computed mask but is specified in the
61428 + current level subject, and is lowered in the current level subject, then add
61429 + it to the set of dropped capabilities
61430 + otherwise, add the current level subject's mask to the current computed mask
61431 + */
61432 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
61433 + cap_raise(cap_mask, cap);
61434 + if (cap_raised(curracl->cap_lower, cap))
61435 + cap_raise(cap_drop, cap);
61436 + if (cap_raised(curracl->cap_invert_audit, cap))
61437 + cap_raise(cap_audit, cap);
61438 + }
61439 + }
61440 +
61441 + if (!cap_raised(cap_drop, cap)) {
61442 + if (cap_raised(cap_audit, cap))
61443 + gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
61444 + return 1;
61445 + }
61446 +
61447 + curracl = task->acl;
61448 +
61449 + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
61450 + && cap_raised(cred->cap_effective, cap)) {
61451 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
61452 + task->role->roletype, cred->uid,
61453 + cred->gid, task->exec_file ?
61454 + gr_to_filename(task->exec_file->f_path.dentry,
61455 + task->exec_file->f_path.mnt) : curracl->filename,
61456 + curracl->filename, 0UL,
61457 + 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
61458 + return 1;
61459 + }
61460 +
61461 + if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
61462 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
61463 + return 0;
61464 +}
61465 +
61466 +int
61467 +gr_acl_is_capable_nolog(const int cap)
61468 +{
61469 + struct acl_subject_label *curracl;
61470 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
61471 +
61472 + if (!gr_acl_is_enabled())
61473 + return 1;
61474 +
61475 + curracl = current->acl;
61476 +
61477 + cap_drop = curracl->cap_lower;
61478 + cap_mask = curracl->cap_mask;
61479 +
61480 + while ((curracl = curracl->parent_subject)) {
61481 + /* if the cap isn't specified in the current computed mask but is specified in the
61482 + current level subject, and is lowered in the current level subject, then add
61483 + it to the set of dropped capabilities
61484 + otherwise, add the current level subject's mask to the current computed mask
61485 + */
61486 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
61487 + cap_raise(cap_mask, cap);
61488 + if (cap_raised(curracl->cap_lower, cap))
61489 + cap_raise(cap_drop, cap);
61490 + }
61491 + }
61492 +
61493 + if (!cap_raised(cap_drop, cap))
61494 + return 1;
61495 +
61496 + return 0;
61497 +}
61498 +
61499 diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
61500 new file mode 100644
61501 index 0000000..523e7e8
61502 --- /dev/null
61503 +++ b/grsecurity/gracl_fs.c
61504 @@ -0,0 +1,435 @@
61505 +#include <linux/kernel.h>
61506 +#include <linux/sched.h>
61507 +#include <linux/types.h>
61508 +#include <linux/fs.h>
61509 +#include <linux/file.h>
61510 +#include <linux/stat.h>
61511 +#include <linux/grsecurity.h>
61512 +#include <linux/grinternal.h>
61513 +#include <linux/gracl.h>
61514 +
61515 +umode_t
61516 +gr_acl_umask(void)
61517 +{
61518 + if (unlikely(!gr_acl_is_enabled()))
61519 + return 0;
61520 +
61521 + return current->role->umask;
61522 +}
61523 +
61524 +__u32
61525 +gr_acl_handle_hidden_file(const struct dentry * dentry,
61526 + const struct vfsmount * mnt)
61527 +{
61528 + __u32 mode;
61529 +
61530 + if (unlikely(!dentry->d_inode))
61531 + return GR_FIND;
61532 +
61533 + mode =
61534 + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
61535 +
61536 + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
61537 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
61538 + return mode;
61539 + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
61540 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
61541 + return 0;
61542 + } else if (unlikely(!(mode & GR_FIND)))
61543 + return 0;
61544 +
61545 + return GR_FIND;
61546 +}
61547 +
61548 +__u32
61549 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
61550 + int acc_mode)
61551 +{
61552 + __u32 reqmode = GR_FIND;
61553 + __u32 mode;
61554 +
61555 + if (unlikely(!dentry->d_inode))
61556 + return reqmode;
61557 +
61558 + if (acc_mode & MAY_APPEND)
61559 + reqmode |= GR_APPEND;
61560 + else if (acc_mode & MAY_WRITE)
61561 + reqmode |= GR_WRITE;
61562 + if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
61563 + reqmode |= GR_READ;
61564 +
61565 + mode =
61566 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
61567 + mnt);
61568 +
61569 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
61570 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
61571 + reqmode & GR_READ ? " reading" : "",
61572 + reqmode & GR_WRITE ? " writing" : reqmode &
61573 + GR_APPEND ? " appending" : "");
61574 + return reqmode;
61575 + } else
61576 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
61577 + {
61578 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
61579 + reqmode & GR_READ ? " reading" : "",
61580 + reqmode & GR_WRITE ? " writing" : reqmode &
61581 + GR_APPEND ? " appending" : "");
61582 + return 0;
61583 + } else if (unlikely((mode & reqmode) != reqmode))
61584 + return 0;
61585 +
61586 + return reqmode;
61587 +}
61588 +
61589 +__u32
61590 +gr_acl_handle_creat(const struct dentry * dentry,
61591 + const struct dentry * p_dentry,
61592 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
61593 + const int imode)
61594 +{
61595 + __u32 reqmode = GR_WRITE | GR_CREATE;
61596 + __u32 mode;
61597 +
61598 + if (acc_mode & MAY_APPEND)
61599 + reqmode |= GR_APPEND;
61600 + // if a directory was required or the directory already exists, then
61601 + // don't count this open as a read
61602 + if ((acc_mode & MAY_READ) &&
61603 + !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
61604 + reqmode |= GR_READ;
61605 + if ((open_flags & O_CREAT) && (imode & (S_ISUID | S_ISGID)))
61606 + reqmode |= GR_SETID;
61607 +
61608 + mode =
61609 + gr_check_create(dentry, p_dentry, p_mnt,
61610 + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
61611 +
61612 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
61613 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
61614 + reqmode & GR_READ ? " reading" : "",
61615 + reqmode & GR_WRITE ? " writing" : reqmode &
61616 + GR_APPEND ? " appending" : "");
61617 + return reqmode;
61618 + } else
61619 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
61620 + {
61621 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
61622 + reqmode & GR_READ ? " reading" : "",
61623 + reqmode & GR_WRITE ? " writing" : reqmode &
61624 + GR_APPEND ? " appending" : "");
61625 + return 0;
61626 + } else if (unlikely((mode & reqmode) != reqmode))
61627 + return 0;
61628 +
61629 + return reqmode;
61630 +}
61631 +
61632 +__u32
61633 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
61634 + const int fmode)
61635 +{
61636 + __u32 mode, reqmode = GR_FIND;
61637 +
61638 + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
61639 + reqmode |= GR_EXEC;
61640 + if (fmode & S_IWOTH)
61641 + reqmode |= GR_WRITE;
61642 + if (fmode & S_IROTH)
61643 + reqmode |= GR_READ;
61644 +
61645 + mode =
61646 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
61647 + mnt);
61648 +
61649 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
61650 + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
61651 + reqmode & GR_READ ? " reading" : "",
61652 + reqmode & GR_WRITE ? " writing" : "",
61653 + reqmode & GR_EXEC ? " executing" : "");
61654 + return reqmode;
61655 + } else
61656 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
61657 + {
61658 + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
61659 + reqmode & GR_READ ? " reading" : "",
61660 + reqmode & GR_WRITE ? " writing" : "",
61661 + reqmode & GR_EXEC ? " executing" : "");
61662 + return 0;
61663 + } else if (unlikely((mode & reqmode) != reqmode))
61664 + return 0;
61665 +
61666 + return reqmode;
61667 +}
61668 +
61669 +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
61670 +{
61671 + __u32 mode;
61672 +
61673 + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
61674 +
61675 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
61676 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
61677 + return mode;
61678 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
61679 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
61680 + return 0;
61681 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
61682 + return 0;
61683 +
61684 + return (reqmode);
61685 +}
61686 +
61687 +__u32
61688 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
61689 +{
61690 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
61691 +}
61692 +
61693 +__u32
61694 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
61695 +{
61696 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
61697 +}
61698 +
61699 +__u32
61700 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
61701 +{
61702 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
61703 +}
61704 +
61705 +__u32
61706 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
61707 +{
61708 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
61709 +}
61710 +
61711 +__u32
61712 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
61713 + umode_t *modeptr)
61714 +{
61715 + mode_t mode;
61716 +
61717 + *modeptr &= ~(mode_t)gr_acl_umask();
61718 + mode = *modeptr;
61719 +
61720 + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
61721 + return 1;
61722 +
61723 + if (unlikely(mode & (S_ISUID | S_ISGID))) {
61724 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
61725 + GR_CHMOD_ACL_MSG);
61726 + } else {
61727 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
61728 + }
61729 +}
61730 +
61731 +__u32
61732 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
61733 +{
61734 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
61735 +}
61736 +
61737 +__u32
61738 +gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
61739 +{
61740 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
61741 +}
61742 +
61743 +__u32
61744 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
61745 +{
61746 + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
61747 +}
61748 +
61749 +__u32
61750 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
61751 +{
61752 + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
61753 + GR_UNIXCONNECT_ACL_MSG);
61754 +}
61755 +
61756 +/* hardlinks require at minimum create and link permission,
61757 + any additional privilege required is based on the
61758 + privilege of the file being linked to
61759 +*/
61760 +__u32
61761 +gr_acl_handle_link(const struct dentry * new_dentry,
61762 + const struct dentry * parent_dentry,
61763 + const struct vfsmount * parent_mnt,
61764 + const struct dentry * old_dentry,
61765 + const struct vfsmount * old_mnt, const char *to)
61766 +{
61767 + __u32 mode;
61768 + __u32 needmode = GR_CREATE | GR_LINK;
61769 + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
61770 +
61771 + mode =
61772 + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
61773 + old_mnt);
61774 +
61775 + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
61776 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
61777 + return mode;
61778 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
61779 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
61780 + return 0;
61781 + } else if (unlikely((mode & needmode) != needmode))
61782 + return 0;
61783 +
61784 + return 1;
61785 +}
61786 +
61787 +__u32
61788 +gr_acl_handle_symlink(const struct dentry * new_dentry,
61789 + const struct dentry * parent_dentry,
61790 + const struct vfsmount * parent_mnt, const char *from)
61791 +{
61792 + __u32 needmode = GR_WRITE | GR_CREATE;
61793 + __u32 mode;
61794 +
61795 + mode =
61796 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
61797 + GR_CREATE | GR_AUDIT_CREATE |
61798 + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
61799 +
61800 + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
61801 + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
61802 + return mode;
61803 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
61804 + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
61805 + return 0;
61806 + } else if (unlikely((mode & needmode) != needmode))
61807 + return 0;
61808 +
61809 + return (GR_WRITE | GR_CREATE);
61810 +}
61811 +
61812 +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
61813 +{
61814 + __u32 mode;
61815 +
61816 + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
61817 +
61818 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
61819 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
61820 + return mode;
61821 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
61822 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
61823 + return 0;
61824 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
61825 + return 0;
61826 +
61827 + return (reqmode);
61828 +}
61829 +
61830 +__u32
61831 +gr_acl_handle_mknod(const struct dentry * new_dentry,
61832 + const struct dentry * parent_dentry,
61833 + const struct vfsmount * parent_mnt,
61834 + const int mode)
61835 +{
61836 + __u32 reqmode = GR_WRITE | GR_CREATE;
61837 + if (unlikely(mode & (S_ISUID | S_ISGID)))
61838 + reqmode |= GR_SETID;
61839 +
61840 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
61841 + reqmode, GR_MKNOD_ACL_MSG);
61842 +}
61843 +
61844 +__u32
61845 +gr_acl_handle_mkdir(const struct dentry *new_dentry,
61846 + const struct dentry *parent_dentry,
61847 + const struct vfsmount *parent_mnt)
61848 +{
61849 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
61850 + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
61851 +}
61852 +
61853 +#define RENAME_CHECK_SUCCESS(old, new) \
61854 + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
61855 + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
61856 +
61857 +int
61858 +gr_acl_handle_rename(struct dentry *new_dentry,
61859 + struct dentry *parent_dentry,
61860 + const struct vfsmount *parent_mnt,
61861 + struct dentry *old_dentry,
61862 + struct inode *old_parent_inode,
61863 + struct vfsmount *old_mnt, const char *newname)
61864 +{
61865 + __u32 comp1, comp2;
61866 + int error = 0;
61867 +
61868 + if (unlikely(!gr_acl_is_enabled()))
61869 + return 0;
61870 +
61871 + if (!new_dentry->d_inode) {
61872 + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
61873 + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
61874 + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
61875 + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
61876 + GR_DELETE | GR_AUDIT_DELETE |
61877 + GR_AUDIT_READ | GR_AUDIT_WRITE |
61878 + GR_SUPPRESS, old_mnt);
61879 + } else {
61880 + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
61881 + GR_CREATE | GR_DELETE |
61882 + GR_AUDIT_CREATE | GR_AUDIT_DELETE |
61883 + GR_AUDIT_READ | GR_AUDIT_WRITE |
61884 + GR_SUPPRESS, parent_mnt);
61885 + comp2 =
61886 + gr_search_file(old_dentry,
61887 + GR_READ | GR_WRITE | GR_AUDIT_READ |
61888 + GR_DELETE | GR_AUDIT_DELETE |
61889 + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
61890 + }
61891 +
61892 + if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
61893 + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
61894 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
61895 + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
61896 + && !(comp2 & GR_SUPPRESS)) {
61897 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
61898 + error = -EACCES;
61899 + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
61900 + error = -EACCES;
61901 +
61902 + return error;
61903 +}
61904 +
61905 +void
61906 +gr_acl_handle_exit(void)
61907 +{
61908 + u16 id;
61909 + char *rolename;
61910 + struct file *exec_file;
61911 +
61912 + if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
61913 + !(current->role->roletype & GR_ROLE_PERSIST))) {
61914 + id = current->acl_role_id;
61915 + rolename = current->role->rolename;
61916 + gr_set_acls(1);
61917 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
61918 + }
61919 +
61920 + write_lock(&grsec_exec_file_lock);
61921 + exec_file = current->exec_file;
61922 + current->exec_file = NULL;
61923 + write_unlock(&grsec_exec_file_lock);
61924 +
61925 + if (exec_file)
61926 + fput(exec_file);
61927 +}
61928 +
61929 +int
61930 +gr_acl_handle_procpidmem(const struct task_struct *task)
61931 +{
61932 + if (unlikely(!gr_acl_is_enabled()))
61933 + return 0;
61934 +
61935 + if (task != current && task->acl->mode & GR_PROTPROCFD)
61936 + return -EACCES;
61937 +
61938 + return 0;
61939 +}
61940 diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
61941 new file mode 100644
61942 index 0000000..cd07b96
61943 --- /dev/null
61944 +++ b/grsecurity/gracl_ip.c
61945 @@ -0,0 +1,382 @@
61946 +#include <linux/kernel.h>
61947 +#include <asm/uaccess.h>
61948 +#include <asm/errno.h>
61949 +#include <net/sock.h>
61950 +#include <linux/file.h>
61951 +#include <linux/fs.h>
61952 +#include <linux/net.h>
61953 +#include <linux/in.h>
61954 +#include <linux/skbuff.h>
61955 +#include <linux/ip.h>
61956 +#include <linux/udp.h>
61957 +#include <linux/smp_lock.h>
61958 +#include <linux/types.h>
61959 +#include <linux/sched.h>
61960 +#include <linux/netdevice.h>
61961 +#include <linux/inetdevice.h>
61962 +#include <linux/gracl.h>
61963 +#include <linux/grsecurity.h>
61964 +#include <linux/grinternal.h>
61965 +
61966 +#define GR_BIND 0x01
61967 +#define GR_CONNECT 0x02
61968 +#define GR_INVERT 0x04
61969 +#define GR_BINDOVERRIDE 0x08
61970 +#define GR_CONNECTOVERRIDE 0x10
61971 +#define GR_SOCK_FAMILY 0x20
61972 +
61973 +static const char * gr_protocols[IPPROTO_MAX] = {
61974 + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
61975 + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
61976 + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
61977 + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
61978 + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
61979 + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
61980 + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
61981 + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
61982 + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
61983 + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
61984 + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
61985 + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
61986 + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
61987 + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
61988 + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
61989 + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
61990 + "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
61991 + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
61992 + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
61993 + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
61994 + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
61995 + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
61996 + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
61997 + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
61998 + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
61999 + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
62000 + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
62001 + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
62002 + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
62003 + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
62004 + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
62005 + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
62006 + };
62007 +
62008 +static const char * gr_socktypes[SOCK_MAX] = {
62009 + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
62010 + "unknown:7", "unknown:8", "unknown:9", "packet"
62011 + };
62012 +
62013 +static const char * gr_sockfamilies[AF_MAX+1] = {
62014 + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
62015 + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
62016 + "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
62017 + "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154"
62018 + };
62019 +
62020 +const char *
62021 +gr_proto_to_name(unsigned char proto)
62022 +{
62023 + return gr_protocols[proto];
62024 +}
62025 +
62026 +const char *
62027 +gr_socktype_to_name(unsigned char type)
62028 +{
62029 + return gr_socktypes[type];
62030 +}
62031 +
62032 +const char *
62033 +gr_sockfamily_to_name(unsigned char family)
62034 +{
62035 + return gr_sockfamilies[family];
62036 +}
62037 +
62038 +int
62039 +gr_search_socket(const int domain, const int type, const int protocol)
62040 +{
62041 + struct acl_subject_label *curr;
62042 + const struct cred *cred = current_cred();
62043 +
62044 + if (unlikely(!gr_acl_is_enabled()))
62045 + goto exit;
62046 +
62047 + if ((domain < 0) || (type < 0) || (protocol < 0) ||
62048 + (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
62049 + goto exit; // let the kernel handle it
62050 +
62051 + curr = current->acl;
62052 +
62053 + if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
62054 + /* the family is allowed, if this is PF_INET allow it only if
62055 + the extra sock type/protocol checks pass */
62056 + if (domain == PF_INET)
62057 + goto inet_check;
62058 + goto exit;
62059 + } else {
62060 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
62061 + __u32 fakeip = 0;
62062 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
62063 + current->role->roletype, cred->uid,
62064 + cred->gid, current->exec_file ?
62065 + gr_to_filename(current->exec_file->f_path.dentry,
62066 + current->exec_file->f_path.mnt) :
62067 + curr->filename, curr->filename,
62068 + &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
62069 + &current->signal->saved_ip);
62070 + goto exit;
62071 + }
62072 + goto exit_fail;
62073 + }
62074 +
62075 +inet_check:
62076 + /* the rest of this checking is for IPv4 only */
62077 + if (!curr->ips)
62078 + goto exit;
62079 +
62080 + if ((curr->ip_type & (1 << type)) &&
62081 + (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
62082 + goto exit;
62083 +
62084 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
62085 + /* we don't place acls on raw sockets , and sometimes
62086 + dgram/ip sockets are opened for ioctl and not
62087 + bind/connect, so we'll fake a bind learn log */
62088 + if (type == SOCK_RAW || type == SOCK_PACKET) {
62089 + __u32 fakeip = 0;
62090 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
62091 + current->role->roletype, cred->uid,
62092 + cred->gid, current->exec_file ?
62093 + gr_to_filename(current->exec_file->f_path.dentry,
62094 + current->exec_file->f_path.mnt) :
62095 + curr->filename, curr->filename,
62096 + &fakeip, 0, type,
62097 + protocol, GR_CONNECT, &current->signal->saved_ip);
62098 + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
62099 + __u32 fakeip = 0;
62100 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
62101 + current->role->roletype, cred->uid,
62102 + cred->gid, current->exec_file ?
62103 + gr_to_filename(current->exec_file->f_path.dentry,
62104 + current->exec_file->f_path.mnt) :
62105 + curr->filename, curr->filename,
62106 + &fakeip, 0, type,
62107 + protocol, GR_BIND, &current->signal->saved_ip);
62108 + }
62109 + /* we'll log when they use connect or bind */
62110 + goto exit;
62111 + }
62112 +
62113 +exit_fail:
62114 + if (domain == PF_INET)
62115 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
62116 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
62117 + else
62118 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
62119 + gr_socktype_to_name(type), protocol);
62120 +
62121 + return 0;
62122 +exit:
62123 + return 1;
62124 +}
62125 +
62126 +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
62127 +{
62128 + if ((ip->mode & mode) &&
62129 + (ip_port >= ip->low) &&
62130 + (ip_port <= ip->high) &&
62131 + ((ntohl(ip_addr) & our_netmask) ==
62132 + (ntohl(our_addr) & our_netmask))
62133 + && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
62134 + && (ip->type & (1 << type))) {
62135 + if (ip->mode & GR_INVERT)
62136 + return 2; // specifically denied
62137 + else
62138 + return 1; // allowed
62139 + }
62140 +
62141 + return 0; // not specifically allowed, may continue parsing
62142 +}
62143 +
62144 +static int
62145 +gr_search_connectbind(const int full_mode, struct sock *sk,
62146 + struct sockaddr_in *addr, const int type)
62147 +{
62148 + char iface[IFNAMSIZ] = {0};
62149 + struct acl_subject_label *curr;
62150 + struct acl_ip_label *ip;
62151 + struct inet_sock *isk;
62152 + struct net_device *dev;
62153 + struct in_device *idev;
62154 + unsigned long i;
62155 + int ret;
62156 + int mode = full_mode & (GR_BIND | GR_CONNECT);
62157 + __u32 ip_addr = 0;
62158 + __u32 our_addr;
62159 + __u32 our_netmask;
62160 + char *p;
62161 + __u16 ip_port = 0;
62162 + const struct cred *cred = current_cred();
62163 +
62164 + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
62165 + return 0;
62166 +
62167 + curr = current->acl;
62168 + isk = inet_sk(sk);
62169 +
62170 + /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
62171 + if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
62172 + addr->sin_addr.s_addr = curr->inaddr_any_override;
62173 + if ((full_mode & GR_CONNECT) && isk->saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
62174 + struct sockaddr_in saddr;
62175 + int err;
62176 +
62177 + saddr.sin_family = AF_INET;
62178 + saddr.sin_addr.s_addr = curr->inaddr_any_override;
62179 + saddr.sin_port = isk->sport;
62180 +
62181 + err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
62182 + if (err)
62183 + return err;
62184 +
62185 + err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
62186 + if (err)
62187 + return err;
62188 + }
62189 +
62190 + if (!curr->ips)
62191 + return 0;
62192 +
62193 + ip_addr = addr->sin_addr.s_addr;
62194 + ip_port = ntohs(addr->sin_port);
62195 +
62196 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
62197 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
62198 + current->role->roletype, cred->uid,
62199 + cred->gid, current->exec_file ?
62200 + gr_to_filename(current->exec_file->f_path.dentry,
62201 + current->exec_file->f_path.mnt) :
62202 + curr->filename, curr->filename,
62203 + &ip_addr, ip_port, type,
62204 + sk->sk_protocol, mode, &current->signal->saved_ip);
62205 + return 0;
62206 + }
62207 +
62208 + for (i = 0; i < curr->ip_num; i++) {
62209 + ip = *(curr->ips + i);
62210 + if (ip->iface != NULL) {
62211 + strncpy(iface, ip->iface, IFNAMSIZ - 1);
62212 + p = strchr(iface, ':');
62213 + if (p != NULL)
62214 + *p = '\0';
62215 + dev = dev_get_by_name(sock_net(sk), iface);
62216 + if (dev == NULL)
62217 + continue;
62218 + idev = in_dev_get(dev);
62219 + if (idev == NULL) {
62220 + dev_put(dev);
62221 + continue;
62222 + }
62223 + rcu_read_lock();
62224 + for_ifa(idev) {
62225 + if (!strcmp(ip->iface, ifa->ifa_label)) {
62226 + our_addr = ifa->ifa_address;
62227 + our_netmask = 0xffffffff;
62228 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
62229 + if (ret == 1) {
62230 + rcu_read_unlock();
62231 + in_dev_put(idev);
62232 + dev_put(dev);
62233 + return 0;
62234 + } else if (ret == 2) {
62235 + rcu_read_unlock();
62236 + in_dev_put(idev);
62237 + dev_put(dev);
62238 + goto denied;
62239 + }
62240 + }
62241 + } endfor_ifa(idev);
62242 + rcu_read_unlock();
62243 + in_dev_put(idev);
62244 + dev_put(dev);
62245 + } else {
62246 + our_addr = ip->addr;
62247 + our_netmask = ip->netmask;
62248 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
62249 + if (ret == 1)
62250 + return 0;
62251 + else if (ret == 2)
62252 + goto denied;
62253 + }
62254 + }
62255 +
62256 +denied:
62257 + if (mode == GR_BIND)
62258 + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
62259 + else if (mode == GR_CONNECT)
62260 + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
62261 +
62262 + return -EACCES;
62263 +}
62264 +
62265 +int
62266 +gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
62267 +{
62268 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
62269 +}
62270 +
62271 +int
62272 +gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
62273 +{
62274 + return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
62275 +}
62276 +
62277 +int gr_search_listen(struct socket *sock)
62278 +{
62279 + struct sock *sk = sock->sk;
62280 + struct sockaddr_in addr;
62281 +
62282 + addr.sin_addr.s_addr = inet_sk(sk)->saddr;
62283 + addr.sin_port = inet_sk(sk)->sport;
62284 +
62285 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
62286 +}
62287 +
62288 +int gr_search_accept(struct socket *sock)
62289 +{
62290 + struct sock *sk = sock->sk;
62291 + struct sockaddr_in addr;
62292 +
62293 + addr.sin_addr.s_addr = inet_sk(sk)->saddr;
62294 + addr.sin_port = inet_sk(sk)->sport;
62295 +
62296 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
62297 +}
62298 +
62299 +int
62300 +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
62301 +{
62302 + if (addr)
62303 + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
62304 + else {
62305 + struct sockaddr_in sin;
62306 + const struct inet_sock *inet = inet_sk(sk);
62307 +
62308 + sin.sin_addr.s_addr = inet->daddr;
62309 + sin.sin_port = inet->dport;
62310 +
62311 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
62312 + }
62313 +}
62314 +
62315 +int
62316 +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
62317 +{
62318 + struct sockaddr_in sin;
62319 +
62320 + if (unlikely(skb->len < sizeof (struct udphdr)))
62321 + return 0; // skip this packet
62322 +
62323 + sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
62324 + sin.sin_port = udp_hdr(skb)->source;
62325 +
62326 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
62327 +}
62328 diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
62329 new file mode 100644
62330 index 0000000..34bdd46
62331 --- /dev/null
62332 +++ b/grsecurity/gracl_learn.c
62333 @@ -0,0 +1,208 @@
62334 +#include <linux/kernel.h>
62335 +#include <linux/mm.h>
62336 +#include <linux/sched.h>
62337 +#include <linux/poll.h>
62338 +#include <linux/smp_lock.h>
62339 +#include <linux/string.h>
62340 +#include <linux/file.h>
62341 +#include <linux/types.h>
62342 +#include <linux/vmalloc.h>
62343 +#include <linux/grinternal.h>
62344 +
62345 +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
62346 + size_t count, loff_t *ppos);
62347 +extern int gr_acl_is_enabled(void);
62348 +
62349 +static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
62350 +static int gr_learn_attached;
62351 +
62352 +/* use a 512k buffer */
62353 +#define LEARN_BUFFER_SIZE (512 * 1024)
62354 +
62355 +static DEFINE_SPINLOCK(gr_learn_lock);
62356 +static DEFINE_MUTEX(gr_learn_user_mutex);
62357 +
62358 +/* we need to maintain two buffers, so that the kernel context of grlearn
62359 + uses a semaphore around the userspace copying, and the other kernel contexts
62360 + use a spinlock when copying into the buffer, since they cannot sleep
62361 +*/
62362 +static char *learn_buffer;
62363 +static char *learn_buffer_user;
62364 +static int learn_buffer_len;
62365 +static int learn_buffer_user_len;
62366 +
62367 +static ssize_t
62368 +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
62369 +{
62370 + DECLARE_WAITQUEUE(wait, current);
62371 + ssize_t retval = 0;
62372 +
62373 + add_wait_queue(&learn_wait, &wait);
62374 + set_current_state(TASK_INTERRUPTIBLE);
62375 + do {
62376 + mutex_lock(&gr_learn_user_mutex);
62377 + spin_lock(&gr_learn_lock);
62378 + if (learn_buffer_len)
62379 + break;
62380 + spin_unlock(&gr_learn_lock);
62381 + mutex_unlock(&gr_learn_user_mutex);
62382 + if (file->f_flags & O_NONBLOCK) {
62383 + retval = -EAGAIN;
62384 + goto out;
62385 + }
62386 + if (signal_pending(current)) {
62387 + retval = -ERESTARTSYS;
62388 + goto out;
62389 + }
62390 +
62391 + schedule();
62392 + } while (1);
62393 +
62394 + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
62395 + learn_buffer_user_len = learn_buffer_len;
62396 + retval = learn_buffer_len;
62397 + learn_buffer_len = 0;
62398 +
62399 + spin_unlock(&gr_learn_lock);
62400 +
62401 + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
62402 + retval = -EFAULT;
62403 +
62404 + mutex_unlock(&gr_learn_user_mutex);
62405 +out:
62406 + set_current_state(TASK_RUNNING);
62407 + remove_wait_queue(&learn_wait, &wait);
62408 + return retval;
62409 +}
62410 +
62411 +static unsigned int
62412 +poll_learn(struct file * file, poll_table * wait)
62413 +{
62414 + poll_wait(file, &learn_wait, wait);
62415 +
62416 + if (learn_buffer_len)
62417 + return (POLLIN | POLLRDNORM);
62418 +
62419 + return 0;
62420 +}
62421 +
62422 +void
62423 +gr_clear_learn_entries(void)
62424 +{
62425 + char *tmp;
62426 +
62427 + mutex_lock(&gr_learn_user_mutex);
62428 + spin_lock(&gr_learn_lock);
62429 + tmp = learn_buffer;
62430 + learn_buffer = NULL;
62431 + spin_unlock(&gr_learn_lock);
62432 + if (tmp)
62433 + vfree(tmp);
62434 + if (learn_buffer_user != NULL) {
62435 + vfree(learn_buffer_user);
62436 + learn_buffer_user = NULL;
62437 + }
62438 + learn_buffer_len = 0;
62439 + mutex_unlock(&gr_learn_user_mutex);
62440 +
62441 + return;
62442 +}
62443 +
62444 +void
62445 +gr_add_learn_entry(const char *fmt, ...)
62446 +{
62447 + va_list args;
62448 + unsigned int len;
62449 +
62450 + if (!gr_learn_attached)
62451 + return;
62452 +
62453 + spin_lock(&gr_learn_lock);
62454 +
62455 + /* leave a gap at the end so we know when it's "full" but don't have to
62456 + compute the exact length of the string we're trying to append
62457 + */
62458 + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
62459 + spin_unlock(&gr_learn_lock);
62460 + wake_up_interruptible(&learn_wait);
62461 + return;
62462 + }
62463 + if (learn_buffer == NULL) {
62464 + spin_unlock(&gr_learn_lock);
62465 + return;
62466 + }
62467 +
62468 + va_start(args, fmt);
62469 + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
62470 + va_end(args);
62471 +
62472 + learn_buffer_len += len + 1;
62473 +
62474 + spin_unlock(&gr_learn_lock);
62475 + wake_up_interruptible(&learn_wait);
62476 +
62477 + return;
62478 +}
62479 +
62480 +static int
62481 +open_learn(struct inode *inode, struct file *file)
62482 +{
62483 + if (file->f_mode & FMODE_READ && gr_learn_attached)
62484 + return -EBUSY;
62485 + if (file->f_mode & FMODE_READ) {
62486 + int retval = 0;
62487 + mutex_lock(&gr_learn_user_mutex);
62488 + if (learn_buffer == NULL)
62489 + learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
62490 + if (learn_buffer_user == NULL)
62491 + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
62492 + if (learn_buffer == NULL) {
62493 + retval = -ENOMEM;
62494 + goto out_error;
62495 + }
62496 + if (learn_buffer_user == NULL) {
62497 + retval = -ENOMEM;
62498 + goto out_error;
62499 + }
62500 + learn_buffer_len = 0;
62501 + learn_buffer_user_len = 0;
62502 + gr_learn_attached = 1;
62503 +out_error:
62504 + mutex_unlock(&gr_learn_user_mutex);
62505 + return retval;
62506 + }
62507 + return 0;
62508 +}
62509 +
62510 +static int
62511 +close_learn(struct inode *inode, struct file *file)
62512 +{
62513 + if (file->f_mode & FMODE_READ) {
62514 + char *tmp = NULL;
62515 + mutex_lock(&gr_learn_user_mutex);
62516 + spin_lock(&gr_learn_lock);
62517 + tmp = learn_buffer;
62518 + learn_buffer = NULL;
62519 + spin_unlock(&gr_learn_lock);
62520 + if (tmp)
62521 + vfree(tmp);
62522 + if (learn_buffer_user != NULL) {
62523 + vfree(learn_buffer_user);
62524 + learn_buffer_user = NULL;
62525 + }
62526 + learn_buffer_len = 0;
62527 + learn_buffer_user_len = 0;
62528 + gr_learn_attached = 0;
62529 + mutex_unlock(&gr_learn_user_mutex);
62530 + }
62531 +
62532 + return 0;
62533 +}
62534 +
62535 +const struct file_operations grsec_fops = {
62536 + .read = read_learn,
62537 + .write = write_grsec_handler,
62538 + .open = open_learn,
62539 + .release = close_learn,
62540 + .poll = poll_learn,
62541 +};
62542 diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
62543 new file mode 100644
62544 index 0000000..70b2179
62545 --- /dev/null
62546 +++ b/grsecurity/gracl_res.c
62547 @@ -0,0 +1,67 @@
62548 +#include <linux/kernel.h>
62549 +#include <linux/sched.h>
62550 +#include <linux/gracl.h>
62551 +#include <linux/grinternal.h>
62552 +
62553 +static const char *restab_log[] = {
62554 + [RLIMIT_CPU] = "RLIMIT_CPU",
62555 + [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
62556 + [RLIMIT_DATA] = "RLIMIT_DATA",
62557 + [RLIMIT_STACK] = "RLIMIT_STACK",
62558 + [RLIMIT_CORE] = "RLIMIT_CORE",
62559 + [RLIMIT_RSS] = "RLIMIT_RSS",
62560 + [RLIMIT_NPROC] = "RLIMIT_NPROC",
62561 + [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
62562 + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
62563 + [RLIMIT_AS] = "RLIMIT_AS",
62564 + [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
62565 + [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
62566 + [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
62567 + [RLIMIT_NICE] = "RLIMIT_NICE",
62568 + [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
62569 + [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
62570 + [GR_CRASH_RES] = "RLIMIT_CRASH"
62571 +};
62572 +
62573 +void
62574 +gr_log_resource(const struct task_struct *task,
62575 + const int res, const unsigned long wanted, const int gt)
62576 +{
62577 + const struct cred *cred;
62578 + unsigned long rlim;
62579 +
62580 + if (!gr_acl_is_enabled() && !grsec_resource_logging)
62581 + return;
62582 +
62583 + // not yet supported resource
62584 + if (unlikely(!restab_log[res]))
62585 + return;
62586 +
62587 + if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
62588 + rlim = task->signal->rlim[res].rlim_max;
62589 + else
62590 + rlim = task->signal->rlim[res].rlim_cur;
62591 + if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
62592 + return;
62593 +
62594 + rcu_read_lock();
62595 + cred = __task_cred(task);
62596 +
62597 + if (res == RLIMIT_NPROC &&
62598 + (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
62599 + cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
62600 + goto out_rcu_unlock;
62601 + else if (res == RLIMIT_MEMLOCK &&
62602 + cap_raised(cred->cap_effective, CAP_IPC_LOCK))
62603 + goto out_rcu_unlock;
62604 + else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
62605 + goto out_rcu_unlock;
62606 + rcu_read_unlock();
62607 +
62608 + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
62609 +
62610 + return;
62611 +out_rcu_unlock:
62612 + rcu_read_unlock();
62613 + return;
62614 +}
62615 diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
62616 new file mode 100644
62617 index 0000000..1d1b734
62618 --- /dev/null
62619 +++ b/grsecurity/gracl_segv.c
62620 @@ -0,0 +1,284 @@
62621 +#include <linux/kernel.h>
62622 +#include <linux/mm.h>
62623 +#include <asm/uaccess.h>
62624 +#include <asm/errno.h>
62625 +#include <asm/mman.h>
62626 +#include <net/sock.h>
62627 +#include <linux/file.h>
62628 +#include <linux/fs.h>
62629 +#include <linux/net.h>
62630 +#include <linux/in.h>
62631 +#include <linux/smp_lock.h>
62632 +#include <linux/slab.h>
62633 +#include <linux/types.h>
62634 +#include <linux/sched.h>
62635 +#include <linux/timer.h>
62636 +#include <linux/gracl.h>
62637 +#include <linux/grsecurity.h>
62638 +#include <linux/grinternal.h>
62639 +
62640 +static struct crash_uid *uid_set;
62641 +static unsigned short uid_used;
62642 +static DEFINE_SPINLOCK(gr_uid_lock);
62643 +extern rwlock_t gr_inode_lock;
62644 +extern struct acl_subject_label *
62645 + lookup_acl_subj_label(const ino_t inode, const dev_t dev,
62646 + struct acl_role_label *role);
62647 +extern int gr_fake_force_sig(int sig, struct task_struct *t);
62648 +
62649 +int
62650 +gr_init_uidset(void)
62651 +{
62652 + uid_set =
62653 + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
62654 + uid_used = 0;
62655 +
62656 + return uid_set ? 1 : 0;
62657 +}
62658 +
62659 +void
62660 +gr_free_uidset(void)
62661 +{
62662 + if (uid_set)
62663 + kfree(uid_set);
62664 +
62665 + return;
62666 +}
62667 +
62668 +int
62669 +gr_find_uid(const uid_t uid)
62670 +{
62671 + struct crash_uid *tmp = uid_set;
62672 + uid_t buid;
62673 + int low = 0, high = uid_used - 1, mid;
62674 +
62675 + while (high >= low) {
62676 + mid = (low + high) >> 1;
62677 + buid = tmp[mid].uid;
62678 + if (buid == uid)
62679 + return mid;
62680 + if (buid > uid)
62681 + high = mid - 1;
62682 + if (buid < uid)
62683 + low = mid + 1;
62684 + }
62685 +
62686 + return -1;
62687 +}
62688 +
62689 +static __inline__ void
62690 +gr_insertsort(void)
62691 +{
62692 + unsigned short i, j;
62693 + struct crash_uid index;
62694 +
62695 + for (i = 1; i < uid_used; i++) {
62696 + index = uid_set[i];
62697 + j = i;
62698 + while ((j > 0) && uid_set[j - 1].uid > index.uid) {
62699 + uid_set[j] = uid_set[j - 1];
62700 + j--;
62701 + }
62702 + uid_set[j] = index;
62703 + }
62704 +
62705 + return;
62706 +}
62707 +
62708 +static __inline__ void
62709 +gr_insert_uid(const uid_t uid, const unsigned long expires)
62710 +{
62711 + int loc;
62712 +
62713 + if (uid_used == GR_UIDTABLE_MAX)
62714 + return;
62715 +
62716 + loc = gr_find_uid(uid);
62717 +
62718 + if (loc >= 0) {
62719 + uid_set[loc].expires = expires;
62720 + return;
62721 + }
62722 +
62723 + uid_set[uid_used].uid = uid;
62724 + uid_set[uid_used].expires = expires;
62725 + uid_used++;
62726 +
62727 + gr_insertsort();
62728 +
62729 + return;
62730 +}
62731 +
62732 +void
62733 +gr_remove_uid(const unsigned short loc)
62734 +{
62735 + unsigned short i;
62736 +
62737 + for (i = loc + 1; i < uid_used; i++)
62738 + uid_set[i - 1] = uid_set[i];
62739 +
62740 + uid_used--;
62741 +
62742 + return;
62743 +}
62744 +
62745 +int
62746 +gr_check_crash_uid(const uid_t uid)
62747 +{
62748 + int loc;
62749 + int ret = 0;
62750 +
62751 + if (unlikely(!gr_acl_is_enabled()))
62752 + return 0;
62753 +
62754 + spin_lock(&gr_uid_lock);
62755 + loc = gr_find_uid(uid);
62756 +
62757 + if (loc < 0)
62758 + goto out_unlock;
62759 +
62760 + if (time_before_eq(uid_set[loc].expires, get_seconds()))
62761 + gr_remove_uid(loc);
62762 + else
62763 + ret = 1;
62764 +
62765 +out_unlock:
62766 + spin_unlock(&gr_uid_lock);
62767 + return ret;
62768 +}
62769 +
62770 +static __inline__ int
62771 +proc_is_setxid(const struct cred *cred)
62772 +{
62773 + if (cred->uid != cred->euid || cred->uid != cred->suid ||
62774 + cred->uid != cred->fsuid)
62775 + return 1;
62776 + if (cred->gid != cred->egid || cred->gid != cred->sgid ||
62777 + cred->gid != cred->fsgid)
62778 + return 1;
62779 +
62780 + return 0;
62781 +}
62782 +
62783 +void
62784 +gr_handle_crash(struct task_struct *task, const int sig)
62785 +{
62786 + struct acl_subject_label *curr;
62787 + struct task_struct *tsk, *tsk2;
62788 + const struct cred *cred;
62789 + const struct cred *cred2;
62790 +
62791 + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
62792 + return;
62793 +
62794 + if (unlikely(!gr_acl_is_enabled()))
62795 + return;
62796 +
62797 + curr = task->acl;
62798 +
62799 + if (!(curr->resmask & (1 << GR_CRASH_RES)))
62800 + return;
62801 +
62802 + if (time_before_eq(curr->expires, get_seconds())) {
62803 + curr->expires = 0;
62804 + curr->crashes = 0;
62805 + }
62806 +
62807 + curr->crashes++;
62808 +
62809 + if (!curr->expires)
62810 + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
62811 +
62812 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
62813 + time_after(curr->expires, get_seconds())) {
62814 + rcu_read_lock();
62815 + cred = __task_cred(task);
62816 + if (cred->uid && proc_is_setxid(cred)) {
62817 + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
62818 + spin_lock(&gr_uid_lock);
62819 + gr_insert_uid(cred->uid, curr->expires);
62820 + spin_unlock(&gr_uid_lock);
62821 + curr->expires = 0;
62822 + curr->crashes = 0;
62823 + read_lock(&tasklist_lock);
62824 + do_each_thread(tsk2, tsk) {
62825 + cred2 = __task_cred(tsk);
62826 + if (tsk != task && cred2->uid == cred->uid)
62827 + gr_fake_force_sig(SIGKILL, tsk);
62828 + } while_each_thread(tsk2, tsk);
62829 + read_unlock(&tasklist_lock);
62830 + } else {
62831 + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
62832 + read_lock(&tasklist_lock);
62833 + read_lock(&grsec_exec_file_lock);
62834 + do_each_thread(tsk2, tsk) {
62835 + if (likely(tsk != task)) {
62836 + // if this thread has the same subject as the one that triggered
62837 + // RES_CRASH and it's the same binary, kill it
62838 + if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
62839 + gr_fake_force_sig(SIGKILL, tsk);
62840 + }
62841 + } while_each_thread(tsk2, tsk);
62842 + read_unlock(&grsec_exec_file_lock);
62843 + read_unlock(&tasklist_lock);
62844 + }
62845 + rcu_read_unlock();
62846 + }
62847 +
62848 + return;
62849 +}
62850 +
62851 +int
62852 +gr_check_crash_exec(const struct file *filp)
62853 +{
62854 + struct acl_subject_label *curr;
62855 +
62856 + if (unlikely(!gr_acl_is_enabled()))
62857 + return 0;
62858 +
62859 + read_lock(&gr_inode_lock);
62860 + curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
62861 + filp->f_path.dentry->d_inode->i_sb->s_dev,
62862 + current->role);
62863 + read_unlock(&gr_inode_lock);
62864 +
62865 + if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
62866 + (!curr->crashes && !curr->expires))
62867 + return 0;
62868 +
62869 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
62870 + time_after(curr->expires, get_seconds()))
62871 + return 1;
62872 + else if (time_before_eq(curr->expires, get_seconds())) {
62873 + curr->crashes = 0;
62874 + curr->expires = 0;
62875 + }
62876 +
62877 + return 0;
62878 +}
62879 +
62880 +void
62881 +gr_handle_alertkill(struct task_struct *task)
62882 +{
62883 + struct acl_subject_label *curracl;
62884 + __u32 curr_ip;
62885 + struct task_struct *p, *p2;
62886 +
62887 + if (unlikely(!gr_acl_is_enabled()))
62888 + return;
62889 +
62890 + curracl = task->acl;
62891 + curr_ip = task->signal->curr_ip;
62892 +
62893 + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
62894 + read_lock(&tasklist_lock);
62895 + do_each_thread(p2, p) {
62896 + if (p->signal->curr_ip == curr_ip)
62897 + gr_fake_force_sig(SIGKILL, p);
62898 + } while_each_thread(p2, p);
62899 + read_unlock(&tasklist_lock);
62900 + } else if (curracl->mode & GR_KILLPROC)
62901 + gr_fake_force_sig(SIGKILL, task);
62902 +
62903 + return;
62904 +}
62905 diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
62906 new file mode 100644
62907 index 0000000..9d83a69
62908 --- /dev/null
62909 +++ b/grsecurity/gracl_shm.c
62910 @@ -0,0 +1,40 @@
62911 +#include <linux/kernel.h>
62912 +#include <linux/mm.h>
62913 +#include <linux/sched.h>
62914 +#include <linux/file.h>
62915 +#include <linux/ipc.h>
62916 +#include <linux/gracl.h>
62917 +#include <linux/grsecurity.h>
62918 +#include <linux/grinternal.h>
62919 +
62920 +int
62921 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62922 + const time_t shm_createtime, const uid_t cuid, const int shmid)
62923 +{
62924 + struct task_struct *task;
62925 +
62926 + if (!gr_acl_is_enabled())
62927 + return 1;
62928 +
62929 + rcu_read_lock();
62930 + read_lock(&tasklist_lock);
62931 +
62932 + task = find_task_by_vpid(shm_cprid);
62933 +
62934 + if (unlikely(!task))
62935 + task = find_task_by_vpid(shm_lapid);
62936 +
62937 + if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
62938 + (task->pid == shm_lapid)) &&
62939 + (task->acl->mode & GR_PROTSHM) &&
62940 + (task->acl != current->acl))) {
62941 + read_unlock(&tasklist_lock);
62942 + rcu_read_unlock();
62943 + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
62944 + return 0;
62945 + }
62946 + read_unlock(&tasklist_lock);
62947 + rcu_read_unlock();
62948 +
62949 + return 1;
62950 +}
62951 diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
62952 new file mode 100644
62953 index 0000000..bc0be01
62954 --- /dev/null
62955 +++ b/grsecurity/grsec_chdir.c
62956 @@ -0,0 +1,19 @@
62957 +#include <linux/kernel.h>
62958 +#include <linux/sched.h>
62959 +#include <linux/fs.h>
62960 +#include <linux/file.h>
62961 +#include <linux/grsecurity.h>
62962 +#include <linux/grinternal.h>
62963 +
62964 +void
62965 +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
62966 +{
62967 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
62968 + if ((grsec_enable_chdir && grsec_enable_group &&
62969 + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
62970 + !grsec_enable_group)) {
62971 + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
62972 + }
62973 +#endif
62974 + return;
62975 +}
62976 diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
62977 new file mode 100644
62978 index 0000000..197bdd5
62979 --- /dev/null
62980 +++ b/grsecurity/grsec_chroot.c
62981 @@ -0,0 +1,386 @@
62982 +#include <linux/kernel.h>
62983 +#include <linux/module.h>
62984 +#include <linux/sched.h>
62985 +#include <linux/file.h>
62986 +#include <linux/fs.h>
62987 +#include <linux/mount.h>
62988 +#include <linux/types.h>
62989 +#include <linux/pid_namespace.h>
62990 +#include <linux/grsecurity.h>
62991 +#include <linux/grinternal.h>
62992 +
62993 +void gr_set_chroot_entries(struct task_struct *task, struct path *path)
62994 +{
62995 +#ifdef CONFIG_GRKERNSEC
62996 + if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
62997 + path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
62998 + task->gr_is_chrooted = 1;
62999 + else
63000 + task->gr_is_chrooted = 0;
63001 +
63002 + task->gr_chroot_dentry = path->dentry;
63003 +#endif
63004 + return;
63005 +}
63006 +
63007 +void gr_clear_chroot_entries(struct task_struct *task)
63008 +{
63009 +#ifdef CONFIG_GRKERNSEC
63010 + task->gr_is_chrooted = 0;
63011 + task->gr_chroot_dentry = NULL;
63012 +#endif
63013 + return;
63014 +}
63015 +
63016 +int
63017 +gr_handle_chroot_unix(const pid_t pid)
63018 +{
63019 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
63020 + struct task_struct *p;
63021 +
63022 + if (unlikely(!grsec_enable_chroot_unix))
63023 + return 1;
63024 +
63025 + if (likely(!proc_is_chrooted(current)))
63026 + return 1;
63027 +
63028 + rcu_read_lock();
63029 + read_lock(&tasklist_lock);
63030 +
63031 + p = find_task_by_vpid_unrestricted(pid);
63032 + if (unlikely(p && !have_same_root(current, p))) {
63033 + read_unlock(&tasklist_lock);
63034 + rcu_read_unlock();
63035 + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
63036 + return 0;
63037 + }
63038 + read_unlock(&tasklist_lock);
63039 + rcu_read_unlock();
63040 +#endif
63041 + return 1;
63042 +}
63043 +
63044 +int
63045 +gr_handle_chroot_nice(void)
63046 +{
63047 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
63048 + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
63049 + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
63050 + return -EPERM;
63051 + }
63052 +#endif
63053 + return 0;
63054 +}
63055 +
63056 +int
63057 +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
63058 +{
63059 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
63060 + if (grsec_enable_chroot_nice && (niceval < task_nice(p))
63061 + && proc_is_chrooted(current)) {
63062 + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
63063 + return -EACCES;
63064 + }
63065 +#endif
63066 + return 0;
63067 +}
63068 +
63069 +int
63070 +gr_handle_chroot_rawio(const struct inode *inode)
63071 +{
63072 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
63073 + if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
63074 + inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
63075 + return 1;
63076 +#endif
63077 + return 0;
63078 +}
63079 +
63080 +int
63081 +gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
63082 +{
63083 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
63084 + struct task_struct *p;
63085 + int ret = 0;
63086 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
63087 + return ret;
63088 +
63089 + read_lock(&tasklist_lock);
63090 + do_each_pid_task(pid, type, p) {
63091 + if (!have_same_root(current, p)) {
63092 + ret = 1;
63093 + goto out;
63094 + }
63095 + } while_each_pid_task(pid, type, p);
63096 +out:
63097 + read_unlock(&tasklist_lock);
63098 + return ret;
63099 +#endif
63100 + return 0;
63101 +}
63102 +
63103 +int
63104 +gr_pid_is_chrooted(struct task_struct *p)
63105 +{
63106 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
63107 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
63108 + return 0;
63109 +
63110 + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
63111 + !have_same_root(current, p)) {
63112 + return 1;
63113 + }
63114 +#endif
63115 + return 0;
63116 +}
63117 +
63118 +EXPORT_SYMBOL(gr_pid_is_chrooted);
63119 +
63120 +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
63121 +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
63122 +{
63123 + struct dentry *dentry = (struct dentry *)u_dentry;
63124 + struct vfsmount *mnt = (struct vfsmount *)u_mnt;
63125 + struct dentry *realroot;
63126 + struct vfsmount *realrootmnt;
63127 + struct dentry *currentroot;
63128 + struct vfsmount *currentmnt;
63129 + struct task_struct *reaper = &init_task;
63130 + int ret = 1;
63131 +
63132 + read_lock(&reaper->fs->lock);
63133 + realrootmnt = mntget(reaper->fs->root.mnt);
63134 + realroot = dget(reaper->fs->root.dentry);
63135 + read_unlock(&reaper->fs->lock);
63136 +
63137 + read_lock(&current->fs->lock);
63138 + currentmnt = mntget(current->fs->root.mnt);
63139 + currentroot = dget(current->fs->root.dentry);
63140 + read_unlock(&current->fs->lock);
63141 +
63142 + spin_lock(&dcache_lock);
63143 + for (;;) {
63144 + if (unlikely((dentry == realroot && mnt == realrootmnt)
63145 + || (dentry == currentroot && mnt == currentmnt)))
63146 + break;
63147 + if (unlikely(dentry == mnt->mnt_root || IS_ROOT(dentry))) {
63148 + if (mnt->mnt_parent == mnt)
63149 + break;
63150 + dentry = mnt->mnt_mountpoint;
63151 + mnt = mnt->mnt_parent;
63152 + continue;
63153 + }
63154 + dentry = dentry->d_parent;
63155 + }
63156 + spin_unlock(&dcache_lock);
63157 +
63158 + dput(currentroot);
63159 + mntput(currentmnt);
63160 +
63161 + /* access is outside of chroot */
63162 + if (dentry == realroot && mnt == realrootmnt)
63163 + ret = 0;
63164 +
63165 + dput(realroot);
63166 + mntput(realrootmnt);
63167 + return ret;
63168 +}
63169 +#endif
63170 +
63171 +int
63172 +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
63173 +{
63174 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
63175 + if (!grsec_enable_chroot_fchdir)
63176 + return 1;
63177 +
63178 + if (!proc_is_chrooted(current))
63179 + return 1;
63180 + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
63181 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
63182 + return 0;
63183 + }
63184 +#endif
63185 + return 1;
63186 +}
63187 +
63188 +int
63189 +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
63190 + const time_t shm_createtime)
63191 +{
63192 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
63193 + struct task_struct *p;
63194 + time_t starttime;
63195 +
63196 + if (unlikely(!grsec_enable_chroot_shmat))
63197 + return 1;
63198 +
63199 + if (likely(!proc_is_chrooted(current)))
63200 + return 1;
63201 +
63202 + rcu_read_lock();
63203 + read_lock(&tasklist_lock);
63204 +
63205 + if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
63206 + starttime = p->start_time.tv_sec;
63207 + if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
63208 + if (have_same_root(current, p)) {
63209 + goto allow;
63210 + } else {
63211 + read_unlock(&tasklist_lock);
63212 + rcu_read_unlock();
63213 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
63214 + return 0;
63215 + }
63216 + }
63217 + /* creator exited, pid reuse, fall through to next check */
63218 + }
63219 + if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
63220 + if (unlikely(!have_same_root(current, p))) {
63221 + read_unlock(&tasklist_lock);
63222 + rcu_read_unlock();
63223 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
63224 + return 0;
63225 + }
63226 + }
63227 +
63228 +allow:
63229 + read_unlock(&tasklist_lock);
63230 + rcu_read_unlock();
63231 +#endif
63232 + return 1;
63233 +}
63234 +
63235 +void
63236 +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
63237 +{
63238 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
63239 + if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
63240 + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
63241 +#endif
63242 + return;
63243 +}
63244 +
63245 +int
63246 +gr_handle_chroot_mknod(const struct dentry *dentry,
63247 + const struct vfsmount *mnt, const int mode)
63248 +{
63249 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
63250 + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
63251 + proc_is_chrooted(current)) {
63252 + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
63253 + return -EPERM;
63254 + }
63255 +#endif
63256 + return 0;
63257 +}
63258 +
63259 +int
63260 +gr_handle_chroot_mount(const struct dentry *dentry,
63261 + const struct vfsmount *mnt, const char *dev_name)
63262 +{
63263 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
63264 + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
63265 + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none" , dentry, mnt);
63266 + return -EPERM;
63267 + }
63268 +#endif
63269 + return 0;
63270 +}
63271 +
63272 +int
63273 +gr_handle_chroot_pivot(void)
63274 +{
63275 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
63276 + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
63277 + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
63278 + return -EPERM;
63279 + }
63280 +#endif
63281 + return 0;
63282 +}
63283 +
63284 +int
63285 +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
63286 +{
63287 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
63288 + if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
63289 + !gr_is_outside_chroot(dentry, mnt)) {
63290 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
63291 + return -EPERM;
63292 + }
63293 +#endif
63294 + return 0;
63295 +}
63296 +
63297 +extern const char *captab_log[];
63298 +extern int captab_log_entries;
63299 +
63300 +int
63301 +gr_chroot_is_capable(const int cap)
63302 +{
63303 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
63304 + if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
63305 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
63306 + if (cap_raised(chroot_caps, cap)) {
63307 + const struct cred *creds = current_cred();
63308 + if (cap_raised(creds->cap_effective, cap) && cap < captab_log_entries) {
63309 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, current, captab_log[cap]);
63310 + }
63311 + return 0;
63312 + }
63313 + }
63314 +#endif
63315 + return 1;
63316 +}
63317 +
63318 +int
63319 +gr_chroot_is_capable_nolog(const int cap)
63320 +{
63321 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
63322 + if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
63323 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
63324 + if (cap_raised(chroot_caps, cap)) {
63325 + return 0;
63326 + }
63327 + }
63328 +#endif
63329 + return 1;
63330 +}
63331 +
63332 +int
63333 +gr_handle_chroot_sysctl(const int op)
63334 +{
63335 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
63336 + if (grsec_enable_chroot_sysctl && proc_is_chrooted(current)
63337 + && (op & MAY_WRITE))
63338 + return -EACCES;
63339 +#endif
63340 + return 0;
63341 +}
63342 +
63343 +void
63344 +gr_handle_chroot_chdir(struct path *path)
63345 +{
63346 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
63347 + if (grsec_enable_chroot_chdir)
63348 + set_fs_pwd(current->fs, path);
63349 +#endif
63350 + return;
63351 +}
63352 +
63353 +int
63354 +gr_handle_chroot_chmod(const struct dentry *dentry,
63355 + const struct vfsmount *mnt, const int mode)
63356 +{
63357 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
63358 + /* allow chmod +s on directories, but not on files */
63359 + if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
63360 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
63361 + proc_is_chrooted(current)) {
63362 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
63363 + return -EPERM;
63364 + }
63365 +#endif
63366 + return 0;
63367 +}
63368 diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
63369 new file mode 100644
63370 index 0000000..40545bf
63371 --- /dev/null
63372 +++ b/grsecurity/grsec_disabled.c
63373 @@ -0,0 +1,437 @@
63374 +#include <linux/kernel.h>
63375 +#include <linux/module.h>
63376 +#include <linux/sched.h>
63377 +#include <linux/file.h>
63378 +#include <linux/fs.h>
63379 +#include <linux/kdev_t.h>
63380 +#include <linux/net.h>
63381 +#include <linux/in.h>
63382 +#include <linux/ip.h>
63383 +#include <linux/skbuff.h>
63384 +#include <linux/sysctl.h>
63385 +
63386 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
63387 +void
63388 +pax_set_initial_flags(struct linux_binprm *bprm)
63389 +{
63390 + return;
63391 +}
63392 +#endif
63393 +
63394 +#ifdef CONFIG_SYSCTL
63395 +__u32
63396 +gr_handle_sysctl(const struct ctl_table * table, const int op)
63397 +{
63398 + return 0;
63399 +}
63400 +#endif
63401 +
63402 +#ifdef CONFIG_TASKSTATS
63403 +int gr_is_taskstats_denied(int pid)
63404 +{
63405 + return 0;
63406 +}
63407 +#endif
63408 +
63409 +int
63410 +gr_acl_is_enabled(void)
63411 +{
63412 + return 0;
63413 +}
63414 +
63415 +void
63416 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
63417 +{
63418 + return;
63419 +}
63420 +
63421 +int
63422 +gr_handle_rawio(const struct inode *inode)
63423 +{
63424 + return 0;
63425 +}
63426 +
63427 +void
63428 +gr_acl_handle_psacct(struct task_struct *task, const long code)
63429 +{
63430 + return;
63431 +}
63432 +
63433 +int
63434 +gr_handle_ptrace(struct task_struct *task, const long request)
63435 +{
63436 + return 0;
63437 +}
63438 +
63439 +int
63440 +gr_handle_proc_ptrace(struct task_struct *task)
63441 +{
63442 + return 0;
63443 +}
63444 +
63445 +void
63446 +gr_learn_resource(const struct task_struct *task,
63447 + const int res, const unsigned long wanted, const int gt)
63448 +{
63449 + return;
63450 +}
63451 +
63452 +int
63453 +gr_set_acls(const int type)
63454 +{
63455 + return 0;
63456 +}
63457 +
63458 +int
63459 +gr_check_hidden_task(const struct task_struct *tsk)
63460 +{
63461 + return 0;
63462 +}
63463 +
63464 +int
63465 +gr_check_protected_task(const struct task_struct *task)
63466 +{
63467 + return 0;
63468 +}
63469 +
63470 +int
63471 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
63472 +{
63473 + return 0;
63474 +}
63475 +
63476 +void
63477 +gr_copy_label(struct task_struct *tsk)
63478 +{
63479 + return;
63480 +}
63481 +
63482 +void
63483 +gr_set_pax_flags(struct task_struct *task)
63484 +{
63485 + return;
63486 +}
63487 +
63488 +int
63489 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
63490 + const int unsafe_share)
63491 +{
63492 + return 0;
63493 +}
63494 +
63495 +void
63496 +gr_handle_delete(const ino_t ino, const dev_t dev)
63497 +{
63498 + return;
63499 +}
63500 +
63501 +void
63502 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
63503 +{
63504 + return;
63505 +}
63506 +
63507 +void
63508 +gr_handle_crash(struct task_struct *task, const int sig)
63509 +{
63510 + return;
63511 +}
63512 +
63513 +int
63514 +gr_check_crash_exec(const struct file *filp)
63515 +{
63516 + return 0;
63517 +}
63518 +
63519 +int
63520 +gr_check_crash_uid(const uid_t uid)
63521 +{
63522 + return 0;
63523 +}
63524 +
63525 +void
63526 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
63527 + struct dentry *old_dentry,
63528 + struct dentry *new_dentry,
63529 + struct vfsmount *mnt, const __u8 replace)
63530 +{
63531 + return;
63532 +}
63533 +
63534 +int
63535 +gr_search_socket(const int family, const int type, const int protocol)
63536 +{
63537 + return 1;
63538 +}
63539 +
63540 +int
63541 +gr_search_connectbind(const int mode, const struct socket *sock,
63542 + const struct sockaddr_in *addr)
63543 +{
63544 + return 0;
63545 +}
63546 +
63547 +void
63548 +gr_handle_alertkill(struct task_struct *task)
63549 +{
63550 + return;
63551 +}
63552 +
63553 +__u32
63554 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
63555 +{
63556 + return 1;
63557 +}
63558 +
63559 +__u32
63560 +gr_acl_handle_hidden_file(const struct dentry * dentry,
63561 + const struct vfsmount * mnt)
63562 +{
63563 + return 1;
63564 +}
63565 +
63566 +__u32
63567 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
63568 + int acc_mode)
63569 +{
63570 + return 1;
63571 +}
63572 +
63573 +__u32
63574 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
63575 +{
63576 + return 1;
63577 +}
63578 +
63579 +__u32
63580 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
63581 +{
63582 + return 1;
63583 +}
63584 +
63585 +int
63586 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
63587 + unsigned int *vm_flags)
63588 +{
63589 + return 1;
63590 +}
63591 +
63592 +__u32
63593 +gr_acl_handle_truncate(const struct dentry * dentry,
63594 + const struct vfsmount * mnt)
63595 +{
63596 + return 1;
63597 +}
63598 +
63599 +__u32
63600 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
63601 +{
63602 + return 1;
63603 +}
63604 +
63605 +__u32
63606 +gr_acl_handle_access(const struct dentry * dentry,
63607 + const struct vfsmount * mnt, const int fmode)
63608 +{
63609 + return 1;
63610 +}
63611 +
63612 +__u32
63613 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
63614 + umode_t *mode)
63615 +{
63616 + return 1;
63617 +}
63618 +
63619 +__u32
63620 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
63621 +{
63622 + return 1;
63623 +}
63624 +
63625 +__u32
63626 +gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
63627 +{
63628 + return 1;
63629 +}
63630 +
63631 +void
63632 +grsecurity_init(void)
63633 +{
63634 + return;
63635 +}
63636 +
63637 +umode_t gr_acl_umask(void)
63638 +{
63639 + return 0;
63640 +}
63641 +
63642 +__u32
63643 +gr_acl_handle_mknod(const struct dentry * new_dentry,
63644 + const struct dentry * parent_dentry,
63645 + const struct vfsmount * parent_mnt,
63646 + const int mode)
63647 +{
63648 + return 1;
63649 +}
63650 +
63651 +__u32
63652 +gr_acl_handle_mkdir(const struct dentry * new_dentry,
63653 + const struct dentry * parent_dentry,
63654 + const struct vfsmount * parent_mnt)
63655 +{
63656 + return 1;
63657 +}
63658 +
63659 +__u32
63660 +gr_acl_handle_symlink(const struct dentry * new_dentry,
63661 + const struct dentry * parent_dentry,
63662 + const struct vfsmount * parent_mnt, const char *from)
63663 +{
63664 + return 1;
63665 +}
63666 +
63667 +__u32
63668 +gr_acl_handle_link(const struct dentry * new_dentry,
63669 + const struct dentry * parent_dentry,
63670 + const struct vfsmount * parent_mnt,
63671 + const struct dentry * old_dentry,
63672 + const struct vfsmount * old_mnt, const char *to)
63673 +{
63674 + return 1;
63675 +}
63676 +
63677 +int
63678 +gr_acl_handle_rename(const struct dentry *new_dentry,
63679 + const struct dentry *parent_dentry,
63680 + const struct vfsmount *parent_mnt,
63681 + const struct dentry *old_dentry,
63682 + const struct inode *old_parent_inode,
63683 + const struct vfsmount *old_mnt, const char *newname)
63684 +{
63685 + return 0;
63686 +}
63687 +
63688 +int
63689 +gr_acl_handle_filldir(const struct file *file, const char *name,
63690 + const int namelen, const ino_t ino)
63691 +{
63692 + return 1;
63693 +}
63694 +
63695 +int
63696 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
63697 + const time_t shm_createtime, const uid_t cuid, const int shmid)
63698 +{
63699 + return 1;
63700 +}
63701 +
63702 +int
63703 +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
63704 +{
63705 + return 0;
63706 +}
63707 +
63708 +int
63709 +gr_search_accept(const struct socket *sock)
63710 +{
63711 + return 0;
63712 +}
63713 +
63714 +int
63715 +gr_search_listen(const struct socket *sock)
63716 +{
63717 + return 0;
63718 +}
63719 +
63720 +int
63721 +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
63722 +{
63723 + return 0;
63724 +}
63725 +
63726 +__u32
63727 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
63728 +{
63729 + return 1;
63730 +}
63731 +
63732 +__u32
63733 +gr_acl_handle_creat(const struct dentry * dentry,
63734 + const struct dentry * p_dentry,
63735 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
63736 + const int imode)
63737 +{
63738 + return 1;
63739 +}
63740 +
63741 +void
63742 +gr_acl_handle_exit(void)
63743 +{
63744 + return;
63745 +}
63746 +
63747 +int
63748 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
63749 +{
63750 + return 1;
63751 +}
63752 +
63753 +void
63754 +gr_set_role_label(const uid_t uid, const gid_t gid)
63755 +{
63756 + return;
63757 +}
63758 +
63759 +int
63760 +gr_acl_handle_procpidmem(const struct task_struct *task)
63761 +{
63762 + return 0;
63763 +}
63764 +
63765 +int
63766 +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
63767 +{
63768 + return 0;
63769 +}
63770 +
63771 +int
63772 +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
63773 +{
63774 + return 0;
63775 +}
63776 +
63777 +void
63778 +gr_set_kernel_label(struct task_struct *task)
63779 +{
63780 + return;
63781 +}
63782 +
63783 +int
63784 +gr_check_user_change(int real, int effective, int fs)
63785 +{
63786 + return 0;
63787 +}
63788 +
63789 +int
63790 +gr_check_group_change(int real, int effective, int fs)
63791 +{
63792 + return 0;
63793 +}
63794 +
63795 +int gr_acl_enable_at_secure(void)
63796 +{
63797 + return 0;
63798 +}
63799 +
63800 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
63801 +{
63802 + return dentry->d_inode->i_sb->s_dev;
63803 +}
63804 +
63805 +EXPORT_SYMBOL(gr_learn_resource);
63806 +EXPORT_SYMBOL(gr_set_kernel_label);
63807 +#ifdef CONFIG_SECURITY
63808 +EXPORT_SYMBOL(gr_check_user_change);
63809 +EXPORT_SYMBOL(gr_check_group_change);
63810 +#endif
63811 diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
63812 new file mode 100644
63813 index 0000000..a96e155
63814 --- /dev/null
63815 +++ b/grsecurity/grsec_exec.c
63816 @@ -0,0 +1,204 @@
63817 +#include <linux/kernel.h>
63818 +#include <linux/sched.h>
63819 +#include <linux/file.h>
63820 +#include <linux/binfmts.h>
63821 +#include <linux/smp_lock.h>
63822 +#include <linux/fs.h>
63823 +#include <linux/types.h>
63824 +#include <linux/grdefs.h>
63825 +#include <linux/grinternal.h>
63826 +#include <linux/capability.h>
63827 +#include <linux/compat.h>
63828 +#include <linux/module.h>
63829 +
63830 +#include <asm/uaccess.h>
63831 +
63832 +#ifdef CONFIG_GRKERNSEC_EXECLOG
63833 +static char gr_exec_arg_buf[132];
63834 +static DEFINE_MUTEX(gr_exec_arg_mutex);
63835 +#endif
63836 +
63837 +void
63838 +gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv)
63839 +{
63840 +#ifdef CONFIG_GRKERNSEC_EXECLOG
63841 + char *grarg = gr_exec_arg_buf;
63842 + unsigned int i, x, execlen = 0;
63843 + char c;
63844 +
63845 + if (!((grsec_enable_execlog && grsec_enable_group &&
63846 + in_group_p(grsec_audit_gid))
63847 + || (grsec_enable_execlog && !grsec_enable_group)))
63848 + return;
63849 +
63850 + mutex_lock(&gr_exec_arg_mutex);
63851 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
63852 +
63853 + if (unlikely(argv == NULL))
63854 + goto log;
63855 +
63856 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
63857 + const char __user *p;
63858 + unsigned int len;
63859 +
63860 + if (copy_from_user(&p, argv + i, sizeof(p)))
63861 + goto log;
63862 + if (!p)
63863 + goto log;
63864 + len = strnlen_user(p, 128 - execlen);
63865 + if (len > 128 - execlen)
63866 + len = 128 - execlen;
63867 + else if (len > 0)
63868 + len--;
63869 + if (copy_from_user(grarg + execlen, p, len))
63870 + goto log;
63871 +
63872 + /* rewrite unprintable characters */
63873 + for (x = 0; x < len; x++) {
63874 + c = *(grarg + execlen + x);
63875 + if (c < 32 || c > 126)
63876 + *(grarg + execlen + x) = ' ';
63877 + }
63878 +
63879 + execlen += len;
63880 + *(grarg + execlen) = ' ';
63881 + *(grarg + execlen + 1) = '\0';
63882 + execlen++;
63883 + }
63884 +
63885 + log:
63886 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
63887 + bprm->file->f_path.mnt, grarg);
63888 + mutex_unlock(&gr_exec_arg_mutex);
63889 +#endif
63890 + return;
63891 +}
63892 +
63893 +#ifdef CONFIG_COMPAT
63894 +void
63895 +gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv)
63896 +{
63897 +#ifdef CONFIG_GRKERNSEC_EXECLOG
63898 + char *grarg = gr_exec_arg_buf;
63899 + unsigned int i, x, execlen = 0;
63900 + char c;
63901 +
63902 + if (!((grsec_enable_execlog && grsec_enable_group &&
63903 + in_group_p(grsec_audit_gid))
63904 + || (grsec_enable_execlog && !grsec_enable_group)))
63905 + return;
63906 +
63907 + mutex_lock(&gr_exec_arg_mutex);
63908 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
63909 +
63910 + if (unlikely(argv == NULL))
63911 + goto log;
63912 +
63913 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
63914 + compat_uptr_t p;
63915 + unsigned int len;
63916 +
63917 + if (get_user(p, argv + i))
63918 + goto log;
63919 + len = strnlen_user(compat_ptr(p), 128 - execlen);
63920 + if (len > 128 - execlen)
63921 + len = 128 - execlen;
63922 + else if (len > 0)
63923 + len--;
63924 + else
63925 + goto log;
63926 + if (copy_from_user(grarg + execlen, compat_ptr(p), len))
63927 + goto log;
63928 +
63929 + /* rewrite unprintable characters */
63930 + for (x = 0; x < len; x++) {
63931 + c = *(grarg + execlen + x);
63932 + if (c < 32 || c > 126)
63933 + *(grarg + execlen + x) = ' ';
63934 + }
63935 +
63936 + execlen += len;
63937 + *(grarg + execlen) = ' ';
63938 + *(grarg + execlen + 1) = '\0';
63939 + execlen++;
63940 + }
63941 +
63942 + log:
63943 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
63944 + bprm->file->f_path.mnt, grarg);
63945 + mutex_unlock(&gr_exec_arg_mutex);
63946 +#endif
63947 + return;
63948 +}
63949 +#endif
63950 +
63951 +#ifdef CONFIG_GRKERNSEC
63952 +extern int gr_acl_is_capable(const int cap);
63953 +extern int gr_acl_is_capable_nolog(const int cap);
63954 +extern int gr_chroot_is_capable(const int cap);
63955 +extern int gr_chroot_is_capable_nolog(const int cap);
63956 +#endif
63957 +
63958 +const char *captab_log[] = {
63959 + "CAP_CHOWN",
63960 + "CAP_DAC_OVERRIDE",
63961 + "CAP_DAC_READ_SEARCH",
63962 + "CAP_FOWNER",
63963 + "CAP_FSETID",
63964 + "CAP_KILL",
63965 + "CAP_SETGID",
63966 + "CAP_SETUID",
63967 + "CAP_SETPCAP",
63968 + "CAP_LINUX_IMMUTABLE",
63969 + "CAP_NET_BIND_SERVICE",
63970 + "CAP_NET_BROADCAST",
63971 + "CAP_NET_ADMIN",
63972 + "CAP_NET_RAW",
63973 + "CAP_IPC_LOCK",
63974 + "CAP_IPC_OWNER",
63975 + "CAP_SYS_MODULE",
63976 + "CAP_SYS_RAWIO",
63977 + "CAP_SYS_CHROOT",
63978 + "CAP_SYS_PTRACE",
63979 + "CAP_SYS_PACCT",
63980 + "CAP_SYS_ADMIN",
63981 + "CAP_SYS_BOOT",
63982 + "CAP_SYS_NICE",
63983 + "CAP_SYS_RESOURCE",
63984 + "CAP_SYS_TIME",
63985 + "CAP_SYS_TTY_CONFIG",
63986 + "CAP_MKNOD",
63987 + "CAP_LEASE",
63988 + "CAP_AUDIT_WRITE",
63989 + "CAP_AUDIT_CONTROL",
63990 + "CAP_SETFCAP",
63991 + "CAP_MAC_OVERRIDE",
63992 + "CAP_MAC_ADMIN"
63993 +};
63994 +
63995 +int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
63996 +
63997 +int gr_is_capable(const int cap)
63998 +{
63999 +#ifdef CONFIG_GRKERNSEC
64000 + if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
64001 + return 1;
64002 + return 0;
64003 +#else
64004 + return 1;
64005 +#endif
64006 +}
64007 +
64008 +int gr_is_capable_nolog(const int cap)
64009 +{
64010 +#ifdef CONFIG_GRKERNSEC
64011 + if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
64012 + return 1;
64013 + return 0;
64014 +#else
64015 + return 1;
64016 +#endif
64017 +}
64018 +
64019 +EXPORT_SYMBOL(gr_is_capable);
64020 +EXPORT_SYMBOL(gr_is_capable_nolog);
64021 diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
64022 new file mode 100644
64023 index 0000000..d3ee748
64024 --- /dev/null
64025 +++ b/grsecurity/grsec_fifo.c
64026 @@ -0,0 +1,24 @@
64027 +#include <linux/kernel.h>
64028 +#include <linux/sched.h>
64029 +#include <linux/fs.h>
64030 +#include <linux/file.h>
64031 +#include <linux/grinternal.h>
64032 +
64033 +int
64034 +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
64035 + const struct dentry *dir, const int flag, const int acc_mode)
64036 +{
64037 +#ifdef CONFIG_GRKERNSEC_FIFO
64038 + const struct cred *cred = current_cred();
64039 +
64040 + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
64041 + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
64042 + (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
64043 + (cred->fsuid != dentry->d_inode->i_uid)) {
64044 + if (!inode_permission(dentry->d_inode, acc_mode))
64045 + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
64046 + return -EACCES;
64047 + }
64048 +#endif
64049 + return 0;
64050 +}
64051 diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
64052 new file mode 100644
64053 index 0000000..8ca18bf
64054 --- /dev/null
64055 +++ b/grsecurity/grsec_fork.c
64056 @@ -0,0 +1,23 @@
64057 +#include <linux/kernel.h>
64058 +#include <linux/sched.h>
64059 +#include <linux/grsecurity.h>
64060 +#include <linux/grinternal.h>
64061 +#include <linux/errno.h>
64062 +
64063 +void
64064 +gr_log_forkfail(const int retval)
64065 +{
64066 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
64067 + if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
64068 + switch (retval) {
64069 + case -EAGAIN:
64070 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
64071 + break;
64072 + case -ENOMEM:
64073 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
64074 + break;
64075 + }
64076 + }
64077 +#endif
64078 + return;
64079 +}
64080 diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
64081 new file mode 100644
64082 index 0000000..1e995d3
64083 --- /dev/null
64084 +++ b/grsecurity/grsec_init.c
64085 @@ -0,0 +1,278 @@
64086 +#include <linux/kernel.h>
64087 +#include <linux/sched.h>
64088 +#include <linux/mm.h>
64089 +#include <linux/smp_lock.h>
64090 +#include <linux/gracl.h>
64091 +#include <linux/slab.h>
64092 +#include <linux/vmalloc.h>
64093 +#include <linux/percpu.h>
64094 +#include <linux/module.h>
64095 +
64096 +int grsec_enable_ptrace_readexec;
64097 +int grsec_enable_setxid;
64098 +int grsec_enable_brute;
64099 +int grsec_enable_link;
64100 +int grsec_enable_dmesg;
64101 +int grsec_enable_harden_ptrace;
64102 +int grsec_enable_fifo;
64103 +int grsec_enable_execlog;
64104 +int grsec_enable_signal;
64105 +int grsec_enable_forkfail;
64106 +int grsec_enable_audit_ptrace;
64107 +int grsec_enable_time;
64108 +int grsec_enable_audit_textrel;
64109 +int grsec_enable_group;
64110 +int grsec_audit_gid;
64111 +int grsec_enable_chdir;
64112 +int grsec_enable_mount;
64113 +int grsec_enable_rofs;
64114 +int grsec_enable_chroot_findtask;
64115 +int grsec_enable_chroot_mount;
64116 +int grsec_enable_chroot_shmat;
64117 +int grsec_enable_chroot_fchdir;
64118 +int grsec_enable_chroot_double;
64119 +int grsec_enable_chroot_pivot;
64120 +int grsec_enable_chroot_chdir;
64121 +int grsec_enable_chroot_chmod;
64122 +int grsec_enable_chroot_mknod;
64123 +int grsec_enable_chroot_nice;
64124 +int grsec_enable_chroot_execlog;
64125 +int grsec_enable_chroot_caps;
64126 +int grsec_enable_chroot_sysctl;
64127 +int grsec_enable_chroot_unix;
64128 +int grsec_enable_tpe;
64129 +int grsec_tpe_gid;
64130 +int grsec_enable_blackhole;
64131 +#ifdef CONFIG_IPV6_MODULE
64132 +EXPORT_SYMBOL(grsec_enable_blackhole);
64133 +#endif
64134 +int grsec_lastack_retries;
64135 +int grsec_enable_tpe_all;
64136 +int grsec_enable_tpe_invert;
64137 +int grsec_enable_socket_all;
64138 +int grsec_socket_all_gid;
64139 +int grsec_enable_socket_client;
64140 +int grsec_socket_client_gid;
64141 +int grsec_enable_socket_server;
64142 +int grsec_socket_server_gid;
64143 +int grsec_resource_logging;
64144 +int grsec_disable_privio;
64145 +int grsec_enable_log_rwxmaps;
64146 +int grsec_lock;
64147 +
64148 +DEFINE_SPINLOCK(grsec_alert_lock);
64149 +unsigned long grsec_alert_wtime = 0;
64150 +unsigned long grsec_alert_fyet = 0;
64151 +
64152 +DEFINE_SPINLOCK(grsec_audit_lock);
64153 +
64154 +DEFINE_RWLOCK(grsec_exec_file_lock);
64155 +
64156 +char *gr_shared_page[4];
64157 +
64158 +char *gr_alert_log_fmt;
64159 +char *gr_audit_log_fmt;
64160 +char *gr_alert_log_buf;
64161 +char *gr_audit_log_buf;
64162 +
64163 +extern struct gr_arg *gr_usermode;
64164 +extern unsigned char *gr_system_salt;
64165 +extern unsigned char *gr_system_sum;
64166 +
64167 +void __init
64168 +grsecurity_init(void)
64169 +{
64170 + int j;
64171 + /* create the per-cpu shared pages */
64172 +
64173 +#ifdef CONFIG_X86
64174 + memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
64175 +#endif
64176 +
64177 + for (j = 0; j < 4; j++) {
64178 + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
64179 + if (gr_shared_page[j] == NULL) {
64180 + panic("Unable to allocate grsecurity shared page");
64181 + return;
64182 + }
64183 + }
64184 +
64185 + /* allocate log buffers */
64186 + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
64187 + if (!gr_alert_log_fmt) {
64188 + panic("Unable to allocate grsecurity alert log format buffer");
64189 + return;
64190 + }
64191 + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
64192 + if (!gr_audit_log_fmt) {
64193 + panic("Unable to allocate grsecurity audit log format buffer");
64194 + return;
64195 + }
64196 + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
64197 + if (!gr_alert_log_buf) {
64198 + panic("Unable to allocate grsecurity alert log buffer");
64199 + return;
64200 + }
64201 + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
64202 + if (!gr_audit_log_buf) {
64203 + panic("Unable to allocate grsecurity audit log buffer");
64204 + return;
64205 + }
64206 +
64207 + /* allocate memory for authentication structure */
64208 + gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
64209 + gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
64210 + gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
64211 +
64212 + if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
64213 + panic("Unable to allocate grsecurity authentication structure");
64214 + return;
64215 + }
64216 +
64217 +
64218 +#ifdef CONFIG_GRKERNSEC_IO
64219 +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
64220 + grsec_disable_privio = 1;
64221 +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
64222 + grsec_disable_privio = 1;
64223 +#else
64224 + grsec_disable_privio = 0;
64225 +#endif
64226 +#endif
64227 +
64228 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
64229 + /* for backward compatibility, tpe_invert always defaults to on if
64230 + enabled in the kernel
64231 + */
64232 + grsec_enable_tpe_invert = 1;
64233 +#endif
64234 +
64235 +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
64236 +#ifndef CONFIG_GRKERNSEC_SYSCTL
64237 + grsec_lock = 1;
64238 +#endif
64239 +
64240 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
64241 + grsec_enable_audit_textrel = 1;
64242 +#endif
64243 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
64244 + grsec_enable_log_rwxmaps = 1;
64245 +#endif
64246 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
64247 + grsec_enable_group = 1;
64248 + grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
64249 +#endif
64250 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
64251 + grsec_enable_chdir = 1;
64252 +#endif
64253 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
64254 + grsec_enable_harden_ptrace = 1;
64255 +#endif
64256 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
64257 + grsec_enable_mount = 1;
64258 +#endif
64259 +#ifdef CONFIG_GRKERNSEC_LINK
64260 + grsec_enable_link = 1;
64261 +#endif
64262 +#ifdef CONFIG_GRKERNSEC_BRUTE
64263 + grsec_enable_brute = 1;
64264 +#endif
64265 +#ifdef CONFIG_GRKERNSEC_DMESG
64266 + grsec_enable_dmesg = 1;
64267 +#endif
64268 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
64269 + grsec_enable_blackhole = 1;
64270 + grsec_lastack_retries = 4;
64271 +#endif
64272 +#ifdef CONFIG_GRKERNSEC_FIFO
64273 + grsec_enable_fifo = 1;
64274 +#endif
64275 +#ifdef CONFIG_GRKERNSEC_EXECLOG
64276 + grsec_enable_execlog = 1;
64277 +#endif
64278 +#ifdef CONFIG_GRKERNSEC_SETXID
64279 + grsec_enable_setxid = 1;
64280 +#endif
64281 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
64282 + grsec_enable_ptrace_readexec = 1;
64283 +#endif
64284 +#ifdef CONFIG_GRKERNSEC_SIGNAL
64285 + grsec_enable_signal = 1;
64286 +#endif
64287 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
64288 + grsec_enable_forkfail = 1;
64289 +#endif
64290 +#ifdef CONFIG_GRKERNSEC_TIME
64291 + grsec_enable_time = 1;
64292 +#endif
64293 +#ifdef CONFIG_GRKERNSEC_RESLOG
64294 + grsec_resource_logging = 1;
64295 +#endif
64296 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
64297 + grsec_enable_chroot_findtask = 1;
64298 +#endif
64299 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
64300 + grsec_enable_chroot_unix = 1;
64301 +#endif
64302 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
64303 + grsec_enable_chroot_mount = 1;
64304 +#endif
64305 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
64306 + grsec_enable_chroot_fchdir = 1;
64307 +#endif
64308 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
64309 + grsec_enable_chroot_shmat = 1;
64310 +#endif
64311 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
64312 + grsec_enable_audit_ptrace = 1;
64313 +#endif
64314 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
64315 + grsec_enable_chroot_double = 1;
64316 +#endif
64317 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
64318 + grsec_enable_chroot_pivot = 1;
64319 +#endif
64320 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
64321 + grsec_enable_chroot_chdir = 1;
64322 +#endif
64323 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
64324 + grsec_enable_chroot_chmod = 1;
64325 +#endif
64326 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
64327 + grsec_enable_chroot_mknod = 1;
64328 +#endif
64329 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
64330 + grsec_enable_chroot_nice = 1;
64331 +#endif
64332 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
64333 + grsec_enable_chroot_execlog = 1;
64334 +#endif
64335 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
64336 + grsec_enable_chroot_caps = 1;
64337 +#endif
64338 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
64339 + grsec_enable_chroot_sysctl = 1;
64340 +#endif
64341 +#ifdef CONFIG_GRKERNSEC_TPE
64342 + grsec_enable_tpe = 1;
64343 + grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
64344 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
64345 + grsec_enable_tpe_all = 1;
64346 +#endif
64347 +#endif
64348 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
64349 + grsec_enable_socket_all = 1;
64350 + grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
64351 +#endif
64352 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
64353 + grsec_enable_socket_client = 1;
64354 + grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
64355 +#endif
64356 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
64357 + grsec_enable_socket_server = 1;
64358 + grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
64359 +#endif
64360 +#endif
64361 +
64362 + return;
64363 +}
64364 diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
64365 new file mode 100644
64366 index 0000000..3efe141
64367 --- /dev/null
64368 +++ b/grsecurity/grsec_link.c
64369 @@ -0,0 +1,43 @@
64370 +#include <linux/kernel.h>
64371 +#include <linux/sched.h>
64372 +#include <linux/fs.h>
64373 +#include <linux/file.h>
64374 +#include <linux/grinternal.h>
64375 +
64376 +int
64377 +gr_handle_follow_link(const struct inode *parent,
64378 + const struct inode *inode,
64379 + const struct dentry *dentry, const struct vfsmount *mnt)
64380 +{
64381 +#ifdef CONFIG_GRKERNSEC_LINK
64382 + const struct cred *cred = current_cred();
64383 +
64384 + if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
64385 + (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
64386 + (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
64387 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
64388 + return -EACCES;
64389 + }
64390 +#endif
64391 + return 0;
64392 +}
64393 +
64394 +int
64395 +gr_handle_hardlink(const struct dentry *dentry,
64396 + const struct vfsmount *mnt,
64397 + struct inode *inode, const int mode, const char *to)
64398 +{
64399 +#ifdef CONFIG_GRKERNSEC_LINK
64400 + const struct cred *cred = current_cred();
64401 +
64402 + if (grsec_enable_link && cred->fsuid != inode->i_uid &&
64403 + (!S_ISREG(mode) || (mode & S_ISUID) ||
64404 + ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
64405 + (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
64406 + !capable(CAP_FOWNER) && cred->uid) {
64407 + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
64408 + return -EPERM;
64409 + }
64410 +#endif
64411 + return 0;
64412 +}
64413 diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
64414 new file mode 100644
64415 index 0000000..a45d2e9
64416 --- /dev/null
64417 +++ b/grsecurity/grsec_log.c
64418 @@ -0,0 +1,322 @@
64419 +#include <linux/kernel.h>
64420 +#include <linux/sched.h>
64421 +#include <linux/file.h>
64422 +#include <linux/tty.h>
64423 +#include <linux/fs.h>
64424 +#include <linux/grinternal.h>
64425 +
64426 +#ifdef CONFIG_TREE_PREEMPT_RCU
64427 +#define DISABLE_PREEMPT() preempt_disable()
64428 +#define ENABLE_PREEMPT() preempt_enable()
64429 +#else
64430 +#define DISABLE_PREEMPT()
64431 +#define ENABLE_PREEMPT()
64432 +#endif
64433 +
64434 +#define BEGIN_LOCKS(x) \
64435 + DISABLE_PREEMPT(); \
64436 + rcu_read_lock(); \
64437 + read_lock(&tasklist_lock); \
64438 + read_lock(&grsec_exec_file_lock); \
64439 + if (x != GR_DO_AUDIT) \
64440 + spin_lock(&grsec_alert_lock); \
64441 + else \
64442 + spin_lock(&grsec_audit_lock)
64443 +
64444 +#define END_LOCKS(x) \
64445 + if (x != GR_DO_AUDIT) \
64446 + spin_unlock(&grsec_alert_lock); \
64447 + else \
64448 + spin_unlock(&grsec_audit_lock); \
64449 + read_unlock(&grsec_exec_file_lock); \
64450 + read_unlock(&tasklist_lock); \
64451 + rcu_read_unlock(); \
64452 + ENABLE_PREEMPT(); \
64453 + if (x == GR_DONT_AUDIT) \
64454 + gr_handle_alertkill(current)
64455 +
64456 +enum {
64457 + FLOODING,
64458 + NO_FLOODING
64459 +};
64460 +
64461 +extern char *gr_alert_log_fmt;
64462 +extern char *gr_audit_log_fmt;
64463 +extern char *gr_alert_log_buf;
64464 +extern char *gr_audit_log_buf;
64465 +
64466 +static int gr_log_start(int audit)
64467 +{
64468 + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
64469 + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
64470 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
64471 +#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
64472 + unsigned long curr_secs = get_seconds();
64473 +
64474 + if (audit == GR_DO_AUDIT)
64475 + goto set_fmt;
64476 +
64477 + if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
64478 + grsec_alert_wtime = curr_secs;
64479 + grsec_alert_fyet = 0;
64480 + } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
64481 + && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
64482 + grsec_alert_fyet++;
64483 + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
64484 + grsec_alert_wtime = curr_secs;
64485 + grsec_alert_fyet++;
64486 + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
64487 + return FLOODING;
64488 + }
64489 + else return FLOODING;
64490 +
64491 +set_fmt:
64492 +#endif
64493 + memset(buf, 0, PAGE_SIZE);
64494 + if (current->signal->curr_ip && gr_acl_is_enabled()) {
64495 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
64496 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
64497 + } else if (current->signal->curr_ip) {
64498 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
64499 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
64500 + } else if (gr_acl_is_enabled()) {
64501 + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
64502 + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
64503 + } else {
64504 + sprintf(fmt, "%s%s", loglevel, "grsec: ");
64505 + strcpy(buf, fmt);
64506 + }
64507 +
64508 + return NO_FLOODING;
64509 +}
64510 +
64511 +static void gr_log_middle(int audit, const char *msg, va_list ap)
64512 + __attribute__ ((format (printf, 2, 0)));
64513 +
64514 +static void gr_log_middle(int audit, const char *msg, va_list ap)
64515 +{
64516 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
64517 + unsigned int len = strlen(buf);
64518 +
64519 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
64520 +
64521 + return;
64522 +}
64523 +
64524 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
64525 + __attribute__ ((format (printf, 2, 3)));
64526 +
64527 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
64528 +{
64529 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
64530 + unsigned int len = strlen(buf);
64531 + va_list ap;
64532 +
64533 + va_start(ap, msg);
64534 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
64535 + va_end(ap);
64536 +
64537 + return;
64538 +}
64539 +
64540 +static void gr_log_end(int audit, int append_default)
64541 +{
64542 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
64543 +
64544 + if (append_default) {
64545 + unsigned int len = strlen(buf);
64546 + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
64547 + }
64548 +
64549 + printk("%s\n", buf);
64550 +
64551 + return;
64552 +}
64553 +
64554 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
64555 +{
64556 + int logtype;
64557 + char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
64558 + char *str1 = NULL, *str2 = NULL, *str3 = NULL;
64559 + void *voidptr = NULL;
64560 + int num1 = 0, num2 = 0;
64561 + unsigned long ulong1 = 0, ulong2 = 0;
64562 + struct dentry *dentry = NULL;
64563 + struct vfsmount *mnt = NULL;
64564 + struct file *file = NULL;
64565 + struct task_struct *task = NULL;
64566 + const struct cred *cred, *pcred;
64567 + va_list ap;
64568 +
64569 + BEGIN_LOCKS(audit);
64570 + logtype = gr_log_start(audit);
64571 + if (logtype == FLOODING) {
64572 + END_LOCKS(audit);
64573 + return;
64574 + }
64575 + va_start(ap, argtypes);
64576 + switch (argtypes) {
64577 + case GR_TTYSNIFF:
64578 + task = va_arg(ap, struct task_struct *);
64579 + gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
64580 + break;
64581 + case GR_SYSCTL_HIDDEN:
64582 + str1 = va_arg(ap, char *);
64583 + gr_log_middle_varargs(audit, msg, result, str1);
64584 + break;
64585 + case GR_RBAC:
64586 + dentry = va_arg(ap, struct dentry *);
64587 + mnt = va_arg(ap, struct vfsmount *);
64588 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
64589 + break;
64590 + case GR_RBAC_STR:
64591 + dentry = va_arg(ap, struct dentry *);
64592 + mnt = va_arg(ap, struct vfsmount *);
64593 + str1 = va_arg(ap, char *);
64594 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
64595 + break;
64596 + case GR_STR_RBAC:
64597 + str1 = va_arg(ap, char *);
64598 + dentry = va_arg(ap, struct dentry *);
64599 + mnt = va_arg(ap, struct vfsmount *);
64600 + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
64601 + break;
64602 + case GR_RBAC_MODE2:
64603 + dentry = va_arg(ap, struct dentry *);
64604 + mnt = va_arg(ap, struct vfsmount *);
64605 + str1 = va_arg(ap, char *);
64606 + str2 = va_arg(ap, char *);
64607 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
64608 + break;
64609 + case GR_RBAC_MODE3:
64610 + dentry = va_arg(ap, struct dentry *);
64611 + mnt = va_arg(ap, struct vfsmount *);
64612 + str1 = va_arg(ap, char *);
64613 + str2 = va_arg(ap, char *);
64614 + str3 = va_arg(ap, char *);
64615 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
64616 + break;
64617 + case GR_FILENAME:
64618 + dentry = va_arg(ap, struct dentry *);
64619 + mnt = va_arg(ap, struct vfsmount *);
64620 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
64621 + break;
64622 + case GR_STR_FILENAME:
64623 + str1 = va_arg(ap, char *);
64624 + dentry = va_arg(ap, struct dentry *);
64625 + mnt = va_arg(ap, struct vfsmount *);
64626 + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
64627 + break;
64628 + case GR_FILENAME_STR:
64629 + dentry = va_arg(ap, struct dentry *);
64630 + mnt = va_arg(ap, struct vfsmount *);
64631 + str1 = va_arg(ap, char *);
64632 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
64633 + break;
64634 + case GR_FILENAME_TWO_INT:
64635 + dentry = va_arg(ap, struct dentry *);
64636 + mnt = va_arg(ap, struct vfsmount *);
64637 + num1 = va_arg(ap, int);
64638 + num2 = va_arg(ap, int);
64639 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
64640 + break;
64641 + case GR_FILENAME_TWO_INT_STR:
64642 + dentry = va_arg(ap, struct dentry *);
64643 + mnt = va_arg(ap, struct vfsmount *);
64644 + num1 = va_arg(ap, int);
64645 + num2 = va_arg(ap, int);
64646 + str1 = va_arg(ap, char *);
64647 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
64648 + break;
64649 + case GR_TEXTREL:
64650 + file = va_arg(ap, struct file *);
64651 + ulong1 = va_arg(ap, unsigned long);
64652 + ulong2 = va_arg(ap, unsigned long);
64653 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
64654 + break;
64655 + case GR_PTRACE:
64656 + task = va_arg(ap, struct task_struct *);
64657 + gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
64658 + break;
64659 + case GR_RESOURCE:
64660 + task = va_arg(ap, struct task_struct *);
64661 + cred = __task_cred(task);
64662 + pcred = __task_cred(task->real_parent);
64663 + ulong1 = va_arg(ap, unsigned long);
64664 + str1 = va_arg(ap, char *);
64665 + ulong2 = va_arg(ap, unsigned long);
64666 + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
64667 + break;
64668 + case GR_CAP:
64669 + task = va_arg(ap, struct task_struct *);
64670 + cred = __task_cred(task);
64671 + pcred = __task_cred(task->real_parent);
64672 + str1 = va_arg(ap, char *);
64673 + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
64674 + break;
64675 + case GR_SIG:
64676 + str1 = va_arg(ap, char *);
64677 + voidptr = va_arg(ap, void *);
64678 + gr_log_middle_varargs(audit, msg, str1, voidptr);
64679 + break;
64680 + case GR_SIG2:
64681 + task = va_arg(ap, struct task_struct *);
64682 + cred = __task_cred(task);
64683 + pcred = __task_cred(task->real_parent);
64684 + num1 = va_arg(ap, int);
64685 + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
64686 + break;
64687 + case GR_CRASH1:
64688 + task = va_arg(ap, struct task_struct *);
64689 + cred = __task_cred(task);
64690 + pcred = __task_cred(task->real_parent);
64691 + ulong1 = va_arg(ap, unsigned long);
64692 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
64693 + break;
64694 + case GR_CRASH2:
64695 + task = va_arg(ap, struct task_struct *);
64696 + cred = __task_cred(task);
64697 + pcred = __task_cred(task->real_parent);
64698 + ulong1 = va_arg(ap, unsigned long);
64699 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
64700 + break;
64701 + case GR_RWXMAP:
64702 + file = va_arg(ap, struct file *);
64703 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
64704 + break;
64705 + case GR_PSACCT:
64706 + {
64707 + unsigned int wday, cday;
64708 + __u8 whr, chr;
64709 + __u8 wmin, cmin;
64710 + __u8 wsec, csec;
64711 + char cur_tty[64] = { 0 };
64712 + char parent_tty[64] = { 0 };
64713 +
64714 + task = va_arg(ap, struct task_struct *);
64715 + wday = va_arg(ap, unsigned int);
64716 + cday = va_arg(ap, unsigned int);
64717 + whr = va_arg(ap, int);
64718 + chr = va_arg(ap, int);
64719 + wmin = va_arg(ap, int);
64720 + cmin = va_arg(ap, int);
64721 + wsec = va_arg(ap, int);
64722 + csec = va_arg(ap, int);
64723 + ulong1 = va_arg(ap, unsigned long);
64724 + cred = __task_cred(task);
64725 + pcred = __task_cred(task->real_parent);
64726 +
64727 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
64728 + }
64729 + break;
64730 + default:
64731 + gr_log_middle(audit, msg, ap);
64732 + }
64733 + va_end(ap);
64734 + // these don't need DEFAULTSECARGS printed on the end
64735 + if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
64736 + gr_log_end(audit, 0);
64737 + else
64738 + gr_log_end(audit, 1);
64739 + END_LOCKS(audit);
64740 +}
64741 diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
64742 new file mode 100644
64743 index 0000000..f536303
64744 --- /dev/null
64745 +++ b/grsecurity/grsec_mem.c
64746 @@ -0,0 +1,40 @@
64747 +#include <linux/kernel.h>
64748 +#include <linux/sched.h>
64749 +#include <linux/mm.h>
64750 +#include <linux/mman.h>
64751 +#include <linux/grinternal.h>
64752 +
64753 +void
64754 +gr_handle_ioperm(void)
64755 +{
64756 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
64757 + return;
64758 +}
64759 +
64760 +void
64761 +gr_handle_iopl(void)
64762 +{
64763 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
64764 + return;
64765 +}
64766 +
64767 +void
64768 +gr_handle_mem_readwrite(u64 from, u64 to)
64769 +{
64770 + gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
64771 + return;
64772 +}
64773 +
64774 +void
64775 +gr_handle_vm86(void)
64776 +{
64777 + gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
64778 + return;
64779 +}
64780 +
64781 +void
64782 +gr_log_badprocpid(const char *entry)
64783 +{
64784 + gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
64785 + return;
64786 +}
64787 diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
64788 new file mode 100644
64789 index 0000000..2131422
64790 --- /dev/null
64791 +++ b/grsecurity/grsec_mount.c
64792 @@ -0,0 +1,62 @@
64793 +#include <linux/kernel.h>
64794 +#include <linux/sched.h>
64795 +#include <linux/mount.h>
64796 +#include <linux/grsecurity.h>
64797 +#include <linux/grinternal.h>
64798 +
64799 +void
64800 +gr_log_remount(const char *devname, const int retval)
64801 +{
64802 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
64803 + if (grsec_enable_mount && (retval >= 0))
64804 + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
64805 +#endif
64806 + return;
64807 +}
64808 +
64809 +void
64810 +gr_log_unmount(const char *devname, const int retval)
64811 +{
64812 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
64813 + if (grsec_enable_mount && (retval >= 0))
64814 + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
64815 +#endif
64816 + return;
64817 +}
64818 +
64819 +void
64820 +gr_log_mount(const char *from, const char *to, const int retval)
64821 +{
64822 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
64823 + if (grsec_enable_mount && (retval >= 0))
64824 + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
64825 +#endif
64826 + return;
64827 +}
64828 +
64829 +int
64830 +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
64831 +{
64832 +#ifdef CONFIG_GRKERNSEC_ROFS
64833 + if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
64834 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
64835 + return -EPERM;
64836 + } else
64837 + return 0;
64838 +#endif
64839 + return 0;
64840 +}
64841 +
64842 +int
64843 +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
64844 +{
64845 +#ifdef CONFIG_GRKERNSEC_ROFS
64846 + if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
64847 + dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
64848 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
64849 + return -EPERM;
64850 + } else
64851 + return 0;
64852 +#endif
64853 + return 0;
64854 +}
64855 diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
64856 new file mode 100644
64857 index 0000000..a3b12a0
64858 --- /dev/null
64859 +++ b/grsecurity/grsec_pax.c
64860 @@ -0,0 +1,36 @@
64861 +#include <linux/kernel.h>
64862 +#include <linux/sched.h>
64863 +#include <linux/mm.h>
64864 +#include <linux/file.h>
64865 +#include <linux/grinternal.h>
64866 +#include <linux/grsecurity.h>
64867 +
64868 +void
64869 +gr_log_textrel(struct vm_area_struct * vma)
64870 +{
64871 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
64872 + if (grsec_enable_audit_textrel)
64873 + gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
64874 +#endif
64875 + return;
64876 +}
64877 +
64878 +void
64879 +gr_log_rwxmmap(struct file *file)
64880 +{
64881 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
64882 + if (grsec_enable_log_rwxmaps)
64883 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
64884 +#endif
64885 + return;
64886 +}
64887 +
64888 +void
64889 +gr_log_rwxmprotect(struct file *file)
64890 +{
64891 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
64892 + if (grsec_enable_log_rwxmaps)
64893 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
64894 +#endif
64895 + return;
64896 +}
64897 diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
64898 new file mode 100644
64899 index 0000000..78f8733
64900 --- /dev/null
64901 +++ b/grsecurity/grsec_ptrace.c
64902 @@ -0,0 +1,30 @@
64903 +#include <linux/kernel.h>
64904 +#include <linux/sched.h>
64905 +#include <linux/grinternal.h>
64906 +#include <linux/security.h>
64907 +
64908 +void
64909 +gr_audit_ptrace(struct task_struct *task)
64910 +{
64911 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
64912 + if (grsec_enable_audit_ptrace)
64913 + gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
64914 +#endif
64915 + return;
64916 +}
64917 +
64918 +int
64919 +gr_ptrace_readexec(struct file *file, int unsafe_flags)
64920 +{
64921 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
64922 + const struct dentry *dentry = file->f_path.dentry;
64923 + const struct vfsmount *mnt = file->f_path.mnt;
64924 +
64925 + if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
64926 + (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
64927 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
64928 + return -EACCES;
64929 + }
64930 +#endif
64931 + return 0;
64932 +}
64933 diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
64934 new file mode 100644
64935 index 0000000..c648492
64936 --- /dev/null
64937 +++ b/grsecurity/grsec_sig.c
64938 @@ -0,0 +1,206 @@
64939 +#include <linux/kernel.h>
64940 +#include <linux/sched.h>
64941 +#include <linux/delay.h>
64942 +#include <linux/grsecurity.h>
64943 +#include <linux/grinternal.h>
64944 +#include <linux/hardirq.h>
64945 +
64946 +char *signames[] = {
64947 + [SIGSEGV] = "Segmentation fault",
64948 + [SIGILL] = "Illegal instruction",
64949 + [SIGABRT] = "Abort",
64950 + [SIGBUS] = "Invalid alignment/Bus error"
64951 +};
64952 +
64953 +void
64954 +gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
64955 +{
64956 +#ifdef CONFIG_GRKERNSEC_SIGNAL
64957 + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
64958 + (sig == SIGABRT) || (sig == SIGBUS))) {
64959 + if (t->pid == current->pid) {
64960 + gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
64961 + } else {
64962 + gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
64963 + }
64964 + }
64965 +#endif
64966 + return;
64967 +}
64968 +
64969 +int
64970 +gr_handle_signal(const struct task_struct *p, const int sig)
64971 +{
64972 +#ifdef CONFIG_GRKERNSEC
64973 + /* ignore the 0 signal for protected task checks */
64974 + if (current->pid > 1 && sig && gr_check_protected_task(p)) {
64975 + gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
64976 + return -EPERM;
64977 + } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
64978 + return -EPERM;
64979 + }
64980 +#endif
64981 + return 0;
64982 +}
64983 +
64984 +#ifdef CONFIG_GRKERNSEC
64985 +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
64986 +
64987 +int gr_fake_force_sig(int sig, struct task_struct *t)
64988 +{
64989 + unsigned long int flags;
64990 + int ret, blocked, ignored;
64991 + struct k_sigaction *action;
64992 +
64993 + spin_lock_irqsave(&t->sighand->siglock, flags);
64994 + action = &t->sighand->action[sig-1];
64995 + ignored = action->sa.sa_handler == SIG_IGN;
64996 + blocked = sigismember(&t->blocked, sig);
64997 + if (blocked || ignored) {
64998 + action->sa.sa_handler = SIG_DFL;
64999 + if (blocked) {
65000 + sigdelset(&t->blocked, sig);
65001 + recalc_sigpending_and_wake(t);
65002 + }
65003 + }
65004 + if (action->sa.sa_handler == SIG_DFL)
65005 + t->signal->flags &= ~SIGNAL_UNKILLABLE;
65006 + ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
65007 +
65008 + spin_unlock_irqrestore(&t->sighand->siglock, flags);
65009 +
65010 + return ret;
65011 +}
65012 +#endif
65013 +
65014 +#ifdef CONFIG_GRKERNSEC_BRUTE
65015 +#define GR_USER_BAN_TIME (15 * 60)
65016 +
65017 +static int __get_dumpable(unsigned long mm_flags)
65018 +{
65019 + int ret;
65020 +
65021 + ret = mm_flags & MMF_DUMPABLE_MASK;
65022 + return (ret >= 2) ? 2 : ret;
65023 +}
65024 +#endif
65025 +
65026 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
65027 +{
65028 +#ifdef CONFIG_GRKERNSEC_BRUTE
65029 + uid_t uid = 0;
65030 +
65031 + if (!grsec_enable_brute)
65032 + return;
65033 +
65034 + rcu_read_lock();
65035 + read_lock(&tasklist_lock);
65036 + read_lock(&grsec_exec_file_lock);
65037 + if (p->real_parent && p->real_parent->exec_file == p->exec_file)
65038 + p->real_parent->brute = 1;
65039 + else {
65040 + const struct cred *cred = __task_cred(p), *cred2;
65041 + struct task_struct *tsk, *tsk2;
65042 +
65043 + if (!__get_dumpable(mm_flags) && cred->uid) {
65044 + struct user_struct *user;
65045 +
65046 + uid = cred->uid;
65047 +
65048 + /* this is put upon execution past expiration */
65049 + user = find_user(uid);
65050 + if (user == NULL)
65051 + goto unlock;
65052 + user->banned = 1;
65053 + user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
65054 + if (user->ban_expires == ~0UL)
65055 + user->ban_expires--;
65056 +
65057 + do_each_thread(tsk2, tsk) {
65058 + cred2 = __task_cred(tsk);
65059 + if (tsk != p && cred2->uid == uid)
65060 + gr_fake_force_sig(SIGKILL, tsk);
65061 + } while_each_thread(tsk2, tsk);
65062 + }
65063 + }
65064 +unlock:
65065 + read_unlock(&grsec_exec_file_lock);
65066 + read_unlock(&tasklist_lock);
65067 + rcu_read_unlock();
65068 +
65069 + if (uid)
65070 + printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
65071 +#endif
65072 + return;
65073 +}
65074 +
65075 +void gr_handle_brute_check(void)
65076 +{
65077 +#ifdef CONFIG_GRKERNSEC_BRUTE
65078 + if (current->brute)
65079 + msleep(30 * 1000);
65080 +#endif
65081 + return;
65082 +}
65083 +
65084 +void gr_handle_kernel_exploit(void)
65085 +{
65086 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
65087 + const struct cred *cred;
65088 + struct task_struct *tsk, *tsk2;
65089 + struct user_struct *user;
65090 + uid_t uid;
65091 +
65092 + if (in_irq() || in_serving_softirq() || in_nmi())
65093 + panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
65094 +
65095 + uid = current_uid();
65096 +
65097 + if (uid == 0)
65098 + panic("grsec: halting the system due to suspicious kernel crash caused by root");
65099 + else {
65100 + /* kill all the processes of this user, hold a reference
65101 + to their creds struct, and prevent them from creating
65102 + another process until system reset
65103 + */
65104 + printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
65105 + /* we intentionally leak this ref */
65106 + user = get_uid(current->cred->user);
65107 + if (user) {
65108 + user->banned = 1;
65109 + user->ban_expires = ~0UL;
65110 + }
65111 +
65112 + read_lock(&tasklist_lock);
65113 + do_each_thread(tsk2, tsk) {
65114 + cred = __task_cred(tsk);
65115 + if (cred->uid == uid)
65116 + gr_fake_force_sig(SIGKILL, tsk);
65117 + } while_each_thread(tsk2, tsk);
65118 + read_unlock(&tasklist_lock);
65119 + }
65120 +#endif
65121 +}
65122 +
65123 +int __gr_process_user_ban(struct user_struct *user)
65124 +{
65125 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
65126 + if (unlikely(user->banned)) {
65127 + if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
65128 + user->banned = 0;
65129 + user->ban_expires = 0;
65130 + free_uid(user);
65131 + } else
65132 + return -EPERM;
65133 + }
65134 +#endif
65135 + return 0;
65136 +}
65137 +
65138 +int gr_process_user_ban(void)
65139 +{
65140 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
65141 + return __gr_process_user_ban(current->cred->user);
65142 +#endif
65143 + return 0;
65144 +}
65145 diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
65146 new file mode 100644
65147 index 0000000..7512ea9
65148 --- /dev/null
65149 +++ b/grsecurity/grsec_sock.c
65150 @@ -0,0 +1,275 @@
65151 +#include <linux/kernel.h>
65152 +#include <linux/module.h>
65153 +#include <linux/sched.h>
65154 +#include <linux/file.h>
65155 +#include <linux/net.h>
65156 +#include <linux/in.h>
65157 +#include <linux/ip.h>
65158 +#include <net/sock.h>
65159 +#include <net/inet_sock.h>
65160 +#include <linux/grsecurity.h>
65161 +#include <linux/grinternal.h>
65162 +#include <linux/gracl.h>
65163 +
65164 +kernel_cap_t gr_cap_rtnetlink(struct sock *sock);
65165 +EXPORT_SYMBOL(gr_cap_rtnetlink);
65166 +
65167 +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
65168 +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
65169 +
65170 +EXPORT_SYMBOL(gr_search_udp_recvmsg);
65171 +EXPORT_SYMBOL(gr_search_udp_sendmsg);
65172 +
65173 +#ifdef CONFIG_UNIX_MODULE
65174 +EXPORT_SYMBOL(gr_acl_handle_unix);
65175 +EXPORT_SYMBOL(gr_acl_handle_mknod);
65176 +EXPORT_SYMBOL(gr_handle_chroot_unix);
65177 +EXPORT_SYMBOL(gr_handle_create);
65178 +#endif
65179 +
65180 +#ifdef CONFIG_GRKERNSEC
65181 +#define gr_conn_table_size 32749
65182 +struct conn_table_entry {
65183 + struct conn_table_entry *next;
65184 + struct signal_struct *sig;
65185 +};
65186 +
65187 +struct conn_table_entry *gr_conn_table[gr_conn_table_size];
65188 +DEFINE_SPINLOCK(gr_conn_table_lock);
65189 +
65190 +extern const char * gr_socktype_to_name(unsigned char type);
65191 +extern const char * gr_proto_to_name(unsigned char proto);
65192 +extern const char * gr_sockfamily_to_name(unsigned char family);
65193 +
65194 +static __inline__ int
65195 +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
65196 +{
65197 + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
65198 +}
65199 +
65200 +static __inline__ int
65201 +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
65202 + __u16 sport, __u16 dport)
65203 +{
65204 + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
65205 + sig->gr_sport == sport && sig->gr_dport == dport))
65206 + return 1;
65207 + else
65208 + return 0;
65209 +}
65210 +
65211 +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
65212 +{
65213 + struct conn_table_entry **match;
65214 + unsigned int index;
65215 +
65216 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
65217 + sig->gr_sport, sig->gr_dport,
65218 + gr_conn_table_size);
65219 +
65220 + newent->sig = sig;
65221 +
65222 + match = &gr_conn_table[index];
65223 + newent->next = *match;
65224 + *match = newent;
65225 +
65226 + return;
65227 +}
65228 +
65229 +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
65230 +{
65231 + struct conn_table_entry *match, *last = NULL;
65232 + unsigned int index;
65233 +
65234 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
65235 + sig->gr_sport, sig->gr_dport,
65236 + gr_conn_table_size);
65237 +
65238 + match = gr_conn_table[index];
65239 + while (match && !conn_match(match->sig,
65240 + sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
65241 + sig->gr_dport)) {
65242 + last = match;
65243 + match = match->next;
65244 + }
65245 +
65246 + if (match) {
65247 + if (last)
65248 + last->next = match->next;
65249 + else
65250 + gr_conn_table[index] = NULL;
65251 + kfree(match);
65252 + }
65253 +
65254 + return;
65255 +}
65256 +
65257 +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
65258 + __u16 sport, __u16 dport)
65259 +{
65260 + struct conn_table_entry *match;
65261 + unsigned int index;
65262 +
65263 + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
65264 +
65265 + match = gr_conn_table[index];
65266 + while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
65267 + match = match->next;
65268 +
65269 + if (match)
65270 + return match->sig;
65271 + else
65272 + return NULL;
65273 +}
65274 +
65275 +#endif
65276 +
65277 +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
65278 +{
65279 +#ifdef CONFIG_GRKERNSEC
65280 + struct signal_struct *sig = task->signal;
65281 + struct conn_table_entry *newent;
65282 +
65283 + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
65284 + if (newent == NULL)
65285 + return;
65286 + /* no bh lock needed since we are called with bh disabled */
65287 + spin_lock(&gr_conn_table_lock);
65288 + gr_del_task_from_ip_table_nolock(sig);
65289 + sig->gr_saddr = inet->rcv_saddr;
65290 + sig->gr_daddr = inet->daddr;
65291 + sig->gr_sport = inet->sport;
65292 + sig->gr_dport = inet->dport;
65293 + gr_add_to_task_ip_table_nolock(sig, newent);
65294 + spin_unlock(&gr_conn_table_lock);
65295 +#endif
65296 + return;
65297 +}
65298 +
65299 +void gr_del_task_from_ip_table(struct task_struct *task)
65300 +{
65301 +#ifdef CONFIG_GRKERNSEC
65302 + spin_lock_bh(&gr_conn_table_lock);
65303 + gr_del_task_from_ip_table_nolock(task->signal);
65304 + spin_unlock_bh(&gr_conn_table_lock);
65305 +#endif
65306 + return;
65307 +}
65308 +
65309 +void
65310 +gr_attach_curr_ip(const struct sock *sk)
65311 +{
65312 +#ifdef CONFIG_GRKERNSEC
65313 + struct signal_struct *p, *set;
65314 + const struct inet_sock *inet = inet_sk(sk);
65315 +
65316 + if (unlikely(sk->sk_protocol != IPPROTO_TCP))
65317 + return;
65318 +
65319 + set = current->signal;
65320 +
65321 + spin_lock_bh(&gr_conn_table_lock);
65322 + p = gr_lookup_task_ip_table(inet->daddr, inet->rcv_saddr,
65323 + inet->dport, inet->sport);
65324 + if (unlikely(p != NULL)) {
65325 + set->curr_ip = p->curr_ip;
65326 + set->used_accept = 1;
65327 + gr_del_task_from_ip_table_nolock(p);
65328 + spin_unlock_bh(&gr_conn_table_lock);
65329 + return;
65330 + }
65331 + spin_unlock_bh(&gr_conn_table_lock);
65332 +
65333 + set->curr_ip = inet->daddr;
65334 + set->used_accept = 1;
65335 +#endif
65336 + return;
65337 +}
65338 +
65339 +int
65340 +gr_handle_sock_all(const int family, const int type, const int protocol)
65341 +{
65342 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
65343 + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
65344 + (family != AF_UNIX)) {
65345 + if (family == AF_INET)
65346 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
65347 + else
65348 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
65349 + return -EACCES;
65350 + }
65351 +#endif
65352 + return 0;
65353 +}
65354 +
65355 +int
65356 +gr_handle_sock_server(const struct sockaddr *sck)
65357 +{
65358 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
65359 + if (grsec_enable_socket_server &&
65360 + in_group_p(grsec_socket_server_gid) &&
65361 + sck && (sck->sa_family != AF_UNIX) &&
65362 + (sck->sa_family != AF_LOCAL)) {
65363 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
65364 + return -EACCES;
65365 + }
65366 +#endif
65367 + return 0;
65368 +}
65369 +
65370 +int
65371 +gr_handle_sock_server_other(const struct sock *sck)
65372 +{
65373 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
65374 + if (grsec_enable_socket_server &&
65375 + in_group_p(grsec_socket_server_gid) &&
65376 + sck && (sck->sk_family != AF_UNIX) &&
65377 + (sck->sk_family != AF_LOCAL)) {
65378 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
65379 + return -EACCES;
65380 + }
65381 +#endif
65382 + return 0;
65383 +}
65384 +
65385 +int
65386 +gr_handle_sock_client(const struct sockaddr *sck)
65387 +{
65388 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
65389 + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
65390 + sck && (sck->sa_family != AF_UNIX) &&
65391 + (sck->sa_family != AF_LOCAL)) {
65392 + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
65393 + return -EACCES;
65394 + }
65395 +#endif
65396 + return 0;
65397 +}
65398 +
65399 +kernel_cap_t
65400 +gr_cap_rtnetlink(struct sock *sock)
65401 +{
65402 +#ifdef CONFIG_GRKERNSEC
65403 + if (!gr_acl_is_enabled())
65404 + return current_cap();
65405 + else if (sock->sk_protocol == NETLINK_ISCSI &&
65406 + cap_raised(current_cap(), CAP_SYS_ADMIN) &&
65407 + gr_is_capable(CAP_SYS_ADMIN))
65408 + return current_cap();
65409 + else if (sock->sk_protocol == NETLINK_AUDIT &&
65410 + cap_raised(current_cap(), CAP_AUDIT_WRITE) &&
65411 + gr_is_capable(CAP_AUDIT_WRITE) &&
65412 + cap_raised(current_cap(), CAP_AUDIT_CONTROL) &&
65413 + gr_is_capable(CAP_AUDIT_CONTROL))
65414 + return current_cap();
65415 + else if (cap_raised(current_cap(), CAP_NET_ADMIN) &&
65416 + ((sock->sk_protocol == NETLINK_ROUTE) ?
65417 + gr_is_capable_nolog(CAP_NET_ADMIN) :
65418 + gr_is_capable(CAP_NET_ADMIN)))
65419 + return current_cap();
65420 + else
65421 + return __cap_empty_set;
65422 +#else
65423 + return current_cap();
65424 +#endif
65425 +}
65426 diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
65427 new file mode 100644
65428 index 0000000..31f3258
65429 --- /dev/null
65430 +++ b/grsecurity/grsec_sysctl.c
65431 @@ -0,0 +1,499 @@
65432 +#include <linux/kernel.h>
65433 +#include <linux/sched.h>
65434 +#include <linux/sysctl.h>
65435 +#include <linux/grsecurity.h>
65436 +#include <linux/grinternal.h>
65437 +
65438 +int
65439 +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
65440 +{
65441 +#ifdef CONFIG_GRKERNSEC_SYSCTL
65442 + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
65443 + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
65444 + return -EACCES;
65445 + }
65446 +#endif
65447 + return 0;
65448 +}
65449 +
65450 +#ifdef CONFIG_GRKERNSEC_ROFS
65451 +static int __maybe_unused one = 1;
65452 +#endif
65453 +
65454 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
65455 +ctl_table grsecurity_table[] = {
65456 +#ifdef CONFIG_GRKERNSEC_SYSCTL
65457 +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
65458 +#ifdef CONFIG_GRKERNSEC_IO
65459 + {
65460 + .ctl_name = CTL_UNNUMBERED,
65461 + .procname = "disable_priv_io",
65462 + .data = &grsec_disable_privio,
65463 + .maxlen = sizeof(int),
65464 + .mode = 0600,
65465 + .proc_handler = &proc_dointvec,
65466 + },
65467 +#endif
65468 +#endif
65469 +#ifdef CONFIG_GRKERNSEC_LINK
65470 + {
65471 + .ctl_name = CTL_UNNUMBERED,
65472 + .procname = "linking_restrictions",
65473 + .data = &grsec_enable_link,
65474 + .maxlen = sizeof(int),
65475 + .mode = 0600,
65476 + .proc_handler = &proc_dointvec,
65477 + },
65478 +#endif
65479 +#ifdef CONFIG_GRKERNSEC_BRUTE
65480 + {
65481 + .ctl_name = CTL_UNNUMBERED,
65482 + .procname = "deter_bruteforce",
65483 + .data = &grsec_enable_brute,
65484 + .maxlen = sizeof(int),
65485 + .mode = 0600,
65486 + .proc_handler = &proc_dointvec,
65487 + },
65488 +#endif
65489 +#ifdef CONFIG_GRKERNSEC_FIFO
65490 + {
65491 + .ctl_name = CTL_UNNUMBERED,
65492 + .procname = "fifo_restrictions",
65493 + .data = &grsec_enable_fifo,
65494 + .maxlen = sizeof(int),
65495 + .mode = 0600,
65496 + .proc_handler = &proc_dointvec,
65497 + },
65498 +#endif
65499 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
65500 + {
65501 + .ctl_name = CTL_UNNUMBERED,
65502 + .procname = "ptrace_readexec",
65503 + .data = &grsec_enable_ptrace_readexec,
65504 + .maxlen = sizeof(int),
65505 + .mode = 0600,
65506 + .proc_handler = &proc_dointvec,
65507 + },
65508 +#endif
65509 +#ifdef CONFIG_GRKERNSEC_SETXID
65510 + {
65511 + .ctl_name = CTL_UNNUMBERED,
65512 + .procname = "consistent_setxid",
65513 + .data = &grsec_enable_setxid,
65514 + .maxlen = sizeof(int),
65515 + .mode = 0600,
65516 + .proc_handler = &proc_dointvec,
65517 + },
65518 +#endif
65519 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65520 + {
65521 + .ctl_name = CTL_UNNUMBERED,
65522 + .procname = "ip_blackhole",
65523 + .data = &grsec_enable_blackhole,
65524 + .maxlen = sizeof(int),
65525 + .mode = 0600,
65526 + .proc_handler = &proc_dointvec,
65527 + },
65528 + {
65529 + .ctl_name = CTL_UNNUMBERED,
65530 + .procname = "lastack_retries",
65531 + .data = &grsec_lastack_retries,
65532 + .maxlen = sizeof(int),
65533 + .mode = 0600,
65534 + .proc_handler = &proc_dointvec,
65535 + },
65536 +#endif
65537 +#ifdef CONFIG_GRKERNSEC_EXECLOG
65538 + {
65539 + .ctl_name = CTL_UNNUMBERED,
65540 + .procname = "exec_logging",
65541 + .data = &grsec_enable_execlog,
65542 + .maxlen = sizeof(int),
65543 + .mode = 0600,
65544 + .proc_handler = &proc_dointvec,
65545 + },
65546 +#endif
65547 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
65548 + {
65549 + .ctl_name = CTL_UNNUMBERED,
65550 + .procname = "rwxmap_logging",
65551 + .data = &grsec_enable_log_rwxmaps,
65552 + .maxlen = sizeof(int),
65553 + .mode = 0600,
65554 + .proc_handler = &proc_dointvec,
65555 + },
65556 +#endif
65557 +#ifdef CONFIG_GRKERNSEC_SIGNAL
65558 + {
65559 + .ctl_name = CTL_UNNUMBERED,
65560 + .procname = "signal_logging",
65561 + .data = &grsec_enable_signal,
65562 + .maxlen = sizeof(int),
65563 + .mode = 0600,
65564 + .proc_handler = &proc_dointvec,
65565 + },
65566 +#endif
65567 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
65568 + {
65569 + .ctl_name = CTL_UNNUMBERED,
65570 + .procname = "forkfail_logging",
65571 + .data = &grsec_enable_forkfail,
65572 + .maxlen = sizeof(int),
65573 + .mode = 0600,
65574 + .proc_handler = &proc_dointvec,
65575 + },
65576 +#endif
65577 +#ifdef CONFIG_GRKERNSEC_TIME
65578 + {
65579 + .ctl_name = CTL_UNNUMBERED,
65580 + .procname = "timechange_logging",
65581 + .data = &grsec_enable_time,
65582 + .maxlen = sizeof(int),
65583 + .mode = 0600,
65584 + .proc_handler = &proc_dointvec,
65585 + },
65586 +#endif
65587 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
65588 + {
65589 + .ctl_name = CTL_UNNUMBERED,
65590 + .procname = "chroot_deny_shmat",
65591 + .data = &grsec_enable_chroot_shmat,
65592 + .maxlen = sizeof(int),
65593 + .mode = 0600,
65594 + .proc_handler = &proc_dointvec,
65595 + },
65596 +#endif
65597 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
65598 + {
65599 + .ctl_name = CTL_UNNUMBERED,
65600 + .procname = "chroot_deny_unix",
65601 + .data = &grsec_enable_chroot_unix,
65602 + .maxlen = sizeof(int),
65603 + .mode = 0600,
65604 + .proc_handler = &proc_dointvec,
65605 + },
65606 +#endif
65607 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
65608 + {
65609 + .ctl_name = CTL_UNNUMBERED,
65610 + .procname = "chroot_deny_mount",
65611 + .data = &grsec_enable_chroot_mount,
65612 + .maxlen = sizeof(int),
65613 + .mode = 0600,
65614 + .proc_handler = &proc_dointvec,
65615 + },
65616 +#endif
65617 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
65618 + {
65619 + .ctl_name = CTL_UNNUMBERED,
65620 + .procname = "chroot_deny_fchdir",
65621 + .data = &grsec_enable_chroot_fchdir,
65622 + .maxlen = sizeof(int),
65623 + .mode = 0600,
65624 + .proc_handler = &proc_dointvec,
65625 + },
65626 +#endif
65627 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
65628 + {
65629 + .ctl_name = CTL_UNNUMBERED,
65630 + .procname = "chroot_deny_chroot",
65631 + .data = &grsec_enable_chroot_double,
65632 + .maxlen = sizeof(int),
65633 + .mode = 0600,
65634 + .proc_handler = &proc_dointvec,
65635 + },
65636 +#endif
65637 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
65638 + {
65639 + .ctl_name = CTL_UNNUMBERED,
65640 + .procname = "chroot_deny_pivot",
65641 + .data = &grsec_enable_chroot_pivot,
65642 + .maxlen = sizeof(int),
65643 + .mode = 0600,
65644 + .proc_handler = &proc_dointvec,
65645 + },
65646 +#endif
65647 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
65648 + {
65649 + .ctl_name = CTL_UNNUMBERED,
65650 + .procname = "chroot_enforce_chdir",
65651 + .data = &grsec_enable_chroot_chdir,
65652 + .maxlen = sizeof(int),
65653 + .mode = 0600,
65654 + .proc_handler = &proc_dointvec,
65655 + },
65656 +#endif
65657 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
65658 + {
65659 + .ctl_name = CTL_UNNUMBERED,
65660 + .procname = "chroot_deny_chmod",
65661 + .data = &grsec_enable_chroot_chmod,
65662 + .maxlen = sizeof(int),
65663 + .mode = 0600,
65664 + .proc_handler = &proc_dointvec,
65665 + },
65666 +#endif
65667 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
65668 + {
65669 + .ctl_name = CTL_UNNUMBERED,
65670 + .procname = "chroot_deny_mknod",
65671 + .data = &grsec_enable_chroot_mknod,
65672 + .maxlen = sizeof(int),
65673 + .mode = 0600,
65674 + .proc_handler = &proc_dointvec,
65675 + },
65676 +#endif
65677 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
65678 + {
65679 + .ctl_name = CTL_UNNUMBERED,
65680 + .procname = "chroot_restrict_nice",
65681 + .data = &grsec_enable_chroot_nice,
65682 + .maxlen = sizeof(int),
65683 + .mode = 0600,
65684 + .proc_handler = &proc_dointvec,
65685 + },
65686 +#endif
65687 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
65688 + {
65689 + .ctl_name = CTL_UNNUMBERED,
65690 + .procname = "chroot_execlog",
65691 + .data = &grsec_enable_chroot_execlog,
65692 + .maxlen = sizeof(int),
65693 + .mode = 0600,
65694 + .proc_handler = &proc_dointvec,
65695 + },
65696 +#endif
65697 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
65698 + {
65699 + .ctl_name = CTL_UNNUMBERED,
65700 + .procname = "chroot_caps",
65701 + .data = &grsec_enable_chroot_caps,
65702 + .maxlen = sizeof(int),
65703 + .mode = 0600,
65704 + .proc_handler = &proc_dointvec,
65705 + },
65706 +#endif
65707 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
65708 + {
65709 + .ctl_name = CTL_UNNUMBERED,
65710 + .procname = "chroot_deny_sysctl",
65711 + .data = &grsec_enable_chroot_sysctl,
65712 + .maxlen = sizeof(int),
65713 + .mode = 0600,
65714 + .proc_handler = &proc_dointvec,
65715 + },
65716 +#endif
65717 +#ifdef CONFIG_GRKERNSEC_TPE
65718 + {
65719 + .ctl_name = CTL_UNNUMBERED,
65720 + .procname = "tpe",
65721 + .data = &grsec_enable_tpe,
65722 + .maxlen = sizeof(int),
65723 + .mode = 0600,
65724 + .proc_handler = &proc_dointvec,
65725 + },
65726 + {
65727 + .ctl_name = CTL_UNNUMBERED,
65728 + .procname = "tpe_gid",
65729 + .data = &grsec_tpe_gid,
65730 + .maxlen = sizeof(int),
65731 + .mode = 0600,
65732 + .proc_handler = &proc_dointvec,
65733 + },
65734 +#endif
65735 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
65736 + {
65737 + .ctl_name = CTL_UNNUMBERED,
65738 + .procname = "tpe_invert",
65739 + .data = &grsec_enable_tpe_invert,
65740 + .maxlen = sizeof(int),
65741 + .mode = 0600,
65742 + .proc_handler = &proc_dointvec,
65743 + },
65744 +#endif
65745 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
65746 + {
65747 + .ctl_name = CTL_UNNUMBERED,
65748 + .procname = "tpe_restrict_all",
65749 + .data = &grsec_enable_tpe_all,
65750 + .maxlen = sizeof(int),
65751 + .mode = 0600,
65752 + .proc_handler = &proc_dointvec,
65753 + },
65754 +#endif
65755 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
65756 + {
65757 + .ctl_name = CTL_UNNUMBERED,
65758 + .procname = "socket_all",
65759 + .data = &grsec_enable_socket_all,
65760 + .maxlen = sizeof(int),
65761 + .mode = 0600,
65762 + .proc_handler = &proc_dointvec,
65763 + },
65764 + {
65765 + .ctl_name = CTL_UNNUMBERED,
65766 + .procname = "socket_all_gid",
65767 + .data = &grsec_socket_all_gid,
65768 + .maxlen = sizeof(int),
65769 + .mode = 0600,
65770 + .proc_handler = &proc_dointvec,
65771 + },
65772 +#endif
65773 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
65774 + {
65775 + .ctl_name = CTL_UNNUMBERED,
65776 + .procname = "socket_client",
65777 + .data = &grsec_enable_socket_client,
65778 + .maxlen = sizeof(int),
65779 + .mode = 0600,
65780 + .proc_handler = &proc_dointvec,
65781 + },
65782 + {
65783 + .ctl_name = CTL_UNNUMBERED,
65784 + .procname = "socket_client_gid",
65785 + .data = &grsec_socket_client_gid,
65786 + .maxlen = sizeof(int),
65787 + .mode = 0600,
65788 + .proc_handler = &proc_dointvec,
65789 + },
65790 +#endif
65791 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
65792 + {
65793 + .ctl_name = CTL_UNNUMBERED,
65794 + .procname = "socket_server",
65795 + .data = &grsec_enable_socket_server,
65796 + .maxlen = sizeof(int),
65797 + .mode = 0600,
65798 + .proc_handler = &proc_dointvec,
65799 + },
65800 + {
65801 + .ctl_name = CTL_UNNUMBERED,
65802 + .procname = "socket_server_gid",
65803 + .data = &grsec_socket_server_gid,
65804 + .maxlen = sizeof(int),
65805 + .mode = 0600,
65806 + .proc_handler = &proc_dointvec,
65807 + },
65808 +#endif
65809 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
65810 + {
65811 + .ctl_name = CTL_UNNUMBERED,
65812 + .procname = "audit_group",
65813 + .data = &grsec_enable_group,
65814 + .maxlen = sizeof(int),
65815 + .mode = 0600,
65816 + .proc_handler = &proc_dointvec,
65817 + },
65818 + {
65819 + .ctl_name = CTL_UNNUMBERED,
65820 + .procname = "audit_gid",
65821 + .data = &grsec_audit_gid,
65822 + .maxlen = sizeof(int),
65823 + .mode = 0600,
65824 + .proc_handler = &proc_dointvec,
65825 + },
65826 +#endif
65827 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
65828 + {
65829 + .ctl_name = CTL_UNNUMBERED,
65830 + .procname = "audit_chdir",
65831 + .data = &grsec_enable_chdir,
65832 + .maxlen = sizeof(int),
65833 + .mode = 0600,
65834 + .proc_handler = &proc_dointvec,
65835 + },
65836 +#endif
65837 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
65838 + {
65839 + .ctl_name = CTL_UNNUMBERED,
65840 + .procname = "audit_mount",
65841 + .data = &grsec_enable_mount,
65842 + .maxlen = sizeof(int),
65843 + .mode = 0600,
65844 + .proc_handler = &proc_dointvec,
65845 + },
65846 +#endif
65847 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
65848 + {
65849 + .ctl_name = CTL_UNNUMBERED,
65850 + .procname = "audit_textrel",
65851 + .data = &grsec_enable_audit_textrel,
65852 + .maxlen = sizeof(int),
65853 + .mode = 0600,
65854 + .proc_handler = &proc_dointvec,
65855 + },
65856 +#endif
65857 +#ifdef CONFIG_GRKERNSEC_DMESG
65858 + {
65859 + .ctl_name = CTL_UNNUMBERED,
65860 + .procname = "dmesg",
65861 + .data = &grsec_enable_dmesg,
65862 + .maxlen = sizeof(int),
65863 + .mode = 0600,
65864 + .proc_handler = &proc_dointvec,
65865 + },
65866 +#endif
65867 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
65868 + {
65869 + .ctl_name = CTL_UNNUMBERED,
65870 + .procname = "chroot_findtask",
65871 + .data = &grsec_enable_chroot_findtask,
65872 + .maxlen = sizeof(int),
65873 + .mode = 0600,
65874 + .proc_handler = &proc_dointvec,
65875 + },
65876 +#endif
65877 +#ifdef CONFIG_GRKERNSEC_RESLOG
65878 + {
65879 + .ctl_name = CTL_UNNUMBERED,
65880 + .procname = "resource_logging",
65881 + .data = &grsec_resource_logging,
65882 + .maxlen = sizeof(int),
65883 + .mode = 0600,
65884 + .proc_handler = &proc_dointvec,
65885 + },
65886 +#endif
65887 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
65888 + {
65889 + .ctl_name = CTL_UNNUMBERED,
65890 + .procname = "audit_ptrace",
65891 + .data = &grsec_enable_audit_ptrace,
65892 + .maxlen = sizeof(int),
65893 + .mode = 0600,
65894 + .proc_handler = &proc_dointvec,
65895 + },
65896 +#endif
65897 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
65898 + {
65899 + .ctl_name = CTL_UNNUMBERED,
65900 + .procname = "harden_ptrace",
65901 + .data = &grsec_enable_harden_ptrace,
65902 + .maxlen = sizeof(int),
65903 + .mode = 0600,
65904 + .proc_handler = &proc_dointvec,
65905 + },
65906 +#endif
65907 + {
65908 + .ctl_name = CTL_UNNUMBERED,
65909 + .procname = "grsec_lock",
65910 + .data = &grsec_lock,
65911 + .maxlen = sizeof(int),
65912 + .mode = 0600,
65913 + .proc_handler = &proc_dointvec,
65914 + },
65915 +#endif
65916 +#ifdef CONFIG_GRKERNSEC_ROFS
65917 + {
65918 + .ctl_name = CTL_UNNUMBERED,
65919 + .procname = "romount_protect",
65920 + .data = &grsec_enable_rofs,
65921 + .maxlen = sizeof(int),
65922 + .mode = 0600,
65923 + .proc_handler = &proc_dointvec_minmax,
65924 + .extra1 = &one,
65925 + .extra2 = &one,
65926 + },
65927 +#endif
65928 + { .ctl_name = 0 }
65929 +};
65930 +#endif
65931 diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
65932 new file mode 100644
65933 index 0000000..0dc13c3
65934 --- /dev/null
65935 +++ b/grsecurity/grsec_time.c
65936 @@ -0,0 +1,16 @@
65937 +#include <linux/kernel.h>
65938 +#include <linux/sched.h>
65939 +#include <linux/grinternal.h>
65940 +#include <linux/module.h>
65941 +
65942 +void
65943 +gr_log_timechange(void)
65944 +{
65945 +#ifdef CONFIG_GRKERNSEC_TIME
65946 + if (grsec_enable_time)
65947 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
65948 +#endif
65949 + return;
65950 +}
65951 +
65952 +EXPORT_SYMBOL(gr_log_timechange);
65953 diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
65954 new file mode 100644
65955 index 0000000..07e0dc0
65956 --- /dev/null
65957 +++ b/grsecurity/grsec_tpe.c
65958 @@ -0,0 +1,73 @@
65959 +#include <linux/kernel.h>
65960 +#include <linux/sched.h>
65961 +#include <linux/file.h>
65962 +#include <linux/fs.h>
65963 +#include <linux/grinternal.h>
65964 +
65965 +extern int gr_acl_tpe_check(void);
65966 +
65967 +int
65968 +gr_tpe_allow(const struct file *file)
65969 +{
65970 +#ifdef CONFIG_GRKERNSEC
65971 + struct inode *inode = file->f_path.dentry->d_parent->d_inode;
65972 + const struct cred *cred = current_cred();
65973 + char *msg = NULL;
65974 + char *msg2 = NULL;
65975 +
65976 + // never restrict root
65977 + if (!cred->uid)
65978 + return 1;
65979 +
65980 + if (grsec_enable_tpe) {
65981 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
65982 + if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
65983 + msg = "not being in trusted group";
65984 + else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
65985 + msg = "being in untrusted group";
65986 +#else
65987 + if (in_group_p(grsec_tpe_gid))
65988 + msg = "being in untrusted group";
65989 +#endif
65990 + }
65991 + if (!msg && gr_acl_tpe_check())
65992 + msg = "being in untrusted role";
65993 +
65994 + // not in any affected group/role
65995 + if (!msg)
65996 + goto next_check;
65997 +
65998 + if (inode->i_uid)
65999 + msg2 = "file in non-root-owned directory";
66000 + else if (inode->i_mode & S_IWOTH)
66001 + msg2 = "file in world-writable directory";
66002 + else if (inode->i_mode & S_IWGRP)
66003 + msg2 = "file in group-writable directory";
66004 +
66005 + if (msg && msg2) {
66006 + char fullmsg[70] = {0};
66007 + snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
66008 + gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
66009 + return 0;
66010 + }
66011 + msg = NULL;
66012 +next_check:
66013 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
66014 + if (!grsec_enable_tpe || !grsec_enable_tpe_all)
66015 + return 1;
66016 +
66017 + if (inode->i_uid && (inode->i_uid != cred->uid))
66018 + msg = "directory not owned by user";
66019 + else if (inode->i_mode & S_IWOTH)
66020 + msg = "file in world-writable directory";
66021 + else if (inode->i_mode & S_IWGRP)
66022 + msg = "file in group-writable directory";
66023 +
66024 + if (msg) {
66025 + gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
66026 + return 0;
66027 + }
66028 +#endif
66029 +#endif
66030 + return 1;
66031 +}
66032 diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
66033 new file mode 100644
66034 index 0000000..9f7b1ac
66035 --- /dev/null
66036 +++ b/grsecurity/grsum.c
66037 @@ -0,0 +1,61 @@
66038 +#include <linux/err.h>
66039 +#include <linux/kernel.h>
66040 +#include <linux/sched.h>
66041 +#include <linux/mm.h>
66042 +#include <linux/scatterlist.h>
66043 +#include <linux/crypto.h>
66044 +#include <linux/gracl.h>
66045 +
66046 +
66047 +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
66048 +#error "crypto and sha256 must be built into the kernel"
66049 +#endif
66050 +
66051 +int
66052 +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
66053 +{
66054 + char *p;
66055 + struct crypto_hash *tfm;
66056 + struct hash_desc desc;
66057 + struct scatterlist sg;
66058 + unsigned char temp_sum[GR_SHA_LEN];
66059 + volatile int retval = 0;
66060 + volatile int dummy = 0;
66061 + unsigned int i;
66062 +
66063 + sg_init_table(&sg, 1);
66064 +
66065 + tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
66066 + if (IS_ERR(tfm)) {
66067 + /* should never happen, since sha256 should be built in */
66068 + return 1;
66069 + }
66070 +
66071 + desc.tfm = tfm;
66072 + desc.flags = 0;
66073 +
66074 + crypto_hash_init(&desc);
66075 +
66076 + p = salt;
66077 + sg_set_buf(&sg, p, GR_SALT_LEN);
66078 + crypto_hash_update(&desc, &sg, sg.length);
66079 +
66080 + p = entry->pw;
66081 + sg_set_buf(&sg, p, strlen(p));
66082 +
66083 + crypto_hash_update(&desc, &sg, sg.length);
66084 +
66085 + crypto_hash_final(&desc, temp_sum);
66086 +
66087 + memset(entry->pw, 0, GR_PW_LEN);
66088 +
66089 + for (i = 0; i < GR_SHA_LEN; i++)
66090 + if (sum[i] != temp_sum[i])
66091 + retval = 1;
66092 + else
66093 + dummy = 1; // waste a cycle
66094 +
66095 + crypto_free_hash(tfm);
66096 +
66097 + return retval;
66098 +}
66099 diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
66100 index 3cd9ccd..fe16d47 100644
66101 --- a/include/acpi/acpi_bus.h
66102 +++ b/include/acpi/acpi_bus.h
66103 @@ -107,7 +107,7 @@ struct acpi_device_ops {
66104 acpi_op_bind bind;
66105 acpi_op_unbind unbind;
66106 acpi_op_notify notify;
66107 -};
66108 +} __no_const;
66109
66110 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
66111
66112 diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
66113 index f4906f6..71feb73 100644
66114 --- a/include/acpi/acpi_drivers.h
66115 +++ b/include/acpi/acpi_drivers.h
66116 @@ -119,8 +119,8 @@ int acpi_processor_set_thermal_limit(acpi_handle handle, int type);
66117 Dock Station
66118 -------------------------------------------------------------------------- */
66119 struct acpi_dock_ops {
66120 - acpi_notify_handler handler;
66121 - acpi_notify_handler uevent;
66122 + const acpi_notify_handler handler;
66123 + const acpi_notify_handler uevent;
66124 };
66125
66126 #if defined(CONFIG_ACPI_DOCK) || defined(CONFIG_ACPI_DOCK_MODULE)
66127 @@ -128,7 +128,7 @@ extern int is_dock_device(acpi_handle handle);
66128 extern int register_dock_notifier(struct notifier_block *nb);
66129 extern void unregister_dock_notifier(struct notifier_block *nb);
66130 extern int register_hotplug_dock_device(acpi_handle handle,
66131 - struct acpi_dock_ops *ops,
66132 + const struct acpi_dock_ops *ops,
66133 void *context);
66134 extern void unregister_hotplug_dock_device(acpi_handle handle);
66135 #else
66136 @@ -144,7 +144,7 @@ static inline void unregister_dock_notifier(struct notifier_block *nb)
66137 {
66138 }
66139 static inline int register_hotplug_dock_device(acpi_handle handle,
66140 - struct acpi_dock_ops *ops,
66141 + const struct acpi_dock_ops *ops,
66142 void *context)
66143 {
66144 return -ENODEV;
66145 diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
66146 index b7babf0..a9ac9fc 100644
66147 --- a/include/asm-generic/atomic-long.h
66148 +++ b/include/asm-generic/atomic-long.h
66149 @@ -22,6 +22,12 @@
66150
66151 typedef atomic64_t atomic_long_t;
66152
66153 +#ifdef CONFIG_PAX_REFCOUNT
66154 +typedef atomic64_unchecked_t atomic_long_unchecked_t;
66155 +#else
66156 +typedef atomic64_t atomic_long_unchecked_t;
66157 +#endif
66158 +
66159 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
66160
66161 static inline long atomic_long_read(atomic_long_t *l)
66162 @@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
66163 return (long)atomic64_read(v);
66164 }
66165
66166 +#ifdef CONFIG_PAX_REFCOUNT
66167 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
66168 +{
66169 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
66170 +
66171 + return (long)atomic64_read_unchecked(v);
66172 +}
66173 +#endif
66174 +
66175 static inline void atomic_long_set(atomic_long_t *l, long i)
66176 {
66177 atomic64_t *v = (atomic64_t *)l;
66178 @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
66179 atomic64_set(v, i);
66180 }
66181
66182 +#ifdef CONFIG_PAX_REFCOUNT
66183 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
66184 +{
66185 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
66186 +
66187 + atomic64_set_unchecked(v, i);
66188 +}
66189 +#endif
66190 +
66191 static inline void atomic_long_inc(atomic_long_t *l)
66192 {
66193 atomic64_t *v = (atomic64_t *)l;
66194 @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
66195 atomic64_inc(v);
66196 }
66197
66198 +#ifdef CONFIG_PAX_REFCOUNT
66199 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
66200 +{
66201 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
66202 +
66203 + atomic64_inc_unchecked(v);
66204 +}
66205 +#endif
66206 +
66207 static inline void atomic_long_dec(atomic_long_t *l)
66208 {
66209 atomic64_t *v = (atomic64_t *)l;
66210 @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
66211 atomic64_dec(v);
66212 }
66213
66214 +#ifdef CONFIG_PAX_REFCOUNT
66215 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
66216 +{
66217 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
66218 +
66219 + atomic64_dec_unchecked(v);
66220 +}
66221 +#endif
66222 +
66223 static inline void atomic_long_add(long i, atomic_long_t *l)
66224 {
66225 atomic64_t *v = (atomic64_t *)l;
66226 @@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
66227 atomic64_add(i, v);
66228 }
66229
66230 +#ifdef CONFIG_PAX_REFCOUNT
66231 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
66232 +{
66233 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
66234 +
66235 + atomic64_add_unchecked(i, v);
66236 +}
66237 +#endif
66238 +
66239 static inline void atomic_long_sub(long i, atomic_long_t *l)
66240 {
66241 atomic64_t *v = (atomic64_t *)l;
66242 @@ -115,6 +166,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
66243 return (long)atomic64_inc_return(v);
66244 }
66245
66246 +#ifdef CONFIG_PAX_REFCOUNT
66247 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
66248 +{
66249 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
66250 +
66251 + return (long)atomic64_inc_return_unchecked(v);
66252 +}
66253 +#endif
66254 +
66255 static inline long atomic_long_dec_return(atomic_long_t *l)
66256 {
66257 atomic64_t *v = (atomic64_t *)l;
66258 @@ -140,6 +200,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
66259
66260 typedef atomic_t atomic_long_t;
66261
66262 +#ifdef CONFIG_PAX_REFCOUNT
66263 +typedef atomic_unchecked_t atomic_long_unchecked_t;
66264 +#else
66265 +typedef atomic_t atomic_long_unchecked_t;
66266 +#endif
66267 +
66268 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
66269 static inline long atomic_long_read(atomic_long_t *l)
66270 {
66271 @@ -148,6 +214,15 @@ static inline long atomic_long_read(atomic_long_t *l)
66272 return (long)atomic_read(v);
66273 }
66274
66275 +#ifdef CONFIG_PAX_REFCOUNT
66276 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
66277 +{
66278 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
66279 +
66280 + return (long)atomic_read_unchecked(v);
66281 +}
66282 +#endif
66283 +
66284 static inline void atomic_long_set(atomic_long_t *l, long i)
66285 {
66286 atomic_t *v = (atomic_t *)l;
66287 @@ -155,6 +230,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
66288 atomic_set(v, i);
66289 }
66290
66291 +#ifdef CONFIG_PAX_REFCOUNT
66292 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
66293 +{
66294 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
66295 +
66296 + atomic_set_unchecked(v, i);
66297 +}
66298 +#endif
66299 +
66300 static inline void atomic_long_inc(atomic_long_t *l)
66301 {
66302 atomic_t *v = (atomic_t *)l;
66303 @@ -162,6 +246,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
66304 atomic_inc(v);
66305 }
66306
66307 +#ifdef CONFIG_PAX_REFCOUNT
66308 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
66309 +{
66310 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
66311 +
66312 + atomic_inc_unchecked(v);
66313 +}
66314 +#endif
66315 +
66316 static inline void atomic_long_dec(atomic_long_t *l)
66317 {
66318 atomic_t *v = (atomic_t *)l;
66319 @@ -169,6 +262,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
66320 atomic_dec(v);
66321 }
66322
66323 +#ifdef CONFIG_PAX_REFCOUNT
66324 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
66325 +{
66326 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
66327 +
66328 + atomic_dec_unchecked(v);
66329 +}
66330 +#endif
66331 +
66332 static inline void atomic_long_add(long i, atomic_long_t *l)
66333 {
66334 atomic_t *v = (atomic_t *)l;
66335 @@ -176,6 +278,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
66336 atomic_add(i, v);
66337 }
66338
66339 +#ifdef CONFIG_PAX_REFCOUNT
66340 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
66341 +{
66342 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
66343 +
66344 + atomic_add_unchecked(i, v);
66345 +}
66346 +#endif
66347 +
66348 static inline void atomic_long_sub(long i, atomic_long_t *l)
66349 {
66350 atomic_t *v = (atomic_t *)l;
66351 @@ -232,6 +343,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
66352 return (long)atomic_inc_return(v);
66353 }
66354
66355 +#ifdef CONFIG_PAX_REFCOUNT
66356 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
66357 +{
66358 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
66359 +
66360 + return (long)atomic_inc_return_unchecked(v);
66361 +}
66362 +#endif
66363 +
66364 static inline long atomic_long_dec_return(atomic_long_t *l)
66365 {
66366 atomic_t *v = (atomic_t *)l;
66367 @@ -255,4 +375,47 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
66368
66369 #endif /* BITS_PER_LONG == 64 */
66370
66371 +#ifdef CONFIG_PAX_REFCOUNT
66372 +static inline void pax_refcount_needs_these_functions(void)
66373 +{
66374 + atomic_read_unchecked((atomic_unchecked_t *)NULL);
66375 + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
66376 + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
66377 + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
66378 + atomic_inc_unchecked((atomic_unchecked_t *)NULL);
66379 + (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
66380 + atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
66381 + atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
66382 + atomic_dec_unchecked((atomic_unchecked_t *)NULL);
66383 + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
66384 + (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
66385 +
66386 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
66387 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
66388 + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
66389 + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
66390 + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
66391 + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
66392 +}
66393 +#else
66394 +#define atomic_read_unchecked(v) atomic_read(v)
66395 +#define atomic_set_unchecked(v, i) atomic_set((v), (i))
66396 +#define atomic_add_unchecked(i, v) atomic_add((i), (v))
66397 +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
66398 +#define atomic_inc_unchecked(v) atomic_inc(v)
66399 +#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
66400 +#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
66401 +#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
66402 +#define atomic_dec_unchecked(v) atomic_dec(v)
66403 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
66404 +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
66405 +
66406 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
66407 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
66408 +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
66409 +#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
66410 +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
66411 +#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
66412 +#endif
66413 +
66414 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
66415 diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
66416 index b18ce4f..2ee2843 100644
66417 --- a/include/asm-generic/atomic64.h
66418 +++ b/include/asm-generic/atomic64.h
66419 @@ -16,6 +16,8 @@ typedef struct {
66420 long long counter;
66421 } atomic64_t;
66422
66423 +typedef atomic64_t atomic64_unchecked_t;
66424 +
66425 #define ATOMIC64_INIT(i) { (i) }
66426
66427 extern long long atomic64_read(const atomic64_t *v);
66428 @@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
66429 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
66430 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
66431
66432 +#define atomic64_read_unchecked(v) atomic64_read(v)
66433 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
66434 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
66435 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
66436 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
66437 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
66438 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
66439 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
66440 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
66441 +
66442 #endif /* _ASM_GENERIC_ATOMIC64_H */
66443 diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
66444 index d48ddf0..656a0ac 100644
66445 --- a/include/asm-generic/bug.h
66446 +++ b/include/asm-generic/bug.h
66447 @@ -105,11 +105,11 @@ extern void warn_slowpath_null(const char *file, const int line);
66448
66449 #else /* !CONFIG_BUG */
66450 #ifndef HAVE_ARCH_BUG
66451 -#define BUG() do {} while(0)
66452 +#define BUG() do { for (;;) ; } while(0)
66453 #endif
66454
66455 #ifndef HAVE_ARCH_BUG_ON
66456 -#define BUG_ON(condition) do { if (condition) ; } while(0)
66457 +#define BUG_ON(condition) do { if (condition) for (;;) ; } while(0)
66458 #endif
66459
66460 #ifndef HAVE_ARCH_WARN_ON
66461 diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
66462 index 1bfcfe5..e04c5c9 100644
66463 --- a/include/asm-generic/cache.h
66464 +++ b/include/asm-generic/cache.h
66465 @@ -6,7 +6,7 @@
66466 * cache lines need to provide their own cache.h.
66467 */
66468
66469 -#define L1_CACHE_SHIFT 5
66470 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
66471 +#define L1_CACHE_SHIFT 5UL
66472 +#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
66473
66474 #endif /* __ASM_GENERIC_CACHE_H */
66475 diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h
66476 index 6920695..41038bc 100644
66477 --- a/include/asm-generic/dma-mapping-common.h
66478 +++ b/include/asm-generic/dma-mapping-common.h
66479 @@ -11,7 +11,7 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
66480 enum dma_data_direction dir,
66481 struct dma_attrs *attrs)
66482 {
66483 - struct dma_map_ops *ops = get_dma_ops(dev);
66484 + const struct dma_map_ops *ops = get_dma_ops(dev);
66485 dma_addr_t addr;
66486
66487 kmemcheck_mark_initialized(ptr, size);
66488 @@ -30,7 +30,7 @@ static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
66489 enum dma_data_direction dir,
66490 struct dma_attrs *attrs)
66491 {
66492 - struct dma_map_ops *ops = get_dma_ops(dev);
66493 + const struct dma_map_ops *ops = get_dma_ops(dev);
66494
66495 BUG_ON(!valid_dma_direction(dir));
66496 if (ops->unmap_page)
66497 @@ -42,7 +42,7 @@ static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
66498 int nents, enum dma_data_direction dir,
66499 struct dma_attrs *attrs)
66500 {
66501 - struct dma_map_ops *ops = get_dma_ops(dev);
66502 + const struct dma_map_ops *ops = get_dma_ops(dev);
66503 int i, ents;
66504 struct scatterlist *s;
66505
66506 @@ -59,7 +59,7 @@ static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg
66507 int nents, enum dma_data_direction dir,
66508 struct dma_attrs *attrs)
66509 {
66510 - struct dma_map_ops *ops = get_dma_ops(dev);
66511 + const struct dma_map_ops *ops = get_dma_ops(dev);
66512
66513 BUG_ON(!valid_dma_direction(dir));
66514 debug_dma_unmap_sg(dev, sg, nents, dir);
66515 @@ -71,7 +71,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
66516 size_t offset, size_t size,
66517 enum dma_data_direction dir)
66518 {
66519 - struct dma_map_ops *ops = get_dma_ops(dev);
66520 + const struct dma_map_ops *ops = get_dma_ops(dev);
66521 dma_addr_t addr;
66522
66523 kmemcheck_mark_initialized(page_address(page) + offset, size);
66524 @@ -85,7 +85,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
66525 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
66526 size_t size, enum dma_data_direction dir)
66527 {
66528 - struct dma_map_ops *ops = get_dma_ops(dev);
66529 + const struct dma_map_ops *ops = get_dma_ops(dev);
66530
66531 BUG_ON(!valid_dma_direction(dir));
66532 if (ops->unmap_page)
66533 @@ -97,7 +97,7 @@ static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
66534 size_t size,
66535 enum dma_data_direction dir)
66536 {
66537 - struct dma_map_ops *ops = get_dma_ops(dev);
66538 + const struct dma_map_ops *ops = get_dma_ops(dev);
66539
66540 BUG_ON(!valid_dma_direction(dir));
66541 if (ops->sync_single_for_cpu)
66542 @@ -109,7 +109,7 @@ static inline void dma_sync_single_for_device(struct device *dev,
66543 dma_addr_t addr, size_t size,
66544 enum dma_data_direction dir)
66545 {
66546 - struct dma_map_ops *ops = get_dma_ops(dev);
66547 + const struct dma_map_ops *ops = get_dma_ops(dev);
66548
66549 BUG_ON(!valid_dma_direction(dir));
66550 if (ops->sync_single_for_device)
66551 @@ -123,7 +123,7 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
66552 size_t size,
66553 enum dma_data_direction dir)
66554 {
66555 - struct dma_map_ops *ops = get_dma_ops(dev);
66556 + const struct dma_map_ops *ops = get_dma_ops(dev);
66557
66558 BUG_ON(!valid_dma_direction(dir));
66559 if (ops->sync_single_range_for_cpu) {
66560 @@ -140,7 +140,7 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
66561 size_t size,
66562 enum dma_data_direction dir)
66563 {
66564 - struct dma_map_ops *ops = get_dma_ops(dev);
66565 + const struct dma_map_ops *ops = get_dma_ops(dev);
66566
66567 BUG_ON(!valid_dma_direction(dir));
66568 if (ops->sync_single_range_for_device) {
66569 @@ -155,7 +155,7 @@ static inline void
66570 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
66571 int nelems, enum dma_data_direction dir)
66572 {
66573 - struct dma_map_ops *ops = get_dma_ops(dev);
66574 + const struct dma_map_ops *ops = get_dma_ops(dev);
66575
66576 BUG_ON(!valid_dma_direction(dir));
66577 if (ops->sync_sg_for_cpu)
66578 @@ -167,7 +167,7 @@ static inline void
66579 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
66580 int nelems, enum dma_data_direction dir)
66581 {
66582 - struct dma_map_ops *ops = get_dma_ops(dev);
66583 + const struct dma_map_ops *ops = get_dma_ops(dev);
66584
66585 BUG_ON(!valid_dma_direction(dir));
66586 if (ops->sync_sg_for_device)
66587 diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
66588 index 0d68a1e..b74a761 100644
66589 --- a/include/asm-generic/emergency-restart.h
66590 +++ b/include/asm-generic/emergency-restart.h
66591 @@ -1,7 +1,7 @@
66592 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
66593 #define _ASM_GENERIC_EMERGENCY_RESTART_H
66594
66595 -static inline void machine_emergency_restart(void)
66596 +static inline __noreturn void machine_emergency_restart(void)
66597 {
66598 machine_restart(NULL);
66599 }
66600 diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
66601 index 3c2344f..4590a7d 100644
66602 --- a/include/asm-generic/futex.h
66603 +++ b/include/asm-generic/futex.h
66604 @@ -6,7 +6,7 @@
66605 #include <asm/errno.h>
66606
66607 static inline int
66608 -futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
66609 +futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
66610 {
66611 int op = (encoded_op >> 28) & 7;
66612 int cmp = (encoded_op >> 24) & 15;
66613 @@ -48,7 +48,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
66614 }
66615
66616 static inline int
66617 -futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
66618 +futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval, int newval)
66619 {
66620 return -ENOSYS;
66621 }
66622 diff --git a/include/asm-generic/int-l64.h b/include/asm-generic/int-l64.h
66623 index 1ca3efc..e3dc852 100644
66624 --- a/include/asm-generic/int-l64.h
66625 +++ b/include/asm-generic/int-l64.h
66626 @@ -46,6 +46,8 @@ typedef unsigned int u32;
66627 typedef signed long s64;
66628 typedef unsigned long u64;
66629
66630 +typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
66631 +
66632 #define S8_C(x) x
66633 #define U8_C(x) x ## U
66634 #define S16_C(x) x
66635 diff --git a/include/asm-generic/int-ll64.h b/include/asm-generic/int-ll64.h
66636 index f394147..b6152b9 100644
66637 --- a/include/asm-generic/int-ll64.h
66638 +++ b/include/asm-generic/int-ll64.h
66639 @@ -51,6 +51,8 @@ typedef unsigned int u32;
66640 typedef signed long long s64;
66641 typedef unsigned long long u64;
66642
66643 +typedef unsigned long long intoverflow_t;
66644 +
66645 #define S8_C(x) x
66646 #define U8_C(x) x ## U
66647 #define S16_C(x) x
66648 diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
66649 index e5f234a..cdb16b3 100644
66650 --- a/include/asm-generic/kmap_types.h
66651 +++ b/include/asm-generic/kmap_types.h
66652 @@ -28,7 +28,8 @@ KMAP_D(15) KM_UML_USERCOPY,
66653 KMAP_D(16) KM_IRQ_PTE,
66654 KMAP_D(17) KM_NMI,
66655 KMAP_D(18) KM_NMI_PTE,
66656 -KMAP_D(19) KM_TYPE_NR
66657 +KMAP_D(19) KM_CLEARPAGE,
66658 +KMAP_D(20) KM_TYPE_NR
66659 };
66660
66661 #undef KMAP_D
66662 diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
66663 index 725612b..9cc513a 100644
66664 --- a/include/asm-generic/pgtable-nopmd.h
66665 +++ b/include/asm-generic/pgtable-nopmd.h
66666 @@ -1,14 +1,19 @@
66667 #ifndef _PGTABLE_NOPMD_H
66668 #define _PGTABLE_NOPMD_H
66669
66670 -#ifndef __ASSEMBLY__
66671 -
66672 #include <asm-generic/pgtable-nopud.h>
66673
66674 -struct mm_struct;
66675 -
66676 #define __PAGETABLE_PMD_FOLDED
66677
66678 +#define PMD_SHIFT PUD_SHIFT
66679 +#define PTRS_PER_PMD 1
66680 +#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
66681 +#define PMD_MASK (~(PMD_SIZE-1))
66682 +
66683 +#ifndef __ASSEMBLY__
66684 +
66685 +struct mm_struct;
66686 +
66687 /*
66688 * Having the pmd type consist of a pud gets the size right, and allows
66689 * us to conceptually access the pud entry that this pmd is folded into
66690 @@ -16,11 +21,6 @@ struct mm_struct;
66691 */
66692 typedef struct { pud_t pud; } pmd_t;
66693
66694 -#define PMD_SHIFT PUD_SHIFT
66695 -#define PTRS_PER_PMD 1
66696 -#define PMD_SIZE (1UL << PMD_SHIFT)
66697 -#define PMD_MASK (~(PMD_SIZE-1))
66698 -
66699 /*
66700 * The "pud_xxx()" functions here are trivial for a folded two-level
66701 * setup: the pmd is never bad, and a pmd always exists (as it's folded
66702 diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
66703 index 810431d..ccc3638 100644
66704 --- a/include/asm-generic/pgtable-nopud.h
66705 +++ b/include/asm-generic/pgtable-nopud.h
66706 @@ -1,10 +1,15 @@
66707 #ifndef _PGTABLE_NOPUD_H
66708 #define _PGTABLE_NOPUD_H
66709
66710 -#ifndef __ASSEMBLY__
66711 -
66712 #define __PAGETABLE_PUD_FOLDED
66713
66714 +#define PUD_SHIFT PGDIR_SHIFT
66715 +#define PTRS_PER_PUD 1
66716 +#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
66717 +#define PUD_MASK (~(PUD_SIZE-1))
66718 +
66719 +#ifndef __ASSEMBLY__
66720 +
66721 /*
66722 * Having the pud type consist of a pgd gets the size right, and allows
66723 * us to conceptually access the pgd entry that this pud is folded into
66724 @@ -12,11 +17,6 @@
66725 */
66726 typedef struct { pgd_t pgd; } pud_t;
66727
66728 -#define PUD_SHIFT PGDIR_SHIFT
66729 -#define PTRS_PER_PUD 1
66730 -#define PUD_SIZE (1UL << PUD_SHIFT)
66731 -#define PUD_MASK (~(PUD_SIZE-1))
66732 -
66733 /*
66734 * The "pgd_xxx()" functions here are trivial for a folded two-level
66735 * setup: the pud is never bad, and a pud always exists (as it's folded
66736 diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
66737 index e2bd73e..fea8ed3 100644
66738 --- a/include/asm-generic/pgtable.h
66739 +++ b/include/asm-generic/pgtable.h
66740 @@ -344,6 +344,14 @@ extern void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
66741 unsigned long size);
66742 #endif
66743
66744 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
66745 +static inline unsigned long pax_open_kernel(void) { return 0; }
66746 +#endif
66747 +
66748 +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
66749 +static inline unsigned long pax_close_kernel(void) { return 0; }
66750 +#endif
66751 +
66752 #endif /* !__ASSEMBLY__ */
66753
66754 #endif /* _ASM_GENERIC_PGTABLE_H */
66755 diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
66756 index b6e818f..21aa58a 100644
66757 --- a/include/asm-generic/vmlinux.lds.h
66758 +++ b/include/asm-generic/vmlinux.lds.h
66759 @@ -199,6 +199,7 @@
66760 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
66761 VMLINUX_SYMBOL(__start_rodata) = .; \
66762 *(.rodata) *(.rodata.*) \
66763 + *(.data.read_only) \
66764 *(__vermagic) /* Kernel version magic */ \
66765 *(__markers_strings) /* Markers: strings */ \
66766 *(__tracepoints_strings)/* Tracepoints: strings */ \
66767 @@ -656,22 +657,24 @@
66768 * section in the linker script will go there too. @phdr should have
66769 * a leading colon.
66770 *
66771 - * Note that this macros defines __per_cpu_load as an absolute symbol.
66772 + * Note that this macros defines per_cpu_load as an absolute symbol.
66773 * If there is no need to put the percpu section at a predetermined
66774 * address, use PERCPU().
66775 */
66776 #define PERCPU_VADDR(vaddr, phdr) \
66777 - VMLINUX_SYMBOL(__per_cpu_load) = .; \
66778 - .data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
66779 + per_cpu_load = .; \
66780 + .data.percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
66781 - LOAD_OFFSET) { \
66782 + VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
66783 VMLINUX_SYMBOL(__per_cpu_start) = .; \
66784 *(.data.percpu.first) \
66785 - *(.data.percpu.page_aligned) \
66786 *(.data.percpu) \
66787 + . = ALIGN(PAGE_SIZE); \
66788 + *(.data.percpu.page_aligned) \
66789 *(.data.percpu.shared_aligned) \
66790 VMLINUX_SYMBOL(__per_cpu_end) = .; \
66791 } phdr \
66792 - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
66793 + . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data.percpu);
66794
66795 /**
66796 * PERCPU - define output section for percpu area, simple version
66797 diff --git a/include/drm/drmP.h b/include/drm/drmP.h
66798 index ebab6a6..351dba1 100644
66799 --- a/include/drm/drmP.h
66800 +++ b/include/drm/drmP.h
66801 @@ -71,6 +71,7 @@
66802 #include <linux/workqueue.h>
66803 #include <linux/poll.h>
66804 #include <asm/pgalloc.h>
66805 +#include <asm/local.h>
66806 #include "drm.h"
66807
66808 #include <linux/idr.h>
66809 @@ -814,7 +815,7 @@ struct drm_driver {
66810 void (*vgaarb_irq)(struct drm_device *dev, bool state);
66811
66812 /* Driver private ops for this object */
66813 - struct vm_operations_struct *gem_vm_ops;
66814 + const struct vm_operations_struct *gem_vm_ops;
66815
66816 int major;
66817 int minor;
66818 @@ -917,7 +918,7 @@ struct drm_device {
66819
66820 /** \name Usage Counters */
66821 /*@{ */
66822 - int open_count; /**< Outstanding files open */
66823 + local_t open_count; /**< Outstanding files open */
66824 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
66825 atomic_t vma_count; /**< Outstanding vma areas open */
66826 int buf_use; /**< Buffers in use -- cannot alloc */
66827 @@ -928,7 +929,7 @@ struct drm_device {
66828 /*@{ */
66829 unsigned long counters;
66830 enum drm_stat_type types[15];
66831 - atomic_t counts[15];
66832 + atomic_unchecked_t counts[15];
66833 /*@} */
66834
66835 struct list_head filelist;
66836 @@ -1016,7 +1017,7 @@ struct drm_device {
66837 struct pci_controller *hose;
66838 #endif
66839 struct drm_sg_mem *sg; /**< Scatter gather memory */
66840 - unsigned int num_crtcs; /**< Number of CRTCs on this device */
66841 + unsigned int num_crtcs; /**< Number of CRTCs on this device */
66842 void *dev_private; /**< device private data */
66843 void *mm_private;
66844 struct address_space *dev_mapping;
66845 @@ -1042,11 +1043,11 @@ struct drm_device {
66846 spinlock_t object_name_lock;
66847 struct idr object_name_idr;
66848 atomic_t object_count;
66849 - atomic_t object_memory;
66850 + atomic_unchecked_t object_memory;
66851 atomic_t pin_count;
66852 - atomic_t pin_memory;
66853 + atomic_unchecked_t pin_memory;
66854 atomic_t gtt_count;
66855 - atomic_t gtt_memory;
66856 + atomic_unchecked_t gtt_memory;
66857 uint32_t gtt_total;
66858 uint32_t invalidate_domains; /* domains pending invalidation */
66859 uint32_t flush_domains; /* domains pending flush */
66860 diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
66861 index b29e201..3413cc9 100644
66862 --- a/include/drm/drm_crtc_helper.h
66863 +++ b/include/drm/drm_crtc_helper.h
66864 @@ -64,7 +64,7 @@ struct drm_crtc_helper_funcs {
66865
66866 /* reload the current crtc LUT */
66867 void (*load_lut)(struct drm_crtc *crtc);
66868 -};
66869 +} __no_const;
66870
66871 struct drm_encoder_helper_funcs {
66872 void (*dpms)(struct drm_encoder *encoder, int mode);
66873 @@ -85,7 +85,7 @@ struct drm_encoder_helper_funcs {
66874 struct drm_connector *connector);
66875 /* disable encoder when not in use - more explicit than dpms off */
66876 void (*disable)(struct drm_encoder *encoder);
66877 -};
66878 +} __no_const;
66879
66880 struct drm_connector_helper_funcs {
66881 int (*get_modes)(struct drm_connector *connector);
66882 diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
66883 index b199170..6f9e64c 100644
66884 --- a/include/drm/ttm/ttm_memory.h
66885 +++ b/include/drm/ttm/ttm_memory.h
66886 @@ -47,7 +47,7 @@
66887
66888 struct ttm_mem_shrink {
66889 int (*do_shrink) (struct ttm_mem_shrink *);
66890 -};
66891 +} __no_const;
66892
66893 /**
66894 * struct ttm_mem_global - Global memory accounting structure.
66895 diff --git a/include/linux/a.out.h b/include/linux/a.out.h
66896 index e86dfca..40cc55f 100644
66897 --- a/include/linux/a.out.h
66898 +++ b/include/linux/a.out.h
66899 @@ -39,6 +39,14 @@ enum machine_type {
66900 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
66901 };
66902
66903 +/* Constants for the N_FLAGS field */
66904 +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
66905 +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
66906 +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
66907 +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
66908 +/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
66909 +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
66910 +
66911 #if !defined (N_MAGIC)
66912 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
66913 #endif
66914 diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
66915 index 817b237..62c10bc 100644
66916 --- a/include/linux/atmdev.h
66917 +++ b/include/linux/atmdev.h
66918 @@ -237,7 +237,7 @@ struct compat_atm_iobuf {
66919 #endif
66920
66921 struct k_atm_aal_stats {
66922 -#define __HANDLE_ITEM(i) atomic_t i
66923 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
66924 __AAL_STAT_ITEMS
66925 #undef __HANDLE_ITEM
66926 };
66927 diff --git a/include/linux/backlight.h b/include/linux/backlight.h
66928 index 0f5f578..8c4f884 100644
66929 --- a/include/linux/backlight.h
66930 +++ b/include/linux/backlight.h
66931 @@ -36,18 +36,18 @@ struct backlight_device;
66932 struct fb_info;
66933
66934 struct backlight_ops {
66935 - unsigned int options;
66936 + const unsigned int options;
66937
66938 #define BL_CORE_SUSPENDRESUME (1 << 0)
66939
66940 /* Notify the backlight driver some property has changed */
66941 - int (*update_status)(struct backlight_device *);
66942 + int (* const update_status)(struct backlight_device *);
66943 /* Return the current backlight brightness (accounting for power,
66944 fb_blank etc.) */
66945 - int (*get_brightness)(struct backlight_device *);
66946 + int (* const get_brightness)(struct backlight_device *);
66947 /* Check if given framebuffer device is the one bound to this backlight;
66948 return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */
66949 - int (*check_fb)(struct fb_info *);
66950 + int (* const check_fb)(struct fb_info *);
66951 };
66952
66953 /* This structure defines all the properties of a backlight */
66954 @@ -86,7 +86,7 @@ struct backlight_device {
66955 registered this device has been unloaded, and if class_get_devdata()
66956 points to something in the body of that driver, it is also invalid. */
66957 struct mutex ops_lock;
66958 - struct backlight_ops *ops;
66959 + const struct backlight_ops *ops;
66960
66961 /* The framebuffer notifier block */
66962 struct notifier_block fb_notif;
66963 @@ -103,7 +103,7 @@ static inline void backlight_update_status(struct backlight_device *bd)
66964 }
66965
66966 extern struct backlight_device *backlight_device_register(const char *name,
66967 - struct device *dev, void *devdata, struct backlight_ops *ops);
66968 + struct device *dev, void *devdata, const struct backlight_ops *ops);
66969 extern void backlight_device_unregister(struct backlight_device *bd);
66970 extern void backlight_force_update(struct backlight_device *bd,
66971 enum backlight_update_reason reason);
66972 diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
66973 index a3d802e..93a2ef4 100644
66974 --- a/include/linux/binfmts.h
66975 +++ b/include/linux/binfmts.h
66976 @@ -18,7 +18,7 @@ struct pt_regs;
66977 #define BINPRM_BUF_SIZE 128
66978
66979 #ifdef __KERNEL__
66980 -#include <linux/list.h>
66981 +#include <linux/sched.h>
66982
66983 #define CORENAME_MAX_SIZE 128
66984
66985 @@ -58,6 +58,7 @@ struct linux_binprm{
66986 unsigned interp_flags;
66987 unsigned interp_data;
66988 unsigned long loader, exec;
66989 + char tcomm[TASK_COMM_LEN];
66990 };
66991
66992 extern void acct_arg_size(struct linux_binprm *bprm, unsigned long pages);
66993 @@ -83,6 +84,7 @@ struct linux_binfmt {
66994 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
66995 int (*load_shlib)(struct file *);
66996 int (*core_dump)(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
66997 + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
66998 unsigned long min_coredump; /* minimal dump size */
66999 int hasvdso;
67000 };
67001 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
67002 index 5eb6cb0..a2906d2 100644
67003 --- a/include/linux/blkdev.h
67004 +++ b/include/linux/blkdev.h
67005 @@ -1281,7 +1281,7 @@ struct block_device_operations {
67006 int (*revalidate_disk) (struct gendisk *);
67007 int (*getgeo)(struct block_device *, struct hd_geometry *);
67008 struct module *owner;
67009 -};
67010 +} __do_const;
67011
67012 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
67013 unsigned long);
67014 diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
67015 index 3b73b99..629d21b 100644
67016 --- a/include/linux/blktrace_api.h
67017 +++ b/include/linux/blktrace_api.h
67018 @@ -160,7 +160,7 @@ struct blk_trace {
67019 struct dentry *dir;
67020 struct dentry *dropped_file;
67021 struct dentry *msg_file;
67022 - atomic_t dropped;
67023 + atomic_unchecked_t dropped;
67024 };
67025
67026 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
67027 diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
67028 index 83195fb..0b0f77d 100644
67029 --- a/include/linux/byteorder/little_endian.h
67030 +++ b/include/linux/byteorder/little_endian.h
67031 @@ -42,51 +42,51 @@
67032
67033 static inline __le64 __cpu_to_le64p(const __u64 *p)
67034 {
67035 - return (__force __le64)*p;
67036 + return (__force const __le64)*p;
67037 }
67038 static inline __u64 __le64_to_cpup(const __le64 *p)
67039 {
67040 - return (__force __u64)*p;
67041 + return (__force const __u64)*p;
67042 }
67043 static inline __le32 __cpu_to_le32p(const __u32 *p)
67044 {
67045 - return (__force __le32)*p;
67046 + return (__force const __le32)*p;
67047 }
67048 static inline __u32 __le32_to_cpup(const __le32 *p)
67049 {
67050 - return (__force __u32)*p;
67051 + return (__force const __u32)*p;
67052 }
67053 static inline __le16 __cpu_to_le16p(const __u16 *p)
67054 {
67055 - return (__force __le16)*p;
67056 + return (__force const __le16)*p;
67057 }
67058 static inline __u16 __le16_to_cpup(const __le16 *p)
67059 {
67060 - return (__force __u16)*p;
67061 + return (__force const __u16)*p;
67062 }
67063 static inline __be64 __cpu_to_be64p(const __u64 *p)
67064 {
67065 - return (__force __be64)__swab64p(p);
67066 + return (__force const __be64)__swab64p(p);
67067 }
67068 static inline __u64 __be64_to_cpup(const __be64 *p)
67069 {
67070 - return __swab64p((__u64 *)p);
67071 + return __swab64p((const __u64 *)p);
67072 }
67073 static inline __be32 __cpu_to_be32p(const __u32 *p)
67074 {
67075 - return (__force __be32)__swab32p(p);
67076 + return (__force const __be32)__swab32p(p);
67077 }
67078 static inline __u32 __be32_to_cpup(const __be32 *p)
67079 {
67080 - return __swab32p((__u32 *)p);
67081 + return __swab32p((const __u32 *)p);
67082 }
67083 static inline __be16 __cpu_to_be16p(const __u16 *p)
67084 {
67085 - return (__force __be16)__swab16p(p);
67086 + return (__force const __be16)__swab16p(p);
67087 }
67088 static inline __u16 __be16_to_cpup(const __be16 *p)
67089 {
67090 - return __swab16p((__u16 *)p);
67091 + return __swab16p((const __u16 *)p);
67092 }
67093 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
67094 #define __le64_to_cpus(x) do { (void)(x); } while (0)
67095 diff --git a/include/linux/cache.h b/include/linux/cache.h
67096 index 97e2488..e7576b9 100644
67097 --- a/include/linux/cache.h
67098 +++ b/include/linux/cache.h
67099 @@ -16,6 +16,10 @@
67100 #define __read_mostly
67101 #endif
67102
67103 +#ifndef __read_only
67104 +#define __read_only __read_mostly
67105 +#endif
67106 +
67107 #ifndef ____cacheline_aligned
67108 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
67109 #endif
67110 diff --git a/include/linux/capability.h b/include/linux/capability.h
67111 index c8f2a5f7..1618a5c 100644
67112 --- a/include/linux/capability.h
67113 +++ b/include/linux/capability.h
67114 @@ -563,6 +563,7 @@ extern const kernel_cap_t __cap_init_eff_set;
67115 (security_real_capable_noaudit((t), (cap)) == 0)
67116
67117 extern int capable(int cap);
67118 +int capable_nolog(int cap);
67119
67120 /* audit system wants to get cap info from files as well */
67121 struct dentry;
67122 diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
67123 index 450fa59..86019fb 100644
67124 --- a/include/linux/compiler-gcc4.h
67125 +++ b/include/linux/compiler-gcc4.h
67126 @@ -36,4 +36,16 @@
67127 the kernel context */
67128 #define __cold __attribute__((__cold__))
67129
67130 +#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
67131 +#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
67132 +#define __bos0(ptr) __bos((ptr), 0)
67133 +#define __bos1(ptr) __bos((ptr), 1)
67134 +
67135 +#if __GNUC_MINOR__ >= 5
67136 +#ifdef CONSTIFY_PLUGIN
67137 +#define __no_const __attribute__((no_const))
67138 +#define __do_const __attribute__((do_const))
67139 +#endif
67140 +#endif
67141 +
67142 #endif
67143 diff --git a/include/linux/compiler.h b/include/linux/compiler.h
67144 index 04fb513..fd6477b 100644
67145 --- a/include/linux/compiler.h
67146 +++ b/include/linux/compiler.h
67147 @@ -5,11 +5,14 @@
67148
67149 #ifdef __CHECKER__
67150 # define __user __attribute__((noderef, address_space(1)))
67151 +# define __force_user __force __user
67152 # define __kernel /* default address space */
67153 +# define __force_kernel __force __kernel
67154 # define __safe __attribute__((safe))
67155 # define __force __attribute__((force))
67156 # define __nocast __attribute__((nocast))
67157 # define __iomem __attribute__((noderef, address_space(2)))
67158 +# define __force_iomem __force __iomem
67159 # define __acquires(x) __attribute__((context(x,0,1)))
67160 # define __releases(x) __attribute__((context(x,1,0)))
67161 # define __acquire(x) __context__(x,1)
67162 @@ -17,13 +20,34 @@
67163 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
67164 extern void __chk_user_ptr(const volatile void __user *);
67165 extern void __chk_io_ptr(const volatile void __iomem *);
67166 +#elif defined(CHECKER_PLUGIN)
67167 +//# define __user
67168 +//# define __force_user
67169 +//# define __kernel
67170 +//# define __force_kernel
67171 +# define __safe
67172 +# define __force
67173 +# define __nocast
67174 +# define __iomem
67175 +# define __force_iomem
67176 +# define __chk_user_ptr(x) (void)0
67177 +# define __chk_io_ptr(x) (void)0
67178 +# define __builtin_warning(x, y...) (1)
67179 +# define __acquires(x)
67180 +# define __releases(x)
67181 +# define __acquire(x) (void)0
67182 +# define __release(x) (void)0
67183 +# define __cond_lock(x,c) (c)
67184 #else
67185 # define __user
67186 +# define __force_user
67187 # define __kernel
67188 +# define __force_kernel
67189 # define __safe
67190 # define __force
67191 # define __nocast
67192 # define __iomem
67193 +# define __force_iomem
67194 # define __chk_user_ptr(x) (void)0
67195 # define __chk_io_ptr(x) (void)0
67196 # define __builtin_warning(x, y...) (1)
67197 @@ -247,6 +271,14 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
67198 # define __attribute_const__ /* unimplemented */
67199 #endif
67200
67201 +#ifndef __no_const
67202 +# define __no_const
67203 +#endif
67204 +
67205 +#ifndef __do_const
67206 +# define __do_const
67207 +#endif
67208 +
67209 /*
67210 * Tell gcc if a function is cold. The compiler will assume any path
67211 * directly leading to the call is unlikely.
67212 @@ -256,6 +288,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
67213 #define __cold
67214 #endif
67215
67216 +#ifndef __alloc_size
67217 +#define __alloc_size(...)
67218 +#endif
67219 +
67220 +#ifndef __bos
67221 +#define __bos(ptr, arg)
67222 +#endif
67223 +
67224 +#ifndef __bos0
67225 +#define __bos0(ptr)
67226 +#endif
67227 +
67228 +#ifndef __bos1
67229 +#define __bos1(ptr)
67230 +#endif
67231 +
67232 /* Simple shorthand for a section definition */
67233 #ifndef __section
67234 # define __section(S) __attribute__ ((__section__(#S)))
67235 @@ -278,6 +326,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
67236 * use is to mediate communication between process-level code and irq/NMI
67237 * handlers, all running on the same CPU.
67238 */
67239 -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
67240 +#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
67241 +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
67242
67243 #endif /* __LINUX_COMPILER_H */
67244 diff --git a/include/linux/crypto.h b/include/linux/crypto.h
67245 index fd92988..a3164bd 100644
67246 --- a/include/linux/crypto.h
67247 +++ b/include/linux/crypto.h
67248 @@ -394,7 +394,7 @@ struct cipher_tfm {
67249 const u8 *key, unsigned int keylen);
67250 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
67251 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
67252 -};
67253 +} __no_const;
67254
67255 struct hash_tfm {
67256 int (*init)(struct hash_desc *desc);
67257 @@ -415,13 +415,13 @@ struct compress_tfm {
67258 int (*cot_decompress)(struct crypto_tfm *tfm,
67259 const u8 *src, unsigned int slen,
67260 u8 *dst, unsigned int *dlen);
67261 -};
67262 +} __no_const;
67263
67264 struct rng_tfm {
67265 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
67266 unsigned int dlen);
67267 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
67268 -};
67269 +} __no_const;
67270
67271 #define crt_ablkcipher crt_u.ablkcipher
67272 #define crt_aead crt_u.aead
67273 diff --git a/include/linux/dcache.h b/include/linux/dcache.h
67274 index 30b93b2..cd7a8db 100644
67275 --- a/include/linux/dcache.h
67276 +++ b/include/linux/dcache.h
67277 @@ -119,6 +119,8 @@ struct dentry {
67278 unsigned char d_iname[DNAME_INLINE_LEN_MIN]; /* small names */
67279 };
67280
67281 +#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
67282 +
67283 /*
67284 * dentry->d_lock spinlock nesting subclasses:
67285 *
67286 diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
67287 index 3e9bd6a..f4e1aa0 100644
67288 --- a/include/linux/decompress/mm.h
67289 +++ b/include/linux/decompress/mm.h
67290 @@ -78,7 +78,7 @@ static void free(void *where)
67291 * warnings when not needed (indeed large_malloc / large_free are not
67292 * needed by inflate */
67293
67294 -#define malloc(a) kmalloc(a, GFP_KERNEL)
67295 +#define malloc(a) kmalloc((a), GFP_KERNEL)
67296 #define free(a) kfree(a)
67297
67298 #define large_malloc(a) vmalloc(a)
67299 diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
67300 index 91b7618..92a93d32 100644
67301 --- a/include/linux/dma-mapping.h
67302 +++ b/include/linux/dma-mapping.h
67303 @@ -16,51 +16,51 @@ enum dma_data_direction {
67304 };
67305
67306 struct dma_map_ops {
67307 - void* (*alloc_coherent)(struct device *dev, size_t size,
67308 + void* (* const alloc_coherent)(struct device *dev, size_t size,
67309 dma_addr_t *dma_handle, gfp_t gfp);
67310 - void (*free_coherent)(struct device *dev, size_t size,
67311 + void (* const free_coherent)(struct device *dev, size_t size,
67312 void *vaddr, dma_addr_t dma_handle);
67313 - dma_addr_t (*map_page)(struct device *dev, struct page *page,
67314 + dma_addr_t (* const map_page)(struct device *dev, struct page *page,
67315 unsigned long offset, size_t size,
67316 enum dma_data_direction dir,
67317 struct dma_attrs *attrs);
67318 - void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
67319 + void (* const unmap_page)(struct device *dev, dma_addr_t dma_handle,
67320 size_t size, enum dma_data_direction dir,
67321 struct dma_attrs *attrs);
67322 - int (*map_sg)(struct device *dev, struct scatterlist *sg,
67323 + int (* const map_sg)(struct device *dev, struct scatterlist *sg,
67324 int nents, enum dma_data_direction dir,
67325 struct dma_attrs *attrs);
67326 - void (*unmap_sg)(struct device *dev,
67327 + void (* const unmap_sg)(struct device *dev,
67328 struct scatterlist *sg, int nents,
67329 enum dma_data_direction dir,
67330 struct dma_attrs *attrs);
67331 - void (*sync_single_for_cpu)(struct device *dev,
67332 + void (* const sync_single_for_cpu)(struct device *dev,
67333 dma_addr_t dma_handle, size_t size,
67334 enum dma_data_direction dir);
67335 - void (*sync_single_for_device)(struct device *dev,
67336 + void (* const sync_single_for_device)(struct device *dev,
67337 dma_addr_t dma_handle, size_t size,
67338 enum dma_data_direction dir);
67339 - void (*sync_single_range_for_cpu)(struct device *dev,
67340 + void (* const sync_single_range_for_cpu)(struct device *dev,
67341 dma_addr_t dma_handle,
67342 unsigned long offset,
67343 size_t size,
67344 enum dma_data_direction dir);
67345 - void (*sync_single_range_for_device)(struct device *dev,
67346 + void (* const sync_single_range_for_device)(struct device *dev,
67347 dma_addr_t dma_handle,
67348 unsigned long offset,
67349 size_t size,
67350 enum dma_data_direction dir);
67351 - void (*sync_sg_for_cpu)(struct device *dev,
67352 + void (* const sync_sg_for_cpu)(struct device *dev,
67353 struct scatterlist *sg, int nents,
67354 enum dma_data_direction dir);
67355 - void (*sync_sg_for_device)(struct device *dev,
67356 + void (* const sync_sg_for_device)(struct device *dev,
67357 struct scatterlist *sg, int nents,
67358 enum dma_data_direction dir);
67359 - int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
67360 - int (*dma_supported)(struct device *dev, u64 mask);
67361 + int (* const mapping_error)(struct device *dev, dma_addr_t dma_addr);
67362 + int (* const dma_supported)(struct device *dev, u64 mask);
67363 int (*set_dma_mask)(struct device *dev, u64 mask);
67364 int is_phys;
67365 -};
67366 +} __do_const;
67367
67368 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
67369
67370 diff --git a/include/linux/dst.h b/include/linux/dst.h
67371 index e26fed8..b976d9f 100644
67372 --- a/include/linux/dst.h
67373 +++ b/include/linux/dst.h
67374 @@ -380,7 +380,7 @@ struct dst_node
67375 struct thread_pool *pool;
67376
67377 /* Transaction IDs live here */
67378 - atomic_long_t gen;
67379 + atomic_long_unchecked_t gen;
67380
67381 /*
67382 * How frequently and how many times transaction
67383 diff --git a/include/linux/elf.h b/include/linux/elf.h
67384 index 90a4ed0..d652617 100644
67385 --- a/include/linux/elf.h
67386 +++ b/include/linux/elf.h
67387 @@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
67388 #define PT_GNU_EH_FRAME 0x6474e550
67389
67390 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
67391 +#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
67392 +
67393 +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
67394 +
67395 +/* Constants for the e_flags field */
67396 +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
67397 +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
67398 +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
67399 +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
67400 +/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
67401 +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
67402
67403 /* These constants define the different elf file types */
67404 #define ET_NONE 0
67405 @@ -84,6 +95,8 @@ typedef __s64 Elf64_Sxword;
67406 #define DT_DEBUG 21
67407 #define DT_TEXTREL 22
67408 #define DT_JMPREL 23
67409 +#define DT_FLAGS 30
67410 + #define DF_TEXTREL 0x00000004
67411 #define DT_ENCODING 32
67412 #define OLD_DT_LOOS 0x60000000
67413 #define DT_LOOS 0x6000000d
67414 @@ -230,6 +243,19 @@ typedef struct elf64_hdr {
67415 #define PF_W 0x2
67416 #define PF_X 0x1
67417
67418 +#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
67419 +#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
67420 +#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
67421 +#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
67422 +#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
67423 +#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
67424 +/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
67425 +/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
67426 +#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
67427 +#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
67428 +#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
67429 +#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
67430 +
67431 typedef struct elf32_phdr{
67432 Elf32_Word p_type;
67433 Elf32_Off p_offset;
67434 @@ -322,6 +348,8 @@ typedef struct elf64_shdr {
67435 #define EI_OSABI 7
67436 #define EI_PAD 8
67437
67438 +#define EI_PAX 14
67439 +
67440 #define ELFMAG0 0x7f /* EI_MAG */
67441 #define ELFMAG1 'E'
67442 #define ELFMAG2 'L'
67443 @@ -386,6 +414,7 @@ extern Elf32_Dyn _DYNAMIC [];
67444 #define elf_phdr elf32_phdr
67445 #define elf_note elf32_note
67446 #define elf_addr_t Elf32_Off
67447 +#define elf_dyn Elf32_Dyn
67448
67449 #else
67450
67451 @@ -394,6 +423,7 @@ extern Elf64_Dyn _DYNAMIC [];
67452 #define elf_phdr elf64_phdr
67453 #define elf_note elf64_note
67454 #define elf_addr_t Elf64_Off
67455 +#define elf_dyn Elf64_Dyn
67456
67457 #endif
67458
67459 diff --git a/include/linux/fs.h b/include/linux/fs.h
67460 index 1b9a47a..6fe2934 100644
67461 --- a/include/linux/fs.h
67462 +++ b/include/linux/fs.h
67463 @@ -568,41 +568,41 @@ typedef int (*read_actor_t)(read_descriptor_t *, struct page *,
67464 unsigned long, unsigned long);
67465
67466 struct address_space_operations {
67467 - int (*writepage)(struct page *page, struct writeback_control *wbc);
67468 - int (*readpage)(struct file *, struct page *);
67469 - void (*sync_page)(struct page *);
67470 + int (* const writepage)(struct page *page, struct writeback_control *wbc);
67471 + int (* const readpage)(struct file *, struct page *);
67472 + void (* const sync_page)(struct page *);
67473
67474 /* Write back some dirty pages from this mapping. */
67475 - int (*writepages)(struct address_space *, struct writeback_control *);
67476 + int (* const writepages)(struct address_space *, struct writeback_control *);
67477
67478 /* Set a page dirty. Return true if this dirtied it */
67479 - int (*set_page_dirty)(struct page *page);
67480 + int (* const set_page_dirty)(struct page *page);
67481
67482 - int (*readpages)(struct file *filp, struct address_space *mapping,
67483 + int (* const readpages)(struct file *filp, struct address_space *mapping,
67484 struct list_head *pages, unsigned nr_pages);
67485
67486 - int (*write_begin)(struct file *, struct address_space *mapping,
67487 + int (* const write_begin)(struct file *, struct address_space *mapping,
67488 loff_t pos, unsigned len, unsigned flags,
67489 struct page **pagep, void **fsdata);
67490 - int (*write_end)(struct file *, struct address_space *mapping,
67491 + int (* const write_end)(struct file *, struct address_space *mapping,
67492 loff_t pos, unsigned len, unsigned copied,
67493 struct page *page, void *fsdata);
67494
67495 /* Unfortunately this kludge is needed for FIBMAP. Don't use it */
67496 - sector_t (*bmap)(struct address_space *, sector_t);
67497 - void (*invalidatepage) (struct page *, unsigned long);
67498 - int (*releasepage) (struct page *, gfp_t);
67499 - ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
67500 + sector_t (* const bmap)(struct address_space *, sector_t);
67501 + void (* const invalidatepage) (struct page *, unsigned long);
67502 + int (* const releasepage) (struct page *, gfp_t);
67503 + ssize_t (* const direct_IO)(int, struct kiocb *, const struct iovec *iov,
67504 loff_t offset, unsigned long nr_segs);
67505 - int (*get_xip_mem)(struct address_space *, pgoff_t, int,
67506 + int (* const get_xip_mem)(struct address_space *, pgoff_t, int,
67507 void **, unsigned long *);
67508 /* migrate the contents of a page to the specified target */
67509 - int (*migratepage) (struct address_space *,
67510 + int (* const migratepage) (struct address_space *,
67511 struct page *, struct page *);
67512 - int (*launder_page) (struct page *);
67513 - int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
67514 + int (* const launder_page) (struct page *);
67515 + int (* const is_partially_uptodate) (struct page *, read_descriptor_t *,
67516 unsigned long);
67517 - int (*error_remove_page)(struct address_space *, struct page *);
67518 + int (* const error_remove_page)(struct address_space *, struct page *);
67519 };
67520
67521 /*
67522 @@ -1031,19 +1031,19 @@ static inline int file_check_writeable(struct file *filp)
67523 typedef struct files_struct *fl_owner_t;
67524
67525 struct file_lock_operations {
67526 - void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
67527 - void (*fl_release_private)(struct file_lock *);
67528 + void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
67529 + void (* const fl_release_private)(struct file_lock *);
67530 };
67531
67532 struct lock_manager_operations {
67533 - int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
67534 - void (*fl_notify)(struct file_lock *); /* unblock callback */
67535 - int (*fl_grant)(struct file_lock *, struct file_lock *, int);
67536 - void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
67537 - void (*fl_release_private)(struct file_lock *);
67538 - void (*fl_break)(struct file_lock *);
67539 - int (*fl_mylease)(struct file_lock *, struct file_lock *);
67540 - int (*fl_change)(struct file_lock **, int);
67541 + int (* const fl_compare_owner)(struct file_lock *, struct file_lock *);
67542 + void (* const fl_notify)(struct file_lock *); /* unblock callback */
67543 + int (* const fl_grant)(struct file_lock *, struct file_lock *, int);
67544 + void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
67545 + void (* const fl_release_private)(struct file_lock *);
67546 + void (* const fl_break)(struct file_lock *);
67547 + int (* const fl_mylease)(struct file_lock *, struct file_lock *);
67548 + int (* const fl_change)(struct file_lock **, int);
67549 };
67550
67551 struct lock_manager {
67552 @@ -1442,7 +1442,7 @@ struct fiemap_extent_info {
67553 unsigned int fi_flags; /* Flags as passed from user */
67554 unsigned int fi_extents_mapped; /* Number of mapped extents */
67555 unsigned int fi_extents_max; /* Size of fiemap_extent array */
67556 - struct fiemap_extent *fi_extents_start; /* Start of fiemap_extent
67557 + struct fiemap_extent __user *fi_extents_start; /* Start of fiemap_extent
67558 * array */
67559 };
67560 int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
67561 @@ -1512,7 +1512,8 @@ struct file_operations {
67562 ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int);
67563 ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
67564 int (*setlease)(struct file *, long, struct file_lock **);
67565 -};
67566 +} __do_const;
67567 +typedef struct file_operations __no_const file_operations_no_const;
67568
67569 struct inode_operations {
67570 int (*create) (struct inode *,struct dentry *,int, struct nameidata *);
67571 @@ -1559,30 +1560,30 @@ extern ssize_t vfs_writev(struct file *, const struct iovec __user *,
67572 unsigned long, loff_t *);
67573
67574 struct super_operations {
67575 - struct inode *(*alloc_inode)(struct super_block *sb);
67576 - void (*destroy_inode)(struct inode *);
67577 + struct inode *(* const alloc_inode)(struct super_block *sb);
67578 + void (* const destroy_inode)(struct inode *);
67579
67580 - void (*dirty_inode) (struct inode *);
67581 - int (*write_inode) (struct inode *, int);
67582 - void (*drop_inode) (struct inode *);
67583 - void (*delete_inode) (struct inode *);
67584 - void (*put_super) (struct super_block *);
67585 - void (*write_super) (struct super_block *);
67586 - int (*sync_fs)(struct super_block *sb, int wait);
67587 - int (*freeze_fs) (struct super_block *);
67588 - int (*unfreeze_fs) (struct super_block *);
67589 - int (*statfs) (struct dentry *, struct kstatfs *);
67590 - int (*remount_fs) (struct super_block *, int *, char *);
67591 - void (*clear_inode) (struct inode *);
67592 - void (*umount_begin) (struct super_block *);
67593 + void (* const dirty_inode) (struct inode *);
67594 + int (* const write_inode) (struct inode *, int);
67595 + void (* const drop_inode) (struct inode *);
67596 + void (* const delete_inode) (struct inode *);
67597 + void (* const put_super) (struct super_block *);
67598 + void (* const write_super) (struct super_block *);
67599 + int (* const sync_fs)(struct super_block *sb, int wait);
67600 + int (* const freeze_fs) (struct super_block *);
67601 + int (* const unfreeze_fs) (struct super_block *);
67602 + int (* const statfs) (struct dentry *, struct kstatfs *);
67603 + int (* const remount_fs) (struct super_block *, int *, char *);
67604 + void (* const clear_inode) (struct inode *);
67605 + void (* const umount_begin) (struct super_block *);
67606
67607 - int (*show_options)(struct seq_file *, struct vfsmount *);
67608 - int (*show_stats)(struct seq_file *, struct vfsmount *);
67609 + int (* const show_options)(struct seq_file *, struct vfsmount *);
67610 + int (* const show_stats)(struct seq_file *, struct vfsmount *);
67611 #ifdef CONFIG_QUOTA
67612 - ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
67613 - ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
67614 + ssize_t (* const quota_read)(struct super_block *, int, char *, size_t, loff_t);
67615 + ssize_t (* const quota_write)(struct super_block *, int, const char *, size_t, loff_t);
67616 #endif
67617 - int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
67618 + int (* const bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
67619 };
67620
67621 /*
67622 diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
67623 index 78a05bf..2a7d3e1 100644
67624 --- a/include/linux/fs_struct.h
67625 +++ b/include/linux/fs_struct.h
67626 @@ -4,7 +4,7 @@
67627 #include <linux/path.h>
67628
67629 struct fs_struct {
67630 - int users;
67631 + atomic_t users;
67632 rwlock_t lock;
67633 int umask;
67634 int in_exec;
67635 diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
67636 index 7be0c6f..2f63a2b 100644
67637 --- a/include/linux/fscache-cache.h
67638 +++ b/include/linux/fscache-cache.h
67639 @@ -116,7 +116,7 @@ struct fscache_operation {
67640 #endif
67641 };
67642
67643 -extern atomic_t fscache_op_debug_id;
67644 +extern atomic_unchecked_t fscache_op_debug_id;
67645 extern const struct slow_work_ops fscache_op_slow_work_ops;
67646
67647 extern void fscache_enqueue_operation(struct fscache_operation *);
67648 @@ -134,7 +134,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
67649 fscache_operation_release_t release)
67650 {
67651 atomic_set(&op->usage, 1);
67652 - op->debug_id = atomic_inc_return(&fscache_op_debug_id);
67653 + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
67654 op->release = release;
67655 INIT_LIST_HEAD(&op->pend_link);
67656 fscache_set_op_state(op, "Init");
67657 diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
67658 index 4d6f47b..00bcedb 100644
67659 --- a/include/linux/fsnotify_backend.h
67660 +++ b/include/linux/fsnotify_backend.h
67661 @@ -86,6 +86,7 @@ struct fsnotify_ops {
67662 void (*freeing_mark)(struct fsnotify_mark_entry *entry, struct fsnotify_group *group);
67663 void (*free_event_priv)(struct fsnotify_event_private_data *priv);
67664 };
67665 +typedef struct fsnotify_ops __no_const fsnotify_ops_no_const;
67666
67667 /*
67668 * A group is a "thing" that wants to receive notification about filesystem
67669 diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
67670 index 4ec5e67..42f1eb9 100644
67671 --- a/include/linux/ftrace_event.h
67672 +++ b/include/linux/ftrace_event.h
67673 @@ -163,7 +163,7 @@ extern int trace_define_field(struct ftrace_event_call *call,
67674 int filter_type);
67675 extern int trace_define_common_fields(struct ftrace_event_call *call);
67676
67677 -#define is_signed_type(type) (((type)(-1)) < 0)
67678 +#define is_signed_type(type) (((type)(-1)) < (type)1)
67679
67680 int trace_set_clr_event(const char *system, const char *event, int set);
67681
67682 diff --git a/include/linux/genhd.h b/include/linux/genhd.h
67683 index 297df45..b6a74ff 100644
67684 --- a/include/linux/genhd.h
67685 +++ b/include/linux/genhd.h
67686 @@ -161,7 +161,7 @@ struct gendisk {
67687
67688 struct timer_rand_state *random;
67689
67690 - atomic_t sync_io; /* RAID */
67691 + atomic_unchecked_t sync_io; /* RAID */
67692 struct work_struct async_notify;
67693 #ifdef CONFIG_BLK_DEV_INTEGRITY
67694 struct blk_integrity *integrity;
67695 diff --git a/include/linux/gracl.h b/include/linux/gracl.h
67696 new file mode 100644
67697 index 0000000..af663cf
67698 --- /dev/null
67699 +++ b/include/linux/gracl.h
67700 @@ -0,0 +1,319 @@
67701 +#ifndef GR_ACL_H
67702 +#define GR_ACL_H
67703 +
67704 +#include <linux/grdefs.h>
67705 +#include <linux/resource.h>
67706 +#include <linux/capability.h>
67707 +#include <linux/dcache.h>
67708 +#include <asm/resource.h>
67709 +
67710 +/* Major status information */
67711 +
67712 +#define GR_VERSION "grsecurity 2.9"
67713 +#define GRSECURITY_VERSION 0x2900
67714 +
67715 +enum {
67716 + GR_SHUTDOWN = 0,
67717 + GR_ENABLE = 1,
67718 + GR_SPROLE = 2,
67719 + GR_RELOAD = 3,
67720 + GR_SEGVMOD = 4,
67721 + GR_STATUS = 5,
67722 + GR_UNSPROLE = 6,
67723 + GR_PASSSET = 7,
67724 + GR_SPROLEPAM = 8,
67725 +};
67726 +
67727 +/* Password setup definitions
67728 + * kernel/grhash.c */
67729 +enum {
67730 + GR_PW_LEN = 128,
67731 + GR_SALT_LEN = 16,
67732 + GR_SHA_LEN = 32,
67733 +};
67734 +
67735 +enum {
67736 + GR_SPROLE_LEN = 64,
67737 +};
67738 +
67739 +enum {
67740 + GR_NO_GLOB = 0,
67741 + GR_REG_GLOB,
67742 + GR_CREATE_GLOB
67743 +};
67744 +
67745 +#define GR_NLIMITS 32
67746 +
67747 +/* Begin Data Structures */
67748 +
67749 +struct sprole_pw {
67750 + unsigned char *rolename;
67751 + unsigned char salt[GR_SALT_LEN];
67752 + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
67753 +};
67754 +
67755 +struct name_entry {
67756 + __u32 key;
67757 + ino_t inode;
67758 + dev_t device;
67759 + char *name;
67760 + __u16 len;
67761 + __u8 deleted;
67762 + struct name_entry *prev;
67763 + struct name_entry *next;
67764 +};
67765 +
67766 +struct inodev_entry {
67767 + struct name_entry *nentry;
67768 + struct inodev_entry *prev;
67769 + struct inodev_entry *next;
67770 +};
67771 +
67772 +struct acl_role_db {
67773 + struct acl_role_label **r_hash;
67774 + __u32 r_size;
67775 +};
67776 +
67777 +struct inodev_db {
67778 + struct inodev_entry **i_hash;
67779 + __u32 i_size;
67780 +};
67781 +
67782 +struct name_db {
67783 + struct name_entry **n_hash;
67784 + __u32 n_size;
67785 +};
67786 +
67787 +struct crash_uid {
67788 + uid_t uid;
67789 + unsigned long expires;
67790 +};
67791 +
67792 +struct gr_hash_struct {
67793 + void **table;
67794 + void **nametable;
67795 + void *first;
67796 + __u32 table_size;
67797 + __u32 used_size;
67798 + int type;
67799 +};
67800 +
67801 +/* Userspace Grsecurity ACL data structures */
67802 +
67803 +struct acl_subject_label {
67804 + char *filename;
67805 + ino_t inode;
67806 + dev_t device;
67807 + __u32 mode;
67808 + kernel_cap_t cap_mask;
67809 + kernel_cap_t cap_lower;
67810 + kernel_cap_t cap_invert_audit;
67811 +
67812 + struct rlimit res[GR_NLIMITS];
67813 + __u32 resmask;
67814 +
67815 + __u8 user_trans_type;
67816 + __u8 group_trans_type;
67817 + uid_t *user_transitions;
67818 + gid_t *group_transitions;
67819 + __u16 user_trans_num;
67820 + __u16 group_trans_num;
67821 +
67822 + __u32 sock_families[2];
67823 + __u32 ip_proto[8];
67824 + __u32 ip_type;
67825 + struct acl_ip_label **ips;
67826 + __u32 ip_num;
67827 + __u32 inaddr_any_override;
67828 +
67829 + __u32 crashes;
67830 + unsigned long expires;
67831 +
67832 + struct acl_subject_label *parent_subject;
67833 + struct gr_hash_struct *hash;
67834 + struct acl_subject_label *prev;
67835 + struct acl_subject_label *next;
67836 +
67837 + struct acl_object_label **obj_hash;
67838 + __u32 obj_hash_size;
67839 + __u16 pax_flags;
67840 +};
67841 +
67842 +struct role_allowed_ip {
67843 + __u32 addr;
67844 + __u32 netmask;
67845 +
67846 + struct role_allowed_ip *prev;
67847 + struct role_allowed_ip *next;
67848 +};
67849 +
67850 +struct role_transition {
67851 + char *rolename;
67852 +
67853 + struct role_transition *prev;
67854 + struct role_transition *next;
67855 +};
67856 +
67857 +struct acl_role_label {
67858 + char *rolename;
67859 + uid_t uidgid;
67860 + __u16 roletype;
67861 +
67862 + __u16 auth_attempts;
67863 + unsigned long expires;
67864 +
67865 + struct acl_subject_label *root_label;
67866 + struct gr_hash_struct *hash;
67867 +
67868 + struct acl_role_label *prev;
67869 + struct acl_role_label *next;
67870 +
67871 + struct role_transition *transitions;
67872 + struct role_allowed_ip *allowed_ips;
67873 + uid_t *domain_children;
67874 + __u16 domain_child_num;
67875 +
67876 + mode_t umask;
67877 +
67878 + struct acl_subject_label **subj_hash;
67879 + __u32 subj_hash_size;
67880 +};
67881 +
67882 +struct user_acl_role_db {
67883 + struct acl_role_label **r_table;
67884 + __u32 num_pointers; /* Number of allocations to track */
67885 + __u32 num_roles; /* Number of roles */
67886 + __u32 num_domain_children; /* Number of domain children */
67887 + __u32 num_subjects; /* Number of subjects */
67888 + __u32 num_objects; /* Number of objects */
67889 +};
67890 +
67891 +struct acl_object_label {
67892 + char *filename;
67893 + ino_t inode;
67894 + dev_t device;
67895 + __u32 mode;
67896 +
67897 + struct acl_subject_label *nested;
67898 + struct acl_object_label *globbed;
67899 +
67900 + /* next two structures not used */
67901 +
67902 + struct acl_object_label *prev;
67903 + struct acl_object_label *next;
67904 +};
67905 +
67906 +struct acl_ip_label {
67907 + char *iface;
67908 + __u32 addr;
67909 + __u32 netmask;
67910 + __u16 low, high;
67911 + __u8 mode;
67912 + __u32 type;
67913 + __u32 proto[8];
67914 +
67915 + /* next two structures not used */
67916 +
67917 + struct acl_ip_label *prev;
67918 + struct acl_ip_label *next;
67919 +};
67920 +
67921 +struct gr_arg {
67922 + struct user_acl_role_db role_db;
67923 + unsigned char pw[GR_PW_LEN];
67924 + unsigned char salt[GR_SALT_LEN];
67925 + unsigned char sum[GR_SHA_LEN];
67926 + unsigned char sp_role[GR_SPROLE_LEN];
67927 + struct sprole_pw *sprole_pws;
67928 + dev_t segv_device;
67929 + ino_t segv_inode;
67930 + uid_t segv_uid;
67931 + __u16 num_sprole_pws;
67932 + __u16 mode;
67933 +};
67934 +
67935 +struct gr_arg_wrapper {
67936 + struct gr_arg *arg;
67937 + __u32 version;
67938 + __u32 size;
67939 +};
67940 +
67941 +struct subject_map {
67942 + struct acl_subject_label *user;
67943 + struct acl_subject_label *kernel;
67944 + struct subject_map *prev;
67945 + struct subject_map *next;
67946 +};
67947 +
67948 +struct acl_subj_map_db {
67949 + struct subject_map **s_hash;
67950 + __u32 s_size;
67951 +};
67952 +
67953 +/* End Data Structures Section */
67954 +
67955 +/* Hash functions generated by empirical testing by Brad Spengler
67956 + Makes good use of the low bits of the inode. Generally 0-1 times
67957 + in loop for successful match. 0-3 for unsuccessful match.
67958 + Shift/add algorithm with modulus of table size and an XOR*/
67959 +
67960 +static __inline__ unsigned int
67961 +rhash(const uid_t uid, const __u16 type, const unsigned int sz)
67962 +{
67963 + return ((((uid + type) << (16 + type)) ^ uid) % sz);
67964 +}
67965 +
67966 + static __inline__ unsigned int
67967 +shash(const struct acl_subject_label *userp, const unsigned int sz)
67968 +{
67969 + return ((const unsigned long)userp % sz);
67970 +}
67971 +
67972 +static __inline__ unsigned int
67973 +fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
67974 +{
67975 + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
67976 +}
67977 +
67978 +static __inline__ unsigned int
67979 +nhash(const char *name, const __u16 len, const unsigned int sz)
67980 +{
67981 + return full_name_hash((const unsigned char *)name, len) % sz;
67982 +}
67983 +
67984 +#define FOR_EACH_ROLE_START(role) \
67985 + role = role_list; \
67986 + while (role) {
67987 +
67988 +#define FOR_EACH_ROLE_END(role) \
67989 + role = role->prev; \
67990 + }
67991 +
67992 +#define FOR_EACH_SUBJECT_START(role,subj,iter) \
67993 + subj = NULL; \
67994 + iter = 0; \
67995 + while (iter < role->subj_hash_size) { \
67996 + if (subj == NULL) \
67997 + subj = role->subj_hash[iter]; \
67998 + if (subj == NULL) { \
67999 + iter++; \
68000 + continue; \
68001 + }
68002 +
68003 +#define FOR_EACH_SUBJECT_END(subj,iter) \
68004 + subj = subj->next; \
68005 + if (subj == NULL) \
68006 + iter++; \
68007 + }
68008 +
68009 +
68010 +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
68011 + subj = role->hash->first; \
68012 + while (subj != NULL) {
68013 +
68014 +#define FOR_EACH_NESTED_SUBJECT_END(subj) \
68015 + subj = subj->next; \
68016 + }
68017 +
68018 +#endif
68019 +
68020 diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
68021 new file mode 100644
68022 index 0000000..323ecf2
68023 --- /dev/null
68024 +++ b/include/linux/gralloc.h
68025 @@ -0,0 +1,9 @@
68026 +#ifndef __GRALLOC_H
68027 +#define __GRALLOC_H
68028 +
68029 +void acl_free_all(void);
68030 +int acl_alloc_stack_init(unsigned long size);
68031 +void *acl_alloc(unsigned long len);
68032 +void *acl_alloc_num(unsigned long num, unsigned long len);
68033 +
68034 +#endif
68035 diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
68036 new file mode 100644
68037 index 0000000..70d6cd5
68038 --- /dev/null
68039 +++ b/include/linux/grdefs.h
68040 @@ -0,0 +1,140 @@
68041 +#ifndef GRDEFS_H
68042 +#define GRDEFS_H
68043 +
68044 +/* Begin grsecurity status declarations */
68045 +
68046 +enum {
68047 + GR_READY = 0x01,
68048 + GR_STATUS_INIT = 0x00 // disabled state
68049 +};
68050 +
68051 +/* Begin ACL declarations */
68052 +
68053 +/* Role flags */
68054 +
68055 +enum {
68056 + GR_ROLE_USER = 0x0001,
68057 + GR_ROLE_GROUP = 0x0002,
68058 + GR_ROLE_DEFAULT = 0x0004,
68059 + GR_ROLE_SPECIAL = 0x0008,
68060 + GR_ROLE_AUTH = 0x0010,
68061 + GR_ROLE_NOPW = 0x0020,
68062 + GR_ROLE_GOD = 0x0040,
68063 + GR_ROLE_LEARN = 0x0080,
68064 + GR_ROLE_TPE = 0x0100,
68065 + GR_ROLE_DOMAIN = 0x0200,
68066 + GR_ROLE_PAM = 0x0400,
68067 + GR_ROLE_PERSIST = 0x800
68068 +};
68069 +
68070 +/* ACL Subject and Object mode flags */
68071 +enum {
68072 + GR_DELETED = 0x80000000
68073 +};
68074 +
68075 +/* ACL Object-only mode flags */
68076 +enum {
68077 + GR_READ = 0x00000001,
68078 + GR_APPEND = 0x00000002,
68079 + GR_WRITE = 0x00000004,
68080 + GR_EXEC = 0x00000008,
68081 + GR_FIND = 0x00000010,
68082 + GR_INHERIT = 0x00000020,
68083 + GR_SETID = 0x00000040,
68084 + GR_CREATE = 0x00000080,
68085 + GR_DELETE = 0x00000100,
68086 + GR_LINK = 0x00000200,
68087 + GR_AUDIT_READ = 0x00000400,
68088 + GR_AUDIT_APPEND = 0x00000800,
68089 + GR_AUDIT_WRITE = 0x00001000,
68090 + GR_AUDIT_EXEC = 0x00002000,
68091 + GR_AUDIT_FIND = 0x00004000,
68092 + GR_AUDIT_INHERIT= 0x00008000,
68093 + GR_AUDIT_SETID = 0x00010000,
68094 + GR_AUDIT_CREATE = 0x00020000,
68095 + GR_AUDIT_DELETE = 0x00040000,
68096 + GR_AUDIT_LINK = 0x00080000,
68097 + GR_PTRACERD = 0x00100000,
68098 + GR_NOPTRACE = 0x00200000,
68099 + GR_SUPPRESS = 0x00400000,
68100 + GR_NOLEARN = 0x00800000,
68101 + GR_INIT_TRANSFER= 0x01000000
68102 +};
68103 +
68104 +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
68105 + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
68106 + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
68107 +
68108 +/* ACL subject-only mode flags */
68109 +enum {
68110 + GR_KILL = 0x00000001,
68111 + GR_VIEW = 0x00000002,
68112 + GR_PROTECTED = 0x00000004,
68113 + GR_LEARN = 0x00000008,
68114 + GR_OVERRIDE = 0x00000010,
68115 + /* just a placeholder, this mode is only used in userspace */
68116 + GR_DUMMY = 0x00000020,
68117 + GR_PROTSHM = 0x00000040,
68118 + GR_KILLPROC = 0x00000080,
68119 + GR_KILLIPPROC = 0x00000100,
68120 + /* just a placeholder, this mode is only used in userspace */
68121 + GR_NOTROJAN = 0x00000200,
68122 + GR_PROTPROCFD = 0x00000400,
68123 + GR_PROCACCT = 0x00000800,
68124 + GR_RELAXPTRACE = 0x00001000,
68125 + GR_NESTED = 0x00002000,
68126 + GR_INHERITLEARN = 0x00004000,
68127 + GR_PROCFIND = 0x00008000,
68128 + GR_POVERRIDE = 0x00010000,
68129 + GR_KERNELAUTH = 0x00020000,
68130 + GR_ATSECURE = 0x00040000,
68131 + GR_SHMEXEC = 0x00080000
68132 +};
68133 +
68134 +enum {
68135 + GR_PAX_ENABLE_SEGMEXEC = 0x0001,
68136 + GR_PAX_ENABLE_PAGEEXEC = 0x0002,
68137 + GR_PAX_ENABLE_MPROTECT = 0x0004,
68138 + GR_PAX_ENABLE_RANDMMAP = 0x0008,
68139 + GR_PAX_ENABLE_EMUTRAMP = 0x0010,
68140 + GR_PAX_DISABLE_SEGMEXEC = 0x0100,
68141 + GR_PAX_DISABLE_PAGEEXEC = 0x0200,
68142 + GR_PAX_DISABLE_MPROTECT = 0x0400,
68143 + GR_PAX_DISABLE_RANDMMAP = 0x0800,
68144 + GR_PAX_DISABLE_EMUTRAMP = 0x1000,
68145 +};
68146 +
68147 +enum {
68148 + GR_ID_USER = 0x01,
68149 + GR_ID_GROUP = 0x02,
68150 +};
68151 +
68152 +enum {
68153 + GR_ID_ALLOW = 0x01,
68154 + GR_ID_DENY = 0x02,
68155 +};
68156 +
68157 +#define GR_CRASH_RES 31
68158 +#define GR_UIDTABLE_MAX 500
68159 +
68160 +/* begin resource learning section */
68161 +enum {
68162 + GR_RLIM_CPU_BUMP = 60,
68163 + GR_RLIM_FSIZE_BUMP = 50000,
68164 + GR_RLIM_DATA_BUMP = 10000,
68165 + GR_RLIM_STACK_BUMP = 1000,
68166 + GR_RLIM_CORE_BUMP = 10000,
68167 + GR_RLIM_RSS_BUMP = 500000,
68168 + GR_RLIM_NPROC_BUMP = 1,
68169 + GR_RLIM_NOFILE_BUMP = 5,
68170 + GR_RLIM_MEMLOCK_BUMP = 50000,
68171 + GR_RLIM_AS_BUMP = 500000,
68172 + GR_RLIM_LOCKS_BUMP = 2,
68173 + GR_RLIM_SIGPENDING_BUMP = 5,
68174 + GR_RLIM_MSGQUEUE_BUMP = 10000,
68175 + GR_RLIM_NICE_BUMP = 1,
68176 + GR_RLIM_RTPRIO_BUMP = 1,
68177 + GR_RLIM_RTTIME_BUMP = 1000000
68178 +};
68179 +
68180 +#endif
68181 diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
68182 new file mode 100644
68183 index 0000000..3826b91
68184 --- /dev/null
68185 +++ b/include/linux/grinternal.h
68186 @@ -0,0 +1,219 @@
68187 +#ifndef __GRINTERNAL_H
68188 +#define __GRINTERNAL_H
68189 +
68190 +#ifdef CONFIG_GRKERNSEC
68191 +
68192 +#include <linux/fs.h>
68193 +#include <linux/mnt_namespace.h>
68194 +#include <linux/nsproxy.h>
68195 +#include <linux/gracl.h>
68196 +#include <linux/grdefs.h>
68197 +#include <linux/grmsg.h>
68198 +
68199 +void gr_add_learn_entry(const char *fmt, ...)
68200 + __attribute__ ((format (printf, 1, 2)));
68201 +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
68202 + const struct vfsmount *mnt);
68203 +__u32 gr_check_create(const struct dentry *new_dentry,
68204 + const struct dentry *parent,
68205 + const struct vfsmount *mnt, const __u32 mode);
68206 +int gr_check_protected_task(const struct task_struct *task);
68207 +__u32 to_gr_audit(const __u32 reqmode);
68208 +int gr_set_acls(const int type);
68209 +int gr_apply_subject_to_task(struct task_struct *task);
68210 +int gr_acl_is_enabled(void);
68211 +char gr_roletype_to_char(void);
68212 +
68213 +void gr_handle_alertkill(struct task_struct *task);
68214 +char *gr_to_filename(const struct dentry *dentry,
68215 + const struct vfsmount *mnt);
68216 +char *gr_to_filename1(const struct dentry *dentry,
68217 + const struct vfsmount *mnt);
68218 +char *gr_to_filename2(const struct dentry *dentry,
68219 + const struct vfsmount *mnt);
68220 +char *gr_to_filename3(const struct dentry *dentry,
68221 + const struct vfsmount *mnt);
68222 +
68223 +extern int grsec_enable_ptrace_readexec;
68224 +extern int grsec_enable_harden_ptrace;
68225 +extern int grsec_enable_link;
68226 +extern int grsec_enable_fifo;
68227 +extern int grsec_enable_shm;
68228 +extern int grsec_enable_execlog;
68229 +extern int grsec_enable_signal;
68230 +extern int grsec_enable_audit_ptrace;
68231 +extern int grsec_enable_forkfail;
68232 +extern int grsec_enable_time;
68233 +extern int grsec_enable_rofs;
68234 +extern int grsec_enable_chroot_shmat;
68235 +extern int grsec_enable_chroot_mount;
68236 +extern int grsec_enable_chroot_double;
68237 +extern int grsec_enable_chroot_pivot;
68238 +extern int grsec_enable_chroot_chdir;
68239 +extern int grsec_enable_chroot_chmod;
68240 +extern int grsec_enable_chroot_mknod;
68241 +extern int grsec_enable_chroot_fchdir;
68242 +extern int grsec_enable_chroot_nice;
68243 +extern int grsec_enable_chroot_execlog;
68244 +extern int grsec_enable_chroot_caps;
68245 +extern int grsec_enable_chroot_sysctl;
68246 +extern int grsec_enable_chroot_unix;
68247 +extern int grsec_enable_tpe;
68248 +extern int grsec_tpe_gid;
68249 +extern int grsec_enable_tpe_all;
68250 +extern int grsec_enable_tpe_invert;
68251 +extern int grsec_enable_socket_all;
68252 +extern int grsec_socket_all_gid;
68253 +extern int grsec_enable_socket_client;
68254 +extern int grsec_socket_client_gid;
68255 +extern int grsec_enable_socket_server;
68256 +extern int grsec_socket_server_gid;
68257 +extern int grsec_audit_gid;
68258 +extern int grsec_enable_group;
68259 +extern int grsec_enable_audit_textrel;
68260 +extern int grsec_enable_log_rwxmaps;
68261 +extern int grsec_enable_mount;
68262 +extern int grsec_enable_chdir;
68263 +extern int grsec_resource_logging;
68264 +extern int grsec_enable_blackhole;
68265 +extern int grsec_lastack_retries;
68266 +extern int grsec_enable_brute;
68267 +extern int grsec_lock;
68268 +
68269 +extern spinlock_t grsec_alert_lock;
68270 +extern unsigned long grsec_alert_wtime;
68271 +extern unsigned long grsec_alert_fyet;
68272 +
68273 +extern spinlock_t grsec_audit_lock;
68274 +
68275 +extern rwlock_t grsec_exec_file_lock;
68276 +
68277 +#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
68278 + gr_to_filename2((tsk)->exec_file->f_path.dentry, \
68279 + (tsk)->exec_file->f_vfsmnt) : "/")
68280 +
68281 +#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
68282 + gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
68283 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
68284 +
68285 +#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
68286 + gr_to_filename((tsk)->exec_file->f_path.dentry, \
68287 + (tsk)->exec_file->f_vfsmnt) : "/")
68288 +
68289 +#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
68290 + gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
68291 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
68292 +
68293 +#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
68294 +
68295 +#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
68296 +
68297 +#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
68298 + (task)->pid, (cred)->uid, \
68299 + (cred)->euid, (cred)->gid, (cred)->egid, \
68300 + gr_parent_task_fullpath(task), \
68301 + (task)->real_parent->comm, (task)->real_parent->pid, \
68302 + (pcred)->uid, (pcred)->euid, \
68303 + (pcred)->gid, (pcred)->egid
68304 +
68305 +#define GR_CHROOT_CAPS {{ \
68306 + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
68307 + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
68308 + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
68309 + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
68310 + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
68311 + CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
68312 + CAP_TO_MASK(CAP_MAC_ADMIN) }}
68313 +
68314 +#define security_learn(normal_msg,args...) \
68315 +({ \
68316 + read_lock(&grsec_exec_file_lock); \
68317 + gr_add_learn_entry(normal_msg "\n", ## args); \
68318 + read_unlock(&grsec_exec_file_lock); \
68319 +})
68320 +
68321 +enum {
68322 + GR_DO_AUDIT,
68323 + GR_DONT_AUDIT,
68324 + GR_DONT_AUDIT_GOOD
68325 +};
68326 +
68327 +enum {
68328 + GR_TTYSNIFF,
68329 + GR_RBAC,
68330 + GR_RBAC_STR,
68331 + GR_STR_RBAC,
68332 + GR_RBAC_MODE2,
68333 + GR_RBAC_MODE3,
68334 + GR_FILENAME,
68335 + GR_SYSCTL_HIDDEN,
68336 + GR_NOARGS,
68337 + GR_ONE_INT,
68338 + GR_ONE_INT_TWO_STR,
68339 + GR_ONE_STR,
68340 + GR_STR_INT,
68341 + GR_TWO_STR_INT,
68342 + GR_TWO_INT,
68343 + GR_TWO_U64,
68344 + GR_THREE_INT,
68345 + GR_FIVE_INT_TWO_STR,
68346 + GR_TWO_STR,
68347 + GR_THREE_STR,
68348 + GR_FOUR_STR,
68349 + GR_STR_FILENAME,
68350 + GR_FILENAME_STR,
68351 + GR_FILENAME_TWO_INT,
68352 + GR_FILENAME_TWO_INT_STR,
68353 + GR_TEXTREL,
68354 + GR_PTRACE,
68355 + GR_RESOURCE,
68356 + GR_CAP,
68357 + GR_SIG,
68358 + GR_SIG2,
68359 + GR_CRASH1,
68360 + GR_CRASH2,
68361 + GR_PSACCT,
68362 + GR_RWXMAP
68363 +};
68364 +
68365 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
68366 +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
68367 +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
68368 +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
68369 +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
68370 +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
68371 +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
68372 +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
68373 +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
68374 +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
68375 +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
68376 +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
68377 +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
68378 +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
68379 +#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
68380 +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
68381 +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
68382 +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
68383 +#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
68384 +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
68385 +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
68386 +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
68387 +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
68388 +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
68389 +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
68390 +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
68391 +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
68392 +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
68393 +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
68394 +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
68395 +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
68396 +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
68397 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
68398 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
68399 +#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
68400 +
68401 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
68402 +
68403 +#endif
68404 +
68405 +#endif
68406 diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
68407 new file mode 100644
68408 index 0000000..f885406
68409 --- /dev/null
68410 +++ b/include/linux/grmsg.h
68411 @@ -0,0 +1,109 @@
68412 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
68413 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
68414 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
68415 +#define GR_STOPMOD_MSG "denied modification of module state by "
68416 +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
68417 +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
68418 +#define GR_IOPERM_MSG "denied use of ioperm() by "
68419 +#define GR_IOPL_MSG "denied use of iopl() by "
68420 +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
68421 +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
68422 +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
68423 +#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
68424 +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
68425 +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
68426 +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
68427 +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
68428 +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
68429 +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
68430 +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
68431 +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
68432 +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
68433 +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
68434 +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
68435 +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
68436 +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
68437 +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
68438 +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
68439 +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
68440 +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
68441 +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
68442 +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
68443 +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
68444 +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
68445 +#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
68446 +#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
68447 +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
68448 +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
68449 +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
68450 +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
68451 +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
68452 +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
68453 +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
68454 +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
68455 +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
68456 +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
68457 +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
68458 +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
68459 +#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
68460 +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
68461 +#define GR_INITF_ACL_MSG "init_variables() failed %s by "
68462 +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
68463 +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
68464 +#define GR_SHUTS_ACL_MSG "shutdown auth success for "
68465 +#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
68466 +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
68467 +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
68468 +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
68469 +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
68470 +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
68471 +#define GR_ENABLEF_ACL_MSG "unable to load %s for "
68472 +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
68473 +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
68474 +#define GR_RELOADF_ACL_MSG "failed reload of %s for "
68475 +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
68476 +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
68477 +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
68478 +#define GR_SPROLEF_ACL_MSG "special role %s failure for "
68479 +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
68480 +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
68481 +#define GR_INVMODE_ACL_MSG "invalid mode %d by "
68482 +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
68483 +#define GR_FAILFORK_MSG "failed fork with errno %s by "
68484 +#define GR_NICE_CHROOT_MSG "denied priority change by "
68485 +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
68486 +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
68487 +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
68488 +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
68489 +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
68490 +#define GR_TIME_MSG "time set by "
68491 +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
68492 +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
68493 +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
68494 +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
68495 +#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
68496 +#define GR_BIND_MSG "denied bind() by "
68497 +#define GR_CONNECT_MSG "denied connect() by "
68498 +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
68499 +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
68500 +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
68501 +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
68502 +#define GR_CAP_ACL_MSG "use of %s denied for "
68503 +#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
68504 +#define GR_CAP_ACL_MSG2 "use of %s permitted for "
68505 +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
68506 +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
68507 +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
68508 +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
68509 +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
68510 +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
68511 +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
68512 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
68513 +#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
68514 +#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
68515 +#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
68516 +#define GR_VM86_MSG "denied use of vm86 by "
68517 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
68518 +#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
68519 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
68520 +#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
68521 diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
68522 new file mode 100644
68523 index 0000000..c1793ae
68524 --- /dev/null
68525 +++ b/include/linux/grsecurity.h
68526 @@ -0,0 +1,219 @@
68527 +#ifndef GR_SECURITY_H
68528 +#define GR_SECURITY_H
68529 +#include <linux/fs.h>
68530 +#include <linux/fs_struct.h>
68531 +#include <linux/binfmts.h>
68532 +#include <linux/gracl.h>
68533 +#include <linux/compat.h>
68534 +
68535 +/* notify of brain-dead configs */
68536 +#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
68537 +#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
68538 +#endif
68539 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
68540 +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
68541 +#endif
68542 +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
68543 +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
68544 +#endif
68545 +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
68546 +#error "CONFIG_PAX enabled, but no PaX options are enabled."
68547 +#endif
68548 +
68549 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
68550 +void gr_handle_brute_check(void);
68551 +void gr_handle_kernel_exploit(void);
68552 +int gr_process_user_ban(void);
68553 +
68554 +char gr_roletype_to_char(void);
68555 +
68556 +int gr_acl_enable_at_secure(void);
68557 +
68558 +int gr_check_user_change(int real, int effective, int fs);
68559 +int gr_check_group_change(int real, int effective, int fs);
68560 +
68561 +void gr_del_task_from_ip_table(struct task_struct *p);
68562 +
68563 +int gr_pid_is_chrooted(struct task_struct *p);
68564 +int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
68565 +int gr_handle_chroot_nice(void);
68566 +int gr_handle_chroot_sysctl(const int op);
68567 +int gr_handle_chroot_setpriority(struct task_struct *p,
68568 + const int niceval);
68569 +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
68570 +int gr_handle_chroot_chroot(const struct dentry *dentry,
68571 + const struct vfsmount *mnt);
68572 +void gr_handle_chroot_chdir(struct path *path);
68573 +int gr_handle_chroot_chmod(const struct dentry *dentry,
68574 + const struct vfsmount *mnt, const int mode);
68575 +int gr_handle_chroot_mknod(const struct dentry *dentry,
68576 + const struct vfsmount *mnt, const int mode);
68577 +int gr_handle_chroot_mount(const struct dentry *dentry,
68578 + const struct vfsmount *mnt,
68579 + const char *dev_name);
68580 +int gr_handle_chroot_pivot(void);
68581 +int gr_handle_chroot_unix(const pid_t pid);
68582 +
68583 +int gr_handle_rawio(const struct inode *inode);
68584 +
68585 +void gr_handle_ioperm(void);
68586 +void gr_handle_iopl(void);
68587 +
68588 +umode_t gr_acl_umask(void);
68589 +
68590 +int gr_tpe_allow(const struct file *file);
68591 +
68592 +void gr_set_chroot_entries(struct task_struct *task, struct path *path);
68593 +void gr_clear_chroot_entries(struct task_struct *task);
68594 +
68595 +void gr_log_forkfail(const int retval);
68596 +void gr_log_timechange(void);
68597 +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
68598 +void gr_log_chdir(const struct dentry *dentry,
68599 + const struct vfsmount *mnt);
68600 +void gr_log_chroot_exec(const struct dentry *dentry,
68601 + const struct vfsmount *mnt);
68602 +void gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv);
68603 +#ifdef CONFIG_COMPAT
68604 +void gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv);
68605 +#endif
68606 +void gr_log_remount(const char *devname, const int retval);
68607 +void gr_log_unmount(const char *devname, const int retval);
68608 +void gr_log_mount(const char *from, const char *to, const int retval);
68609 +void gr_log_textrel(struct vm_area_struct *vma);
68610 +void gr_log_rwxmmap(struct file *file);
68611 +void gr_log_rwxmprotect(struct file *file);
68612 +
68613 +int gr_handle_follow_link(const struct inode *parent,
68614 + const struct inode *inode,
68615 + const struct dentry *dentry,
68616 + const struct vfsmount *mnt);
68617 +int gr_handle_fifo(const struct dentry *dentry,
68618 + const struct vfsmount *mnt,
68619 + const struct dentry *dir, const int flag,
68620 + const int acc_mode);
68621 +int gr_handle_hardlink(const struct dentry *dentry,
68622 + const struct vfsmount *mnt,
68623 + struct inode *inode,
68624 + const int mode, const char *to);
68625 +
68626 +int gr_is_capable(const int cap);
68627 +int gr_is_capable_nolog(const int cap);
68628 +void gr_learn_resource(const struct task_struct *task, const int limit,
68629 + const unsigned long wanted, const int gt);
68630 +void gr_copy_label(struct task_struct *tsk);
68631 +void gr_handle_crash(struct task_struct *task, const int sig);
68632 +int gr_handle_signal(const struct task_struct *p, const int sig);
68633 +int gr_check_crash_uid(const uid_t uid);
68634 +int gr_check_protected_task(const struct task_struct *task);
68635 +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
68636 +int gr_acl_handle_mmap(const struct file *file,
68637 + const unsigned long prot);
68638 +int gr_acl_handle_mprotect(const struct file *file,
68639 + const unsigned long prot);
68640 +int gr_check_hidden_task(const struct task_struct *tsk);
68641 +__u32 gr_acl_handle_truncate(const struct dentry *dentry,
68642 + const struct vfsmount *mnt);
68643 +__u32 gr_acl_handle_utime(const struct dentry *dentry,
68644 + const struct vfsmount *mnt);
68645 +__u32 gr_acl_handle_access(const struct dentry *dentry,
68646 + const struct vfsmount *mnt, const int fmode);
68647 +__u32 gr_acl_handle_chmod(const struct dentry *dentry,
68648 + const struct vfsmount *mnt, umode_t *mode);
68649 +__u32 gr_acl_handle_chown(const struct dentry *dentry,
68650 + const struct vfsmount *mnt);
68651 +__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
68652 + const struct vfsmount *mnt);
68653 +int gr_handle_ptrace(struct task_struct *task, const long request);
68654 +int gr_handle_proc_ptrace(struct task_struct *task);
68655 +__u32 gr_acl_handle_execve(const struct dentry *dentry,
68656 + const struct vfsmount *mnt);
68657 +int gr_check_crash_exec(const struct file *filp);
68658 +int gr_acl_is_enabled(void);
68659 +void gr_set_kernel_label(struct task_struct *task);
68660 +void gr_set_role_label(struct task_struct *task, const uid_t uid,
68661 + const gid_t gid);
68662 +int gr_set_proc_label(const struct dentry *dentry,
68663 + const struct vfsmount *mnt,
68664 + const int unsafe_flags);
68665 +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
68666 + const struct vfsmount *mnt);
68667 +__u32 gr_acl_handle_open(const struct dentry *dentry,
68668 + const struct vfsmount *mnt, int acc_mode);
68669 +__u32 gr_acl_handle_creat(const struct dentry *dentry,
68670 + const struct dentry *p_dentry,
68671 + const struct vfsmount *p_mnt,
68672 + int open_flags, int acc_mode, const int imode);
68673 +void gr_handle_create(const struct dentry *dentry,
68674 + const struct vfsmount *mnt);
68675 +void gr_handle_proc_create(const struct dentry *dentry,
68676 + const struct inode *inode);
68677 +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
68678 + const struct dentry *parent_dentry,
68679 + const struct vfsmount *parent_mnt,
68680 + const int mode);
68681 +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
68682 + const struct dentry *parent_dentry,
68683 + const struct vfsmount *parent_mnt);
68684 +__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
68685 + const struct vfsmount *mnt);
68686 +void gr_handle_delete(const ino_t ino, const dev_t dev);
68687 +__u32 gr_acl_handle_unlink(const struct dentry *dentry,
68688 + const struct vfsmount *mnt);
68689 +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
68690 + const struct dentry *parent_dentry,
68691 + const struct vfsmount *parent_mnt,
68692 + const char *from);
68693 +__u32 gr_acl_handle_link(const struct dentry *new_dentry,
68694 + const struct dentry *parent_dentry,
68695 + const struct vfsmount *parent_mnt,
68696 + const struct dentry *old_dentry,
68697 + const struct vfsmount *old_mnt, const char *to);
68698 +int gr_acl_handle_rename(struct dentry *new_dentry,
68699 + struct dentry *parent_dentry,
68700 + const struct vfsmount *parent_mnt,
68701 + struct dentry *old_dentry,
68702 + struct inode *old_parent_inode,
68703 + struct vfsmount *old_mnt, const char *newname);
68704 +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
68705 + struct dentry *old_dentry,
68706 + struct dentry *new_dentry,
68707 + struct vfsmount *mnt, const __u8 replace);
68708 +__u32 gr_check_link(const struct dentry *new_dentry,
68709 + const struct dentry *parent_dentry,
68710 + const struct vfsmount *parent_mnt,
68711 + const struct dentry *old_dentry,
68712 + const struct vfsmount *old_mnt);
68713 +int gr_acl_handle_filldir(const struct file *file, const char *name,
68714 + const unsigned int namelen, const ino_t ino);
68715 +
68716 +__u32 gr_acl_handle_unix(const struct dentry *dentry,
68717 + const struct vfsmount *mnt);
68718 +void gr_acl_handle_exit(void);
68719 +void gr_acl_handle_psacct(struct task_struct *task, const long code);
68720 +int gr_acl_handle_procpidmem(const struct task_struct *task);
68721 +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
68722 +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
68723 +void gr_audit_ptrace(struct task_struct *task);
68724 +dev_t gr_get_dev_from_dentry(struct dentry *dentry);
68725 +
68726 +int gr_ptrace_readexec(struct file *file, int unsafe_flags);
68727 +
68728 +#ifdef CONFIG_GRKERNSEC
68729 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
68730 +void gr_handle_vm86(void);
68731 +void gr_handle_mem_readwrite(u64 from, u64 to);
68732 +
68733 +void gr_log_badprocpid(const char *entry);
68734 +
68735 +extern int grsec_enable_dmesg;
68736 +extern int grsec_disable_privio;
68737 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
68738 +extern int grsec_enable_chroot_findtask;
68739 +#endif
68740 +#ifdef CONFIG_GRKERNSEC_SETXID
68741 +extern int grsec_enable_setxid;
68742 +#endif
68743 +#endif
68744 +
68745 +#endif
68746 diff --git a/include/linux/hdpu_features.h b/include/linux/hdpu_features.h
68747 index 6a87154..a3ce57b 100644
68748 --- a/include/linux/hdpu_features.h
68749 +++ b/include/linux/hdpu_features.h
68750 @@ -3,7 +3,7 @@
68751 struct cpustate_t {
68752 spinlock_t lock;
68753 int excl;
68754 - int open_count;
68755 + atomic_t open_count;
68756 unsigned char cached_val;
68757 int inited;
68758 unsigned long *set_addr;
68759 diff --git a/include/linux/highmem.h b/include/linux/highmem.h
68760 index 211ff44..00ab6d7 100644
68761 --- a/include/linux/highmem.h
68762 +++ b/include/linux/highmem.h
68763 @@ -137,6 +137,18 @@ static inline void clear_highpage(struct page *page)
68764 kunmap_atomic(kaddr, KM_USER0);
68765 }
68766
68767 +static inline void sanitize_highpage(struct page *page)
68768 +{
68769 + void *kaddr;
68770 + unsigned long flags;
68771 +
68772 + local_irq_save(flags);
68773 + kaddr = kmap_atomic(page, KM_CLEARPAGE);
68774 + clear_page(kaddr);
68775 + kunmap_atomic(kaddr, KM_CLEARPAGE);
68776 + local_irq_restore(flags);
68777 +}
68778 +
68779 static inline void zero_user_segments(struct page *page,
68780 unsigned start1, unsigned end1,
68781 unsigned start2, unsigned end2)
68782 diff --git a/include/linux/i2c.h b/include/linux/i2c.h
68783 index 7b40cda..24eb44e 100644
68784 --- a/include/linux/i2c.h
68785 +++ b/include/linux/i2c.h
68786 @@ -325,6 +325,7 @@ struct i2c_algorithm {
68787 /* To determine what the adapter supports */
68788 u32 (*functionality) (struct i2c_adapter *);
68789 };
68790 +typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
68791
68792 /*
68793 * i2c_adapter is the structure used to identify a physical i2c bus along
68794 diff --git a/include/linux/i2o.h b/include/linux/i2o.h
68795 index 4c4e57d..f3c5303 100644
68796 --- a/include/linux/i2o.h
68797 +++ b/include/linux/i2o.h
68798 @@ -564,7 +564,7 @@ struct i2o_controller {
68799 struct i2o_device *exec; /* Executive */
68800 #if BITS_PER_LONG == 64
68801 spinlock_t context_list_lock; /* lock for context_list */
68802 - atomic_t context_list_counter; /* needed for unique contexts */
68803 + atomic_unchecked_t context_list_counter; /* needed for unique contexts */
68804 struct list_head context_list; /* list of context id's
68805 and pointers */
68806 #endif
68807 diff --git a/include/linux/init_task.h b/include/linux/init_task.h
68808 index 21a6f5d..dc42eab 100644
68809 --- a/include/linux/init_task.h
68810 +++ b/include/linux/init_task.h
68811 @@ -83,6 +83,12 @@ extern struct group_info init_groups;
68812 #define INIT_IDS
68813 #endif
68814
68815 +#ifdef CONFIG_X86
68816 +#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
68817 +#else
68818 +#define INIT_TASK_THREAD_INFO
68819 +#endif
68820 +
68821 #ifdef CONFIG_SECURITY_FILE_CAPABILITIES
68822 /*
68823 * Because of the reduced scope of CAP_SETPCAP when filesystem
68824 @@ -156,6 +162,7 @@ extern struct cred init_cred;
68825 __MUTEX_INITIALIZER(tsk.cred_guard_mutex), \
68826 .comm = "swapper", \
68827 .thread = INIT_THREAD, \
68828 + INIT_TASK_THREAD_INFO \
68829 .fs = &init_fs, \
68830 .files = &init_files, \
68831 .signal = &init_signals, \
68832 diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
68833 index 4f0a72a..a849599 100644
68834 --- a/include/linux/intel-iommu.h
68835 +++ b/include/linux/intel-iommu.h
68836 @@ -296,7 +296,7 @@ struct iommu_flush {
68837 u8 fm, u64 type);
68838 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
68839 unsigned int size_order, u64 type);
68840 -};
68841 +} __no_const;
68842
68843 enum {
68844 SR_DMAR_FECTL_REG,
68845 diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
68846 index c739150..be577b5 100644
68847 --- a/include/linux/interrupt.h
68848 +++ b/include/linux/interrupt.h
68849 @@ -369,7 +369,7 @@ enum
68850 /* map softirq index to softirq name. update 'softirq_to_name' in
68851 * kernel/softirq.c when adding a new softirq.
68852 */
68853 -extern char *softirq_to_name[NR_SOFTIRQS];
68854 +extern const char * const softirq_to_name[NR_SOFTIRQS];
68855
68856 /* softirq mask and active fields moved to irq_cpustat_t in
68857 * asm/hardirq.h to get better cache usage. KAO
68858 @@ -377,12 +377,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
68859
68860 struct softirq_action
68861 {
68862 - void (*action)(struct softirq_action *);
68863 + void (*action)(void);
68864 };
68865
68866 asmlinkage void do_softirq(void);
68867 asmlinkage void __do_softirq(void);
68868 -extern void open_softirq(int nr, void (*action)(struct softirq_action *));
68869 +extern void open_softirq(int nr, void (*action)(void));
68870 extern void softirq_init(void);
68871 #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
68872 extern void raise_softirq_irqoff(unsigned int nr);
68873 diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
68874 index eb73632..19abfc1 100644
68875 --- a/include/linux/iocontext.h
68876 +++ b/include/linux/iocontext.h
68877 @@ -94,14 +94,15 @@ static inline struct io_context *ioc_task_link(struct io_context *ioc)
68878 return NULL;
68879 }
68880
68881 +struct task_struct;
68882 #ifdef CONFIG_BLOCK
68883 int put_io_context(struct io_context *ioc);
68884 -void exit_io_context(void);
68885 +void exit_io_context(struct task_struct *task);
68886 struct io_context *get_io_context(gfp_t gfp_flags, int node);
68887 struct io_context *alloc_io_context(gfp_t gfp_flags, int node);
68888 void copy_io_context(struct io_context **pdst, struct io_context **psrc);
68889 #else
68890 -static inline void exit_io_context(void)
68891 +static inline void exit_io_context(struct task_struct *task)
68892 {
68893 }
68894
68895 diff --git a/include/linux/irq.h b/include/linux/irq.h
68896 index 9e5f45a..025865b 100644
68897 --- a/include/linux/irq.h
68898 +++ b/include/linux/irq.h
68899 @@ -438,12 +438,12 @@ extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
68900 static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
68901 bool boot)
68902 {
68903 +#ifdef CONFIG_CPUMASK_OFFSTACK
68904 gfp_t gfp = GFP_ATOMIC;
68905
68906 if (boot)
68907 gfp = GFP_NOWAIT;
68908
68909 -#ifdef CONFIG_CPUMASK_OFFSTACK
68910 if (!alloc_cpumask_var_node(&desc->affinity, gfp, node))
68911 return false;
68912
68913 diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
68914 index 7922742..27306a2 100644
68915 --- a/include/linux/kallsyms.h
68916 +++ b/include/linux/kallsyms.h
68917 @@ -15,7 +15,8 @@
68918
68919 struct module;
68920
68921 -#ifdef CONFIG_KALLSYMS
68922 +#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
68923 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
68924 /* Lookup the address for a symbol. Returns 0 if not found. */
68925 unsigned long kallsyms_lookup_name(const char *name);
68926
68927 @@ -92,6 +93,15 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
68928 /* Stupid that this does nothing, but I didn't create this mess. */
68929 #define __print_symbol(fmt, addr)
68930 #endif /*CONFIG_KALLSYMS*/
68931 +#else /* when included by kallsyms.c, vsnprintf.c, or
68932 + arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
68933 +extern void __print_symbol(const char *fmt, unsigned long address);
68934 +extern int sprint_symbol(char *buffer, unsigned long address);
68935 +const char *kallsyms_lookup(unsigned long addr,
68936 + unsigned long *symbolsize,
68937 + unsigned long *offset,
68938 + char **modname, char *namebuf);
68939 +#endif
68940
68941 /* This macro allows us to keep printk typechecking */
68942 static void __check_printsym_format(const char *fmt, ...)
68943 diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
68944 index 6adcc29..13369e8 100644
68945 --- a/include/linux/kgdb.h
68946 +++ b/include/linux/kgdb.h
68947 @@ -74,8 +74,8 @@ void kgdb_breakpoint(void);
68948
68949 extern int kgdb_connected;
68950
68951 -extern atomic_t kgdb_setting_breakpoint;
68952 -extern atomic_t kgdb_cpu_doing_single_step;
68953 +extern atomic_unchecked_t kgdb_setting_breakpoint;
68954 +extern atomic_unchecked_t kgdb_cpu_doing_single_step;
68955
68956 extern struct task_struct *kgdb_usethread;
68957 extern struct task_struct *kgdb_contthread;
68958 @@ -235,7 +235,7 @@ struct kgdb_arch {
68959 int (*remove_hw_breakpoint)(unsigned long, int, enum kgdb_bptype);
68960 void (*remove_all_hw_break)(void);
68961 void (*correct_hw_break)(void);
68962 -};
68963 +} __do_const;
68964
68965 /**
68966 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
68967 @@ -257,14 +257,14 @@ struct kgdb_io {
68968 int (*init) (void);
68969 void (*pre_exception) (void);
68970 void (*post_exception) (void);
68971 -};
68972 +} __do_const;
68973
68974 -extern struct kgdb_arch arch_kgdb_ops;
68975 +extern const struct kgdb_arch arch_kgdb_ops;
68976
68977 extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs);
68978
68979 -extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops);
68980 -extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops);
68981 +extern int kgdb_register_io_module(const struct kgdb_io *local_kgdb_io_ops);
68982 +extern void kgdb_unregister_io_module(const struct kgdb_io *local_kgdb_io_ops);
68983
68984 extern int kgdb_hex2long(char **ptr, unsigned long *long_val);
68985 extern int kgdb_mem2hex(char *mem, char *buf, int count);
68986 diff --git a/include/linux/kmod.h b/include/linux/kmod.h
68987 index 0546fe7..2a22bc1 100644
68988 --- a/include/linux/kmod.h
68989 +++ b/include/linux/kmod.h
68990 @@ -31,6 +31,8 @@
68991 * usually useless though. */
68992 extern int __request_module(bool wait, const char *name, ...) \
68993 __attribute__((format(printf, 2, 3)));
68994 +extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
68995 + __attribute__((format(printf, 3, 4)));
68996 #define request_module(mod...) __request_module(true, mod)
68997 #define request_module_nowait(mod...) __request_module(false, mod)
68998 #define try_then_request_module(x, mod...) \
68999 diff --git a/include/linux/kobject.h b/include/linux/kobject.h
69000 index 58ae8e0..3950d3c 100644
69001 --- a/include/linux/kobject.h
69002 +++ b/include/linux/kobject.h
69003 @@ -106,7 +106,7 @@ extern char *kobject_get_path(struct kobject *kobj, gfp_t flag);
69004
69005 struct kobj_type {
69006 void (*release)(struct kobject *kobj);
69007 - struct sysfs_ops *sysfs_ops;
69008 + const struct sysfs_ops *sysfs_ops;
69009 struct attribute **default_attrs;
69010 };
69011
69012 @@ -118,9 +118,9 @@ struct kobj_uevent_env {
69013 };
69014
69015 struct kset_uevent_ops {
69016 - int (*filter)(struct kset *kset, struct kobject *kobj);
69017 - const char *(*name)(struct kset *kset, struct kobject *kobj);
69018 - int (*uevent)(struct kset *kset, struct kobject *kobj,
69019 + int (* const filter)(struct kset *kset, struct kobject *kobj);
69020 + const char *(* const name)(struct kset *kset, struct kobject *kobj);
69021 + int (* const uevent)(struct kset *kset, struct kobject *kobj,
69022 struct kobj_uevent_env *env);
69023 };
69024
69025 @@ -132,7 +132,7 @@ struct kobj_attribute {
69026 const char *buf, size_t count);
69027 };
69028
69029 -extern struct sysfs_ops kobj_sysfs_ops;
69030 +extern const struct sysfs_ops kobj_sysfs_ops;
69031
69032 /**
69033 * struct kset - a set of kobjects of a specific type, belonging to a specific subsystem.
69034 @@ -155,14 +155,14 @@ struct kset {
69035 struct list_head list;
69036 spinlock_t list_lock;
69037 struct kobject kobj;
69038 - struct kset_uevent_ops *uevent_ops;
69039 + const struct kset_uevent_ops *uevent_ops;
69040 };
69041
69042 extern void kset_init(struct kset *kset);
69043 extern int __must_check kset_register(struct kset *kset);
69044 extern void kset_unregister(struct kset *kset);
69045 extern struct kset * __must_check kset_create_and_add(const char *name,
69046 - struct kset_uevent_ops *u,
69047 + const struct kset_uevent_ops *u,
69048 struct kobject *parent_kobj);
69049
69050 static inline struct kset *to_kset(struct kobject *kobj)
69051 diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
69052 index c728a50..752d821 100644
69053 --- a/include/linux/kvm_host.h
69054 +++ b/include/linux/kvm_host.h
69055 @@ -210,7 +210,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
69056 void vcpu_load(struct kvm_vcpu *vcpu);
69057 void vcpu_put(struct kvm_vcpu *vcpu);
69058
69059 -int kvm_init(void *opaque, unsigned int vcpu_size,
69060 +int kvm_init(const void *opaque, unsigned int vcpu_size,
69061 struct module *module);
69062 void kvm_exit(void);
69063
69064 @@ -316,7 +316,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
69065 struct kvm_guest_debug *dbg);
69066 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
69067
69068 -int kvm_arch_init(void *opaque);
69069 +int kvm_arch_init(const void *opaque);
69070 void kvm_arch_exit(void);
69071
69072 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
69073 diff --git a/include/linux/libata.h b/include/linux/libata.h
69074 index a069916..223edde 100644
69075 --- a/include/linux/libata.h
69076 +++ b/include/linux/libata.h
69077 @@ -525,11 +525,11 @@ struct ata_ioports {
69078
69079 struct ata_host {
69080 spinlock_t lock;
69081 - struct device *dev;
69082 + struct device *dev;
69083 void __iomem * const *iomap;
69084 unsigned int n_ports;
69085 void *private_data;
69086 - struct ata_port_operations *ops;
69087 + const struct ata_port_operations *ops;
69088 unsigned long flags;
69089 #ifdef CONFIG_ATA_ACPI
69090 acpi_handle acpi_handle;
69091 @@ -710,7 +710,7 @@ struct ata_link {
69092
69093 struct ata_port {
69094 struct Scsi_Host *scsi_host; /* our co-allocated scsi host */
69095 - struct ata_port_operations *ops;
69096 + const struct ata_port_operations *ops;
69097 spinlock_t *lock;
69098 /* Flags owned by the EH context. Only EH should touch these once the
69099 port is active */
69100 @@ -884,7 +884,7 @@ struct ata_port_operations {
69101 * fields must be pointers.
69102 */
69103 const struct ata_port_operations *inherits;
69104 -};
69105 +} __do_const;
69106
69107 struct ata_port_info {
69108 unsigned long flags;
69109 @@ -892,7 +892,7 @@ struct ata_port_info {
69110 unsigned long pio_mask;
69111 unsigned long mwdma_mask;
69112 unsigned long udma_mask;
69113 - struct ata_port_operations *port_ops;
69114 + const struct ata_port_operations *port_ops;
69115 void *private_data;
69116 };
69117
69118 @@ -916,7 +916,7 @@ extern const unsigned long sata_deb_timing_normal[];
69119 extern const unsigned long sata_deb_timing_hotplug[];
69120 extern const unsigned long sata_deb_timing_long[];
69121
69122 -extern struct ata_port_operations ata_dummy_port_ops;
69123 +extern const struct ata_port_operations ata_dummy_port_ops;
69124 extern const struct ata_port_info ata_dummy_port_info;
69125
69126 static inline const unsigned long *
69127 @@ -962,7 +962,7 @@ extern int ata_host_activate(struct ata_host *host, int irq,
69128 struct scsi_host_template *sht);
69129 extern void ata_host_detach(struct ata_host *host);
69130 extern void ata_host_init(struct ata_host *, struct device *,
69131 - unsigned long, struct ata_port_operations *);
69132 + unsigned long, const struct ata_port_operations *);
69133 extern int ata_scsi_detect(struct scsi_host_template *sht);
69134 extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
69135 extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
69136 diff --git a/include/linux/lockd/bind.h b/include/linux/lockd/bind.h
69137 index fbc48f8..0886e57 100644
69138 --- a/include/linux/lockd/bind.h
69139 +++ b/include/linux/lockd/bind.h
69140 @@ -23,13 +23,13 @@ struct svc_rqst;
69141 * This is the set of functions for lockd->nfsd communication
69142 */
69143 struct nlmsvc_binding {
69144 - __be32 (*fopen)(struct svc_rqst *,
69145 + __be32 (* const fopen)(struct svc_rqst *,
69146 struct nfs_fh *,
69147 struct file **);
69148 - void (*fclose)(struct file *);
69149 + void (* const fclose)(struct file *);
69150 };
69151
69152 -extern struct nlmsvc_binding * nlmsvc_ops;
69153 +extern const struct nlmsvc_binding * nlmsvc_ops;
69154
69155 /*
69156 * Similar to nfs_client_initdata, but without the NFS-specific
69157 diff --git a/include/linux/mca.h b/include/linux/mca.h
69158 index 3797270..7765ede 100644
69159 --- a/include/linux/mca.h
69160 +++ b/include/linux/mca.h
69161 @@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
69162 int region);
69163 void * (*mca_transform_memory)(struct mca_device *,
69164 void *memory);
69165 -};
69166 +} __no_const;
69167
69168 struct mca_bus {
69169 u64 default_dma_mask;
69170 diff --git a/include/linux/memory.h b/include/linux/memory.h
69171 index 37fa19b..b597c85 100644
69172 --- a/include/linux/memory.h
69173 +++ b/include/linux/memory.h
69174 @@ -108,7 +108,7 @@ struct memory_accessor {
69175 size_t count);
69176 ssize_t (*write)(struct memory_accessor *, const char *buf,
69177 off_t offset, size_t count);
69178 -};
69179 +} __no_const;
69180
69181 /*
69182 * Kernel text modification mutex, used for code patching. Users of this lock
69183 diff --git a/include/linux/mm.h b/include/linux/mm.h
69184 index 11e5be6..1ff2423 100644
69185 --- a/include/linux/mm.h
69186 +++ b/include/linux/mm.h
69187 @@ -106,7 +106,14 @@ extern unsigned int kobjsize(const void *objp);
69188
69189 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
69190 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
69191 +
69192 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
69193 +#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
69194 +#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
69195 +#else
69196 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
69197 +#endif
69198 +
69199 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
69200 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
69201
69202 @@ -841,12 +848,6 @@ int set_page_dirty(struct page *page);
69203 int set_page_dirty_lock(struct page *page);
69204 int clear_page_dirty_for_io(struct page *page);
69205
69206 -/* Is the vma a continuation of the stack vma above it? */
69207 -static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
69208 -{
69209 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
69210 -}
69211 -
69212 extern unsigned long move_page_tables(struct vm_area_struct *vma,
69213 unsigned long old_addr, struct vm_area_struct *new_vma,
69214 unsigned long new_addr, unsigned long len);
69215 @@ -890,6 +891,8 @@ struct shrinker {
69216 extern void register_shrinker(struct shrinker *);
69217 extern void unregister_shrinker(struct shrinker *);
69218
69219 +pgprot_t vm_get_page_prot(unsigned long vm_flags);
69220 +
69221 int vma_wants_writenotify(struct vm_area_struct *vma);
69222
69223 extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl);
69224 @@ -1162,6 +1165,7 @@ out:
69225 }
69226
69227 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
69228 +extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
69229
69230 extern unsigned long do_brk(unsigned long, unsigned long);
69231
69232 @@ -1218,6 +1222,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
69233 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
69234 struct vm_area_struct **pprev);
69235
69236 +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
69237 +extern void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
69238 +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
69239 +
69240 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
69241 NULL if none. Assume start_addr < end_addr. */
69242 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
69243 @@ -1234,7 +1242,6 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma)
69244 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
69245 }
69246
69247 -pgprot_t vm_get_page_prot(unsigned long vm_flags);
69248 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
69249 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
69250 unsigned long pfn, unsigned long size, pgprot_t);
69251 @@ -1332,7 +1339,13 @@ extern void memory_failure(unsigned long pfn, int trapno);
69252 extern int __memory_failure(unsigned long pfn, int trapno, int ref);
69253 extern int sysctl_memory_failure_early_kill;
69254 extern int sysctl_memory_failure_recovery;
69255 -extern atomic_long_t mce_bad_pages;
69256 +extern atomic_long_unchecked_t mce_bad_pages;
69257 +
69258 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
69259 +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
69260 +#else
69261 +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
69262 +#endif
69263
69264 #endif /* __KERNEL__ */
69265 #endif /* _LINUX_MM_H */
69266 diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
69267 index 9d12ed5..6d9707a 100644
69268 --- a/include/linux/mm_types.h
69269 +++ b/include/linux/mm_types.h
69270 @@ -186,6 +186,8 @@ struct vm_area_struct {
69271 #ifdef CONFIG_NUMA
69272 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
69273 #endif
69274 +
69275 + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
69276 };
69277
69278 struct core_thread {
69279 @@ -287,6 +289,24 @@ struct mm_struct {
69280 #ifdef CONFIG_MMU_NOTIFIER
69281 struct mmu_notifier_mm *mmu_notifier_mm;
69282 #endif
69283 +
69284 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
69285 + unsigned long pax_flags;
69286 +#endif
69287 +
69288 +#ifdef CONFIG_PAX_DLRESOLVE
69289 + unsigned long call_dl_resolve;
69290 +#endif
69291 +
69292 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
69293 + unsigned long call_syscall;
69294 +#endif
69295 +
69296 +#ifdef CONFIG_PAX_ASLR
69297 + unsigned long delta_mmap; /* randomized offset */
69298 + unsigned long delta_stack; /* randomized offset */
69299 +#endif
69300 +
69301 };
69302
69303 /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
69304 diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
69305 index 4e02ee2..afb159e 100644
69306 --- a/include/linux/mmu_notifier.h
69307 +++ b/include/linux/mmu_notifier.h
69308 @@ -235,12 +235,12 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
69309 */
69310 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
69311 ({ \
69312 - pte_t __pte; \
69313 + pte_t ___pte; \
69314 struct vm_area_struct *___vma = __vma; \
69315 unsigned long ___address = __address; \
69316 - __pte = ptep_clear_flush(___vma, ___address, __ptep); \
69317 + ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
69318 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
69319 - __pte; \
69320 + ___pte; \
69321 })
69322
69323 #define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
69324 diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
69325 index 6c31a2a..4b0e930 100644
69326 --- a/include/linux/mmzone.h
69327 +++ b/include/linux/mmzone.h
69328 @@ -350,7 +350,7 @@ struct zone {
69329 unsigned long flags; /* zone flags, see below */
69330
69331 /* Zone statistics */
69332 - atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
69333 + atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
69334
69335 /*
69336 * prev_priority holds the scanning priority for this zone. It is
69337 diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
69338 index f58e9d8..3503935 100644
69339 --- a/include/linux/mod_devicetable.h
69340 +++ b/include/linux/mod_devicetable.h
69341 @@ -12,7 +12,7 @@
69342 typedef unsigned long kernel_ulong_t;
69343 #endif
69344
69345 -#define PCI_ANY_ID (~0)
69346 +#define PCI_ANY_ID ((__u16)~0)
69347
69348 struct pci_device_id {
69349 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
69350 @@ -131,7 +131,7 @@ struct usb_device_id {
69351 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
69352 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
69353
69354 -#define HID_ANY_ID (~0)
69355 +#define HID_ANY_ID (~0U)
69356
69357 struct hid_device_id {
69358 __u16 bus;
69359 diff --git a/include/linux/module.h b/include/linux/module.h
69360 index 482efc8..642032b 100644
69361 --- a/include/linux/module.h
69362 +++ b/include/linux/module.h
69363 @@ -16,6 +16,7 @@
69364 #include <linux/kobject.h>
69365 #include <linux/moduleparam.h>
69366 #include <linux/tracepoint.h>
69367 +#include <linux/fs.h>
69368
69369 #include <asm/local.h>
69370 #include <asm/module.h>
69371 @@ -287,16 +288,16 @@ struct module
69372 int (*init)(void);
69373
69374 /* If this is non-NULL, vfree after init() returns */
69375 - void *module_init;
69376 + void *module_init_rx, *module_init_rw;
69377
69378 /* Here is the actual code + data, vfree'd on unload. */
69379 - void *module_core;
69380 + void *module_core_rx, *module_core_rw;
69381
69382 /* Here are the sizes of the init and core sections */
69383 - unsigned int init_size, core_size;
69384 + unsigned int init_size_rw, core_size_rw;
69385
69386 /* The size of the executable code in each section. */
69387 - unsigned int init_text_size, core_text_size;
69388 + unsigned int init_size_rx, core_size_rx;
69389
69390 /* Arch-specific module values */
69391 struct mod_arch_specific arch;
69392 @@ -345,6 +346,10 @@ struct module
69393 #ifdef CONFIG_EVENT_TRACING
69394 struct ftrace_event_call *trace_events;
69395 unsigned int num_trace_events;
69396 + struct file_operations trace_id;
69397 + struct file_operations trace_enable;
69398 + struct file_operations trace_format;
69399 + struct file_operations trace_filter;
69400 #endif
69401 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
69402 unsigned long *ftrace_callsites;
69403 @@ -393,16 +398,46 @@ struct module *__module_address(unsigned long addr);
69404 bool is_module_address(unsigned long addr);
69405 bool is_module_text_address(unsigned long addr);
69406
69407 +static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
69408 +{
69409 +
69410 +#ifdef CONFIG_PAX_KERNEXEC
69411 + if (ktla_ktva(addr) >= (unsigned long)start &&
69412 + ktla_ktva(addr) < (unsigned long)start + size)
69413 + return 1;
69414 +#endif
69415 +
69416 + return ((void *)addr >= start && (void *)addr < start + size);
69417 +}
69418 +
69419 +static inline int within_module_core_rx(unsigned long addr, struct module *mod)
69420 +{
69421 + return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
69422 +}
69423 +
69424 +static inline int within_module_core_rw(unsigned long addr, struct module *mod)
69425 +{
69426 + return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
69427 +}
69428 +
69429 +static inline int within_module_init_rx(unsigned long addr, struct module *mod)
69430 +{
69431 + return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
69432 +}
69433 +
69434 +static inline int within_module_init_rw(unsigned long addr, struct module *mod)
69435 +{
69436 + return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
69437 +}
69438 +
69439 static inline int within_module_core(unsigned long addr, struct module *mod)
69440 {
69441 - return (unsigned long)mod->module_core <= addr &&
69442 - addr < (unsigned long)mod->module_core + mod->core_size;
69443 + return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
69444 }
69445
69446 static inline int within_module_init(unsigned long addr, struct module *mod)
69447 {
69448 - return (unsigned long)mod->module_init <= addr &&
69449 - addr < (unsigned long)mod->module_init + mod->init_size;
69450 + return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
69451 }
69452
69453 /* Search for module by name: must hold module_mutex. */
69454 diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
69455 index c1f40c2..682ca53 100644
69456 --- a/include/linux/moduleloader.h
69457 +++ b/include/linux/moduleloader.h
69458 @@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
69459 sections. Returns NULL on failure. */
69460 void *module_alloc(unsigned long size);
69461
69462 +#ifdef CONFIG_PAX_KERNEXEC
69463 +void *module_alloc_exec(unsigned long size);
69464 +#else
69465 +#define module_alloc_exec(x) module_alloc(x)
69466 +#endif
69467 +
69468 /* Free memory returned from module_alloc. */
69469 void module_free(struct module *mod, void *module_region);
69470
69471 +#ifdef CONFIG_PAX_KERNEXEC
69472 +void module_free_exec(struct module *mod, void *module_region);
69473 +#else
69474 +#define module_free_exec(x, y) module_free((x), (y))
69475 +#endif
69476 +
69477 /* Apply the given relocation to the (simplified) ELF. Return -error
69478 or 0. */
69479 int apply_relocate(Elf_Shdr *sechdrs,
69480 diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
69481 index 82a9124..8a5f622 100644
69482 --- a/include/linux/moduleparam.h
69483 +++ b/include/linux/moduleparam.h
69484 @@ -132,7 +132,7 @@ struct kparam_array
69485
69486 /* Actually copy string: maxlen param is usually sizeof(string). */
69487 #define module_param_string(name, string, len, perm) \
69488 - static const struct kparam_string __param_string_##name \
69489 + static const struct kparam_string __param_string_##name __used \
69490 = { len, string }; \
69491 __module_param_call(MODULE_PARAM_PREFIX, name, \
69492 param_set_copystring, param_get_string, \
69493 @@ -211,7 +211,7 @@ extern int param_get_invbool(char *buffer, struct kernel_param *kp);
69494
69495 /* Comma-separated array: *nump is set to number they actually specified. */
69496 #define module_param_array_named(name, array, type, nump, perm) \
69497 - static const struct kparam_array __param_arr_##name \
69498 + static const struct kparam_array __param_arr_##name __used \
69499 = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type,\
69500 sizeof(array[0]), array }; \
69501 __module_param_call(MODULE_PARAM_PREFIX, name, \
69502 diff --git a/include/linux/mutex.h b/include/linux/mutex.h
69503 index 878cab4..c92cb3e 100644
69504 --- a/include/linux/mutex.h
69505 +++ b/include/linux/mutex.h
69506 @@ -51,7 +51,7 @@ struct mutex {
69507 spinlock_t wait_lock;
69508 struct list_head wait_list;
69509 #if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
69510 - struct thread_info *owner;
69511 + struct task_struct *owner;
69512 #endif
69513 #ifdef CONFIG_DEBUG_MUTEXES
69514 const char *name;
69515 diff --git a/include/linux/namei.h b/include/linux/namei.h
69516 index ec0f607..d19e675 100644
69517 --- a/include/linux/namei.h
69518 +++ b/include/linux/namei.h
69519 @@ -22,7 +22,7 @@ struct nameidata {
69520 unsigned int flags;
69521 int last_type;
69522 unsigned depth;
69523 - char *saved_names[MAX_NESTED_LINKS + 1];
69524 + const char *saved_names[MAX_NESTED_LINKS + 1];
69525
69526 /* Intent data */
69527 union {
69528 @@ -84,12 +84,12 @@ extern int follow_up(struct path *);
69529 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
69530 extern void unlock_rename(struct dentry *, struct dentry *);
69531
69532 -static inline void nd_set_link(struct nameidata *nd, char *path)
69533 +static inline void nd_set_link(struct nameidata *nd, const char *path)
69534 {
69535 nd->saved_names[nd->depth] = path;
69536 }
69537
69538 -static inline char *nd_get_link(struct nameidata *nd)
69539 +static inline const char *nd_get_link(const struct nameidata *nd)
69540 {
69541 return nd->saved_names[nd->depth];
69542 }
69543 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
69544 index 9d7e8f7..04428c5 100644
69545 --- a/include/linux/netdevice.h
69546 +++ b/include/linux/netdevice.h
69547 @@ -637,6 +637,7 @@ struct net_device_ops {
69548 u16 xid);
69549 #endif
69550 };
69551 +typedef struct net_device_ops __no_const net_device_ops_no_const;
69552
69553 /*
69554 * The DEVICE structure.
69555 diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
69556 new file mode 100644
69557 index 0000000..33f4af8
69558 --- /dev/null
69559 +++ b/include/linux/netfilter/xt_gradm.h
69560 @@ -0,0 +1,9 @@
69561 +#ifndef _LINUX_NETFILTER_XT_GRADM_H
69562 +#define _LINUX_NETFILTER_XT_GRADM_H 1
69563 +
69564 +struct xt_gradm_mtinfo {
69565 + __u16 flags;
69566 + __u16 invflags;
69567 +};
69568 +
69569 +#endif
69570 diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
69571 index b359c4a..c08b334 100644
69572 --- a/include/linux/nodemask.h
69573 +++ b/include/linux/nodemask.h
69574 @@ -464,11 +464,11 @@ static inline int num_node_state(enum node_states state)
69575
69576 #define any_online_node(mask) \
69577 ({ \
69578 - int node; \
69579 - for_each_node_mask(node, (mask)) \
69580 - if (node_online(node)) \
69581 + int __node; \
69582 + for_each_node_mask(__node, (mask)) \
69583 + if (node_online(__node)) \
69584 break; \
69585 - node; \
69586 + __node; \
69587 })
69588
69589 #define num_online_nodes() num_node_state(N_ONLINE)
69590 diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
69591 index 5171639..7cf4235 100644
69592 --- a/include/linux/oprofile.h
69593 +++ b/include/linux/oprofile.h
69594 @@ -129,9 +129,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
69595 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
69596 char const * name, ulong * val);
69597
69598 -/** Create a file for read-only access to an atomic_t. */
69599 +/** Create a file for read-only access to an atomic_unchecked_t. */
69600 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
69601 - char const * name, atomic_t * val);
69602 + char const * name, atomic_unchecked_t * val);
69603
69604 /** create a directory */
69605 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
69606 diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
69607 index 3c62ed4..8924c7c 100644
69608 --- a/include/linux/pagemap.h
69609 +++ b/include/linux/pagemap.h
69610 @@ -425,7 +425,9 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size)
69611 if (((unsigned long)uaddr & PAGE_MASK) !=
69612 ((unsigned long)end & PAGE_MASK))
69613 ret = __get_user(c, end);
69614 + (void)c;
69615 }
69616 + (void)c;
69617 return ret;
69618 }
69619
69620 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
69621 index 81c9689..a567a55 100644
69622 --- a/include/linux/perf_event.h
69623 +++ b/include/linux/perf_event.h
69624 @@ -476,7 +476,7 @@ struct hw_perf_event {
69625 struct hrtimer hrtimer;
69626 };
69627 };
69628 - atomic64_t prev_count;
69629 + atomic64_unchecked_t prev_count;
69630 u64 sample_period;
69631 u64 last_period;
69632 atomic64_t period_left;
69633 @@ -557,7 +557,7 @@ struct perf_event {
69634 const struct pmu *pmu;
69635
69636 enum perf_event_active_state state;
69637 - atomic64_t count;
69638 + atomic64_unchecked_t count;
69639
69640 /*
69641 * These are the total time in nanoseconds that the event
69642 @@ -595,8 +595,8 @@ struct perf_event {
69643 * These accumulate total time (in nanoseconds) that children
69644 * events have been enabled and running, respectively.
69645 */
69646 - atomic64_t child_total_time_enabled;
69647 - atomic64_t child_total_time_running;
69648 + atomic64_unchecked_t child_total_time_enabled;
69649 + atomic64_unchecked_t child_total_time_running;
69650
69651 /*
69652 * Protect attach/detach and child_list:
69653 diff --git a/include/linux/personality.h b/include/linux/personality.h
69654 index 1261208..ddef96f 100644
69655 --- a/include/linux/personality.h
69656 +++ b/include/linux/personality.h
69657 @@ -43,6 +43,7 @@ enum {
69658 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
69659 ADDR_NO_RANDOMIZE | \
69660 ADDR_COMPAT_LAYOUT | \
69661 + ADDR_LIMIT_3GB | \
69662 MMAP_PAGE_ZERO)
69663
69664 /*
69665 diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
69666 index b43a9e0..b77d869 100644
69667 --- a/include/linux/pipe_fs_i.h
69668 +++ b/include/linux/pipe_fs_i.h
69669 @@ -46,9 +46,9 @@ struct pipe_inode_info {
69670 wait_queue_head_t wait;
69671 unsigned int nrbufs, curbuf;
69672 struct page *tmp_page;
69673 - unsigned int readers;
69674 - unsigned int writers;
69675 - unsigned int waiting_writers;
69676 + atomic_t readers;
69677 + atomic_t writers;
69678 + atomic_t waiting_writers;
69679 unsigned int r_counter;
69680 unsigned int w_counter;
69681 struct fasync_struct *fasync_readers;
69682 diff --git a/include/linux/poison.h b/include/linux/poison.h
69683 index 34066ff..e95d744 100644
69684 --- a/include/linux/poison.h
69685 +++ b/include/linux/poison.h
69686 @@ -19,8 +19,8 @@
69687 * under normal circumstances, used to verify that nobody uses
69688 * non-initialized list entries.
69689 */
69690 -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
69691 -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
69692 +#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
69693 +#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
69694
69695 /********** include/linux/timer.h **********/
69696 /*
69697 diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
69698 index 4f71bf4..cd2f68e 100644
69699 --- a/include/linux/posix-timers.h
69700 +++ b/include/linux/posix-timers.h
69701 @@ -82,7 +82,8 @@ struct k_clock {
69702 #define TIMER_RETRY 1
69703 void (*timer_get) (struct k_itimer * timr,
69704 struct itimerspec * cur_setting);
69705 -};
69706 +} __do_const;
69707 +typedef struct k_clock __no_const k_clock_no_const;
69708
69709 void register_posix_clock(const clockid_t clock_id, struct k_clock *new_clock);
69710
69711 diff --git a/include/linux/preempt.h b/include/linux/preempt.h
69712 index 72b1a10..13303a9 100644
69713 --- a/include/linux/preempt.h
69714 +++ b/include/linux/preempt.h
69715 @@ -110,7 +110,7 @@ struct preempt_ops {
69716 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
69717 void (*sched_out)(struct preempt_notifier *notifier,
69718 struct task_struct *next);
69719 -};
69720 +} __no_const;
69721
69722 /**
69723 * preempt_notifier - key for installing preemption notifiers
69724 diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
69725 index 379eaed..1bf73e3 100644
69726 --- a/include/linux/proc_fs.h
69727 +++ b/include/linux/proc_fs.h
69728 @@ -155,6 +155,19 @@ static inline struct proc_dir_entry *proc_create(const char *name, mode_t mode,
69729 return proc_create_data(name, mode, parent, proc_fops, NULL);
69730 }
69731
69732 +static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
69733 + struct proc_dir_entry *parent, const struct file_operations *proc_fops)
69734 +{
69735 +#ifdef CONFIG_GRKERNSEC_PROC_USER
69736 + return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
69737 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
69738 + return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
69739 +#else
69740 + return proc_create_data(name, mode, parent, proc_fops, NULL);
69741 +#endif
69742 +}
69743 +
69744 +
69745 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
69746 mode_t mode, struct proc_dir_entry *base,
69747 read_proc_t *read_proc, void * data)
69748 @@ -256,7 +269,7 @@ union proc_op {
69749 int (*proc_show)(struct seq_file *m,
69750 struct pid_namespace *ns, struct pid *pid,
69751 struct task_struct *task);
69752 -};
69753 +} __no_const;
69754
69755 struct ctl_table_header;
69756 struct ctl_table;
69757 diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
69758 index 7456d7d..6c1cfc9 100644
69759 --- a/include/linux/ptrace.h
69760 +++ b/include/linux/ptrace.h
69761 @@ -96,10 +96,10 @@ extern void __ptrace_unlink(struct task_struct *child);
69762 extern void exit_ptrace(struct task_struct *tracer);
69763 #define PTRACE_MODE_READ 1
69764 #define PTRACE_MODE_ATTACH 2
69765 -/* Returns 0 on success, -errno on denial. */
69766 -extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
69767 /* Returns true on success, false on denial. */
69768 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
69769 +/* Returns true on success, false on denial. */
69770 +extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
69771
69772 static inline int ptrace_reparented(struct task_struct *child)
69773 {
69774 diff --git a/include/linux/random.h b/include/linux/random.h
69775 index 2948046..3262567 100644
69776 --- a/include/linux/random.h
69777 +++ b/include/linux/random.h
69778 @@ -63,6 +63,11 @@ unsigned long randomize_range(unsigned long start, unsigned long end, unsigned l
69779 u32 random32(void);
69780 void srandom32(u32 seed);
69781
69782 +static inline unsigned long pax_get_random_long(void)
69783 +{
69784 + return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
69785 +}
69786 +
69787 #endif /* __KERNEL___ */
69788
69789 #endif /* _LINUX_RANDOM_H */
69790 diff --git a/include/linux/reboot.h b/include/linux/reboot.h
69791 index 988e55f..17cb4ef 100644
69792 --- a/include/linux/reboot.h
69793 +++ b/include/linux/reboot.h
69794 @@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
69795 * Architecture-specific implementations of sys_reboot commands.
69796 */
69797
69798 -extern void machine_restart(char *cmd);
69799 -extern void machine_halt(void);
69800 -extern void machine_power_off(void);
69801 +extern void machine_restart(char *cmd) __noreturn;
69802 +extern void machine_halt(void) __noreturn;
69803 +extern void machine_power_off(void) __noreturn;
69804
69805 extern void machine_shutdown(void);
69806 struct pt_regs;
69807 @@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
69808 */
69809
69810 extern void kernel_restart_prepare(char *cmd);
69811 -extern void kernel_restart(char *cmd);
69812 -extern void kernel_halt(void);
69813 -extern void kernel_power_off(void);
69814 +extern void kernel_restart(char *cmd) __noreturn;
69815 +extern void kernel_halt(void) __noreturn;
69816 +extern void kernel_power_off(void) __noreturn;
69817
69818 void ctrl_alt_del(void);
69819
69820 @@ -75,7 +75,7 @@ extern int orderly_poweroff(bool force);
69821 * Emergency restart, callable from an interrupt handler.
69822 */
69823
69824 -extern void emergency_restart(void);
69825 +extern void emergency_restart(void) __noreturn;
69826 #include <asm/emergency-restart.h>
69827
69828 #endif
69829 diff --git a/include/linux/regset.h b/include/linux/regset.h
69830 index 8abee65..5150fd1 100644
69831 --- a/include/linux/regset.h
69832 +++ b/include/linux/regset.h
69833 @@ -335,6 +335,9 @@ static inline int copy_regset_to_user(struct task_struct *target,
69834 {
69835 const struct user_regset *regset = &view->regsets[setno];
69836
69837 + if (!regset->get)
69838 + return -EOPNOTSUPP;
69839 +
69840 if (!access_ok(VERIFY_WRITE, data, size))
69841 return -EIO;
69842
69843 @@ -358,6 +361,9 @@ static inline int copy_regset_from_user(struct task_struct *target,
69844 {
69845 const struct user_regset *regset = &view->regsets[setno];
69846
69847 + if (!regset->set)
69848 + return -EOPNOTSUPP;
69849 +
69850 if (!access_ok(VERIFY_READ, data, size))
69851 return -EIO;
69852
69853 diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
69854 index dd31e7b..5b03c5c 100644
69855 --- a/include/linux/reiserfs_fs.h
69856 +++ b/include/linux/reiserfs_fs.h
69857 @@ -1326,7 +1326,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
69858 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
69859
69860 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
69861 -#define get_generation(s) atomic_read (&fs_generation(s))
69862 +#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
69863 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
69864 #define __fs_changed(gen,s) (gen != get_generation (s))
69865 #define fs_changed(gen,s) ({cond_resched(); __fs_changed(gen, s);})
69866 @@ -1534,24 +1534,24 @@ static inline struct super_block *sb_from_bi(struct buffer_info *bi)
69867 */
69868
69869 struct item_operations {
69870 - int (*bytes_number) (struct item_head * ih, int block_size);
69871 - void (*decrement_key) (struct cpu_key *);
69872 - int (*is_left_mergeable) (struct reiserfs_key * ih,
69873 + int (* const bytes_number) (struct item_head * ih, int block_size);
69874 + void (* const decrement_key) (struct cpu_key *);
69875 + int (* const is_left_mergeable) (struct reiserfs_key * ih,
69876 unsigned long bsize);
69877 - void (*print_item) (struct item_head *, char *item);
69878 - void (*check_item) (struct item_head *, char *item);
69879 + void (* const print_item) (struct item_head *, char *item);
69880 + void (* const check_item) (struct item_head *, char *item);
69881
69882 - int (*create_vi) (struct virtual_node * vn, struct virtual_item * vi,
69883 + int (* const create_vi) (struct virtual_node * vn, struct virtual_item * vi,
69884 int is_affected, int insert_size);
69885 - int (*check_left) (struct virtual_item * vi, int free,
69886 + int (* const check_left) (struct virtual_item * vi, int free,
69887 int start_skip, int end_skip);
69888 - int (*check_right) (struct virtual_item * vi, int free);
69889 - int (*part_size) (struct virtual_item * vi, int from, int to);
69890 - int (*unit_num) (struct virtual_item * vi);
69891 - void (*print_vi) (struct virtual_item * vi);
69892 + int (* const check_right) (struct virtual_item * vi, int free);
69893 + int (* const part_size) (struct virtual_item * vi, int from, int to);
69894 + int (* const unit_num) (struct virtual_item * vi);
69895 + void (* const print_vi) (struct virtual_item * vi);
69896 };
69897
69898 -extern struct item_operations *item_ops[TYPE_ANY + 1];
69899 +extern const struct item_operations * const item_ops[TYPE_ANY + 1];
69900
69901 #define op_bytes_number(ih,bsize) item_ops[le_ih_k_type (ih)]->bytes_number (ih, bsize)
69902 #define op_is_left_mergeable(key,bsize) item_ops[le_key_k_type (le_key_version (key), key)]->is_left_mergeable (key, bsize)
69903 diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
69904 index dab68bb..0688727 100644
69905 --- a/include/linux/reiserfs_fs_sb.h
69906 +++ b/include/linux/reiserfs_fs_sb.h
69907 @@ -377,7 +377,7 @@ struct reiserfs_sb_info {
69908 /* Comment? -Hans */
69909 wait_queue_head_t s_wait;
69910 /* To be obsoleted soon by per buffer seals.. -Hans */
69911 - atomic_t s_generation_counter; // increased by one every time the
69912 + atomic_unchecked_t s_generation_counter; // increased by one every time the
69913 // tree gets re-balanced
69914 unsigned long s_properties; /* File system properties. Currently holds
69915 on-disk FS format */
69916 diff --git a/include/linux/relay.h b/include/linux/relay.h
69917 index 14a86bc..17d0700 100644
69918 --- a/include/linux/relay.h
69919 +++ b/include/linux/relay.h
69920 @@ -159,7 +159,7 @@ struct rchan_callbacks
69921 * The callback should return 0 if successful, negative if not.
69922 */
69923 int (*remove_buf_file)(struct dentry *dentry);
69924 -};
69925 +} __no_const;
69926
69927 /*
69928 * CONFIG_RELAY kernel API, kernel/relay.c
69929 diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
69930 index 3392c59..a746428 100644
69931 --- a/include/linux/rfkill.h
69932 +++ b/include/linux/rfkill.h
69933 @@ -144,6 +144,7 @@ struct rfkill_ops {
69934 void (*query)(struct rfkill *rfkill, void *data);
69935 int (*set_block)(void *data, bool blocked);
69936 };
69937 +typedef struct rfkill_ops __no_const rfkill_ops_no_const;
69938
69939 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
69940 /**
69941 diff --git a/include/linux/sched.h b/include/linux/sched.h
69942 index 71849bf..a2380d9 100644
69943 --- a/include/linux/sched.h
69944 +++ b/include/linux/sched.h
69945 @@ -101,6 +101,7 @@ struct bio;
69946 struct fs_struct;
69947 struct bts_context;
69948 struct perf_event_context;
69949 +struct linux_binprm;
69950
69951 /*
69952 * List of flags we want to share for kernel threads,
69953 @@ -350,7 +351,7 @@ extern signed long schedule_timeout_killable(signed long timeout);
69954 extern signed long schedule_timeout_uninterruptible(signed long timeout);
69955 asmlinkage void __schedule(void);
69956 asmlinkage void schedule(void);
69957 -extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
69958 +extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
69959
69960 struct nsproxy;
69961 struct user_namespace;
69962 @@ -371,9 +372,12 @@ struct user_namespace;
69963 #define DEFAULT_MAX_MAP_COUNT (USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
69964
69965 extern int sysctl_max_map_count;
69966 +extern unsigned long sysctl_heap_stack_gap;
69967
69968 #include <linux/aio.h>
69969
69970 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
69971 +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
69972 extern unsigned long
69973 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
69974 unsigned long, unsigned long);
69975 @@ -666,6 +670,16 @@ struct signal_struct {
69976 struct tty_audit_buf *tty_audit_buf;
69977 #endif
69978
69979 +#ifdef CONFIG_GRKERNSEC
69980 + u32 curr_ip;
69981 + u32 saved_ip;
69982 + u32 gr_saddr;
69983 + u32 gr_daddr;
69984 + u16 gr_sport;
69985 + u16 gr_dport;
69986 + u8 used_accept:1;
69987 +#endif
69988 +
69989 int oom_adj; /* OOM kill score adjustment (bit shift) */
69990 };
69991
69992 @@ -723,6 +737,11 @@ struct user_struct {
69993 struct key *session_keyring; /* UID's default session keyring */
69994 #endif
69995
69996 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
69997 + unsigned int banned;
69998 + unsigned long ban_expires;
69999 +#endif
70000 +
70001 /* Hash table maintenance information */
70002 struct hlist_node uidhash_node;
70003 uid_t uid;
70004 @@ -1328,8 +1347,8 @@ struct task_struct {
70005 struct list_head thread_group;
70006
70007 struct completion *vfork_done; /* for vfork() */
70008 - int __user *set_child_tid; /* CLONE_CHILD_SETTID */
70009 - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
70010 + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
70011 + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
70012
70013 cputime_t utime, stime, utimescaled, stimescaled;
70014 cputime_t gtime;
70015 @@ -1343,16 +1362,6 @@ struct task_struct {
70016 struct task_cputime cputime_expires;
70017 struct list_head cpu_timers[3];
70018
70019 -/* process credentials */
70020 - const struct cred *real_cred; /* objective and real subjective task
70021 - * credentials (COW) */
70022 - const struct cred *cred; /* effective (overridable) subjective task
70023 - * credentials (COW) */
70024 - struct mutex cred_guard_mutex; /* guard against foreign influences on
70025 - * credential calculations
70026 - * (notably. ptrace) */
70027 - struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
70028 -
70029 char comm[TASK_COMM_LEN]; /* executable name excluding path
70030 - access with [gs]et_task_comm (which lock
70031 it with task_lock())
70032 @@ -1369,6 +1378,10 @@ struct task_struct {
70033 #endif
70034 /* CPU-specific state of this task */
70035 struct thread_struct thread;
70036 +/* thread_info moved to task_struct */
70037 +#ifdef CONFIG_X86
70038 + struct thread_info tinfo;
70039 +#endif
70040 /* filesystem information */
70041 struct fs_struct *fs;
70042 /* open file information */
70043 @@ -1436,6 +1449,15 @@ struct task_struct {
70044 int hardirq_context;
70045 int softirq_context;
70046 #endif
70047 +
70048 +/* process credentials */
70049 + const struct cred *real_cred; /* objective and real subjective task
70050 + * credentials (COW) */
70051 + struct mutex cred_guard_mutex; /* guard against foreign influences on
70052 + * credential calculations
70053 + * (notably. ptrace) */
70054 + struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
70055 +
70056 #ifdef CONFIG_LOCKDEP
70057 # define MAX_LOCK_DEPTH 48UL
70058 u64 curr_chain_key;
70059 @@ -1456,6 +1478,9 @@ struct task_struct {
70060
70061 struct backing_dev_info *backing_dev_info;
70062
70063 + const struct cred *cred; /* effective (overridable) subjective task
70064 + * credentials (COW) */
70065 +
70066 struct io_context *io_context;
70067
70068 unsigned long ptrace_message;
70069 @@ -1519,6 +1544,27 @@ struct task_struct {
70070 unsigned long default_timer_slack_ns;
70071
70072 struct list_head *scm_work_list;
70073 +
70074 +#ifdef CONFIG_GRKERNSEC
70075 + /* grsecurity */
70076 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
70077 + u64 exec_id;
70078 +#endif
70079 +#ifdef CONFIG_GRKERNSEC_SETXID
70080 + const struct cred *delayed_cred;
70081 +#endif
70082 + struct dentry *gr_chroot_dentry;
70083 + struct acl_subject_label *acl;
70084 + struct acl_role_label *role;
70085 + struct file *exec_file;
70086 + u16 acl_role_id;
70087 + /* is this the task that authenticated to the special role */
70088 + u8 acl_sp_role;
70089 + u8 is_writable;
70090 + u8 brute;
70091 + u8 gr_is_chrooted;
70092 +#endif
70093 +
70094 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
70095 /* Index of current stored adress in ret_stack */
70096 int curr_ret_stack;
70097 @@ -1542,6 +1588,57 @@ struct task_struct {
70098 #endif /* CONFIG_TRACING */
70099 };
70100
70101 +#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
70102 +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
70103 +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
70104 +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
70105 +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
70106 +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
70107 +
70108 +#ifdef CONFIG_PAX_SOFTMODE
70109 +extern int pax_softmode;
70110 +#endif
70111 +
70112 +extern int pax_check_flags(unsigned long *);
70113 +
70114 +/* if tsk != current then task_lock must be held on it */
70115 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
70116 +static inline unsigned long pax_get_flags(struct task_struct *tsk)
70117 +{
70118 + if (likely(tsk->mm))
70119 + return tsk->mm->pax_flags;
70120 + else
70121 + return 0UL;
70122 +}
70123 +
70124 +/* if tsk != current then task_lock must be held on it */
70125 +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
70126 +{
70127 + if (likely(tsk->mm)) {
70128 + tsk->mm->pax_flags = flags;
70129 + return 0;
70130 + }
70131 + return -EINVAL;
70132 +}
70133 +#endif
70134 +
70135 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
70136 +extern void pax_set_initial_flags(struct linux_binprm *bprm);
70137 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
70138 +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
70139 +#endif
70140 +
70141 +extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
70142 +extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
70143 +extern void pax_report_refcount_overflow(struct pt_regs *regs);
70144 +extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
70145 +
70146 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
70147 +extern void pax_track_stack(void);
70148 +#else
70149 +static inline void pax_track_stack(void) {}
70150 +#endif
70151 +
70152 /* Future-safe accessor for struct task_struct's cpus_allowed. */
70153 #define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
70154
70155 @@ -1740,7 +1837,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
70156 #define PF_DUMPCORE 0x00000200 /* dumped core */
70157 #define PF_SIGNALED 0x00000400 /* killed by a signal */
70158 #define PF_MEMALLOC 0x00000800 /* Allocating memory */
70159 -#define PF_FLUSHER 0x00001000 /* responsible for disk writeback */
70160 +#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */
70161 #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
70162 #define PF_FREEZING 0x00004000 /* freeze in progress. do not account to load */
70163 #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
70164 @@ -1978,7 +2075,9 @@ void yield(void);
70165 extern struct exec_domain default_exec_domain;
70166
70167 union thread_union {
70168 +#ifndef CONFIG_X86
70169 struct thread_info thread_info;
70170 +#endif
70171 unsigned long stack[THREAD_SIZE/sizeof(long)];
70172 };
70173
70174 @@ -2011,6 +2110,7 @@ extern struct pid_namespace init_pid_ns;
70175 */
70176
70177 extern struct task_struct *find_task_by_vpid(pid_t nr);
70178 +extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
70179 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
70180 struct pid_namespace *ns);
70181
70182 @@ -2155,7 +2255,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
70183 extern void exit_itimers(struct signal_struct *);
70184 extern void flush_itimer_signals(void);
70185
70186 -extern NORET_TYPE void do_group_exit(int);
70187 +extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
70188
70189 extern void daemonize(const char *, ...);
70190 extern int allow_signal(int);
70191 @@ -2284,13 +2384,17 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
70192
70193 #endif
70194
70195 -static inline int object_is_on_stack(void *obj)
70196 +static inline int object_starts_on_stack(void *obj)
70197 {
70198 - void *stack = task_stack_page(current);
70199 + const void *stack = task_stack_page(current);
70200
70201 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
70202 }
70203
70204 +#ifdef CONFIG_PAX_USERCOPY
70205 +extern int object_is_on_stack(const void *obj, unsigned long len);
70206 +#endif
70207 +
70208 extern void thread_info_cache_init(void);
70209
70210 #ifdef CONFIG_DEBUG_STACK_USAGE
70211 @@ -2616,6 +2720,23 @@ static inline unsigned long rlimit_max(unsigned int limit)
70212 return task_rlimit_max(current, limit);
70213 }
70214
70215 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
70216 +DECLARE_PER_CPU(u64, exec_counter);
70217 +static inline void increment_exec_counter(void)
70218 +{
70219 + unsigned int cpu;
70220 + u64 *exec_id_ptr;
70221 + BUILD_BUG_ON(NR_CPUS > (1 << 16));
70222 + cpu = get_cpu();
70223 + exec_id_ptr = &per_cpu(exec_counter, cpu);
70224 + *exec_id_ptr += 1ULL << 16;
70225 + current->exec_id = *exec_id_ptr;
70226 + put_cpu();
70227 +}
70228 +#else
70229 +static inline void increment_exec_counter(void) {}
70230 +#endif
70231 +
70232 #endif /* __KERNEL__ */
70233
70234 #endif
70235 diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
70236 index 1ee2c05..81b7ec4 100644
70237 --- a/include/linux/screen_info.h
70238 +++ b/include/linux/screen_info.h
70239 @@ -42,7 +42,8 @@ struct screen_info {
70240 __u16 pages; /* 0x32 */
70241 __u16 vesa_attributes; /* 0x34 */
70242 __u32 capabilities; /* 0x36 */
70243 - __u8 _reserved[6]; /* 0x3a */
70244 + __u16 vesapm_size; /* 0x3a */
70245 + __u8 _reserved[4]; /* 0x3c */
70246 } __attribute__((packed));
70247
70248 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
70249 diff --git a/include/linux/security.h b/include/linux/security.h
70250 index d40d23f..d739b08 100644
70251 --- a/include/linux/security.h
70252 +++ b/include/linux/security.h
70253 @@ -34,6 +34,7 @@
70254 #include <linux/key.h>
70255 #include <linux/xfrm.h>
70256 #include <linux/gfp.h>
70257 +#include <linux/grsecurity.h>
70258 #include <net/flow.h>
70259
70260 /* Maximum number of letters for an LSM name string */
70261 @@ -76,7 +77,7 @@ extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3,
70262 extern int cap_task_setscheduler(struct task_struct *p, int policy, struct sched_param *lp);
70263 extern int cap_task_setioprio(struct task_struct *p, int ioprio);
70264 extern int cap_task_setnice(struct task_struct *p, int nice);
70265 -extern int cap_syslog(int type);
70266 +extern int cap_syslog(int type, bool from_file);
70267 extern int cap_vm_enough_memory(struct mm_struct *mm, long pages);
70268
70269 struct msghdr;
70270 @@ -1331,6 +1332,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
70271 * logging to the console.
70272 * See the syslog(2) manual page for an explanation of the @type values.
70273 * @type contains the type of action.
70274 + * @from_file indicates the context of action (if it came from /proc).
70275 * Return 0 if permission is granted.
70276 * @settime:
70277 * Check permission to change the system time.
70278 @@ -1445,7 +1447,7 @@ struct security_operations {
70279 int (*sysctl) (struct ctl_table *table, int op);
70280 int (*quotactl) (int cmds, int type, int id, struct super_block *sb);
70281 int (*quota_on) (struct dentry *dentry);
70282 - int (*syslog) (int type);
70283 + int (*syslog) (int type, bool from_file);
70284 int (*settime) (struct timespec *ts, struct timezone *tz);
70285 int (*vm_enough_memory) (struct mm_struct *mm, long pages);
70286
70287 @@ -1740,7 +1742,7 @@ int security_acct(struct file *file);
70288 int security_sysctl(struct ctl_table *table, int op);
70289 int security_quotactl(int cmds, int type, int id, struct super_block *sb);
70290 int security_quota_on(struct dentry *dentry);
70291 -int security_syslog(int type);
70292 +int security_syslog(int type, bool from_file);
70293 int security_settime(struct timespec *ts, struct timezone *tz);
70294 int security_vm_enough_memory(long pages);
70295 int security_vm_enough_memory_mm(struct mm_struct *mm, long pages);
70296 @@ -1986,9 +1988,9 @@ static inline int security_quota_on(struct dentry *dentry)
70297 return 0;
70298 }
70299
70300 -static inline int security_syslog(int type)
70301 +static inline int security_syslog(int type, bool from_file)
70302 {
70303 - return cap_syslog(type);
70304 + return cap_syslog(type, from_file);
70305 }
70306
70307 static inline int security_settime(struct timespec *ts, struct timezone *tz)
70308 diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
70309 index 8366d8f..cc5f9d6 100644
70310 --- a/include/linux/seq_file.h
70311 +++ b/include/linux/seq_file.h
70312 @@ -23,6 +23,9 @@ struct seq_file {
70313 u64 version;
70314 struct mutex lock;
70315 const struct seq_operations *op;
70316 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
70317 + u64 exec_id;
70318 +#endif
70319 void *private;
70320 };
70321
70322 @@ -32,6 +35,7 @@ struct seq_operations {
70323 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
70324 int (*show) (struct seq_file *m, void *v);
70325 };
70326 +typedef struct seq_operations __no_const seq_operations_no_const;
70327
70328 #define SEQ_SKIP 1
70329
70330 diff --git a/include/linux/shm.h b/include/linux/shm.h
70331 index eca6235..c7417ed 100644
70332 --- a/include/linux/shm.h
70333 +++ b/include/linux/shm.h
70334 @@ -95,6 +95,10 @@ struct shmid_kernel /* private to the kernel */
70335 pid_t shm_cprid;
70336 pid_t shm_lprid;
70337 struct user_struct *mlock_user;
70338 +#ifdef CONFIG_GRKERNSEC
70339 + time_t shm_createtime;
70340 + pid_t shm_lapid;
70341 +#endif
70342 };
70343
70344 /* shm_mode upper byte flags */
70345 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
70346 index bcdd660..6e12e11 100644
70347 --- a/include/linux/skbuff.h
70348 +++ b/include/linux/skbuff.h
70349 @@ -14,6 +14,7 @@
70350 #ifndef _LINUX_SKBUFF_H
70351 #define _LINUX_SKBUFF_H
70352
70353 +#include <linux/const.h>
70354 #include <linux/kernel.h>
70355 #include <linux/kmemcheck.h>
70356 #include <linux/compiler.h>
70357 @@ -544,7 +545,7 @@ static inline union skb_shared_tx *skb_tx(struct sk_buff *skb)
70358 */
70359 static inline int skb_queue_empty(const struct sk_buff_head *list)
70360 {
70361 - return list->next == (struct sk_buff *)list;
70362 + return list->next == (const struct sk_buff *)list;
70363 }
70364
70365 /**
70366 @@ -557,7 +558,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
70367 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
70368 const struct sk_buff *skb)
70369 {
70370 - return (skb->next == (struct sk_buff *) list);
70371 + return (skb->next == (const struct sk_buff *) list);
70372 }
70373
70374 /**
70375 @@ -570,7 +571,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
70376 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
70377 const struct sk_buff *skb)
70378 {
70379 - return (skb->prev == (struct sk_buff *) list);
70380 + return (skb->prev == (const struct sk_buff *) list);
70381 }
70382
70383 /**
70384 @@ -1367,7 +1368,7 @@ static inline int skb_network_offset(const struct sk_buff *skb)
70385 * headroom, you should not reduce this.
70386 */
70387 #ifndef NET_SKB_PAD
70388 -#define NET_SKB_PAD 32
70389 +#define NET_SKB_PAD (_AC(32,UL))
70390 #endif
70391
70392 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
70393 diff --git a/include/linux/slab.h b/include/linux/slab.h
70394 index 2da8372..a3be824 100644
70395 --- a/include/linux/slab.h
70396 +++ b/include/linux/slab.h
70397 @@ -11,12 +11,20 @@
70398
70399 #include <linux/gfp.h>
70400 #include <linux/types.h>
70401 +#include <linux/err.h>
70402
70403 /*
70404 * Flags to pass to kmem_cache_create().
70405 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
70406 */
70407 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
70408 +
70409 +#ifdef CONFIG_PAX_USERCOPY
70410 +#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
70411 +#else
70412 +#define SLAB_USERCOPY 0x00000000UL
70413 +#endif
70414 +
70415 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
70416 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
70417 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
70418 @@ -82,10 +90,13 @@
70419 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
70420 * Both make kfree a no-op.
70421 */
70422 -#define ZERO_SIZE_PTR ((void *)16)
70423 +#define ZERO_SIZE_PTR \
70424 +({ \
70425 + BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
70426 + (void *)(-MAX_ERRNO-1L); \
70427 +})
70428
70429 -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
70430 - (unsigned long)ZERO_SIZE_PTR)
70431 +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
70432
70433 /*
70434 * struct kmem_cache related prototypes
70435 @@ -138,6 +149,7 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
70436 void kfree(const void *);
70437 void kzfree(const void *);
70438 size_t ksize(const void *);
70439 +void check_object_size(const void *ptr, unsigned long n, bool to);
70440
70441 /*
70442 * Allocator specific definitions. These are mainly used to establish optimized
70443 @@ -328,4 +340,37 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
70444
70445 void __init kmem_cache_init_late(void);
70446
70447 +#define kmalloc(x, y) \
70448 +({ \
70449 + void *___retval; \
70450 + intoverflow_t ___x = (intoverflow_t)x; \
70451 + if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n"))\
70452 + ___retval = NULL; \
70453 + else \
70454 + ___retval = kmalloc((size_t)___x, (y)); \
70455 + ___retval; \
70456 +})
70457 +
70458 +#define kmalloc_node(x, y, z) \
70459 +({ \
70460 + void *___retval; \
70461 + intoverflow_t ___x = (intoverflow_t)x; \
70462 + if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
70463 + ___retval = NULL; \
70464 + else \
70465 + ___retval = kmalloc_node((size_t)___x, (y), (z));\
70466 + ___retval; \
70467 +})
70468 +
70469 +#define kzalloc(x, y) \
70470 +({ \
70471 + void *___retval; \
70472 + intoverflow_t ___x = (intoverflow_t)x; \
70473 + if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n"))\
70474 + ___retval = NULL; \
70475 + else \
70476 + ___retval = kzalloc((size_t)___x, (y)); \
70477 + ___retval; \
70478 +})
70479 +
70480 #endif /* _LINUX_SLAB_H */
70481 diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
70482 index 850d057..d9dfe3c 100644
70483 --- a/include/linux/slab_def.h
70484 +++ b/include/linux/slab_def.h
70485 @@ -69,10 +69,10 @@ struct kmem_cache {
70486 unsigned long node_allocs;
70487 unsigned long node_frees;
70488 unsigned long node_overflow;
70489 - atomic_t allochit;
70490 - atomic_t allocmiss;
70491 - atomic_t freehit;
70492 - atomic_t freemiss;
70493 + atomic_unchecked_t allochit;
70494 + atomic_unchecked_t allocmiss;
70495 + atomic_unchecked_t freehit;
70496 + atomic_unchecked_t freemiss;
70497
70498 /*
70499 * If debugging is enabled, then the allocator can add additional
70500 diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
70501 index 5ad70a6..57f9f65 100644
70502 --- a/include/linux/slub_def.h
70503 +++ b/include/linux/slub_def.h
70504 @@ -86,7 +86,7 @@ struct kmem_cache {
70505 struct kmem_cache_order_objects max;
70506 struct kmem_cache_order_objects min;
70507 gfp_t allocflags; /* gfp flags to use on each alloc */
70508 - int refcount; /* Refcount for slab cache destroy */
70509 + atomic_t refcount; /* Refcount for slab cache destroy */
70510 void (*ctor)(void *);
70511 int inuse; /* Offset to metadata */
70512 int align; /* Alignment */
70513 @@ -215,7 +215,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
70514 #endif
70515
70516 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
70517 -void *__kmalloc(size_t size, gfp_t flags);
70518 +void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
70519
70520 #ifdef CONFIG_KMEMTRACE
70521 extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
70522 diff --git a/include/linux/sonet.h b/include/linux/sonet.h
70523 index 67ad11f..0bbd8af 100644
70524 --- a/include/linux/sonet.h
70525 +++ b/include/linux/sonet.h
70526 @@ -61,7 +61,7 @@ struct sonet_stats {
70527 #include <asm/atomic.h>
70528
70529 struct k_sonet_stats {
70530 -#define __HANDLE_ITEM(i) atomic_t i
70531 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
70532 __SONET_ITEMS
70533 #undef __HANDLE_ITEM
70534 };
70535 diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
70536 index 6f52b4d..5500323 100644
70537 --- a/include/linux/sunrpc/cache.h
70538 +++ b/include/linux/sunrpc/cache.h
70539 @@ -125,7 +125,7 @@ struct cache_detail {
70540 */
70541 struct cache_req {
70542 struct cache_deferred_req *(*defer)(struct cache_req *req);
70543 -};
70544 +} __no_const;
70545 /* this must be embedded in a deferred_request that is being
70546 * delayed awaiting cache-fill
70547 */
70548 diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
70549 index 8ed9642..101ceab 100644
70550 --- a/include/linux/sunrpc/clnt.h
70551 +++ b/include/linux/sunrpc/clnt.h
70552 @@ -167,9 +167,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
70553 {
70554 switch (sap->sa_family) {
70555 case AF_INET:
70556 - return ntohs(((struct sockaddr_in *)sap)->sin_port);
70557 + return ntohs(((const struct sockaddr_in *)sap)->sin_port);
70558 case AF_INET6:
70559 - return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
70560 + return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
70561 }
70562 return 0;
70563 }
70564 @@ -202,7 +202,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
70565 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
70566 const struct sockaddr *src)
70567 {
70568 - const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
70569 + const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
70570 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
70571
70572 dsin->sin_family = ssin->sin_family;
70573 @@ -299,7 +299,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
70574 if (sa->sa_family != AF_INET6)
70575 return 0;
70576
70577 - return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
70578 + return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
70579 }
70580
70581 #endif /* __KERNEL__ */
70582 diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
70583 index c14fe86..393245e 100644
70584 --- a/include/linux/sunrpc/svc_rdma.h
70585 +++ b/include/linux/sunrpc/svc_rdma.h
70586 @@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
70587 extern unsigned int svcrdma_max_requests;
70588 extern unsigned int svcrdma_max_req_size;
70589
70590 -extern atomic_t rdma_stat_recv;
70591 -extern atomic_t rdma_stat_read;
70592 -extern atomic_t rdma_stat_write;
70593 -extern atomic_t rdma_stat_sq_starve;
70594 -extern atomic_t rdma_stat_rq_starve;
70595 -extern atomic_t rdma_stat_rq_poll;
70596 -extern atomic_t rdma_stat_rq_prod;
70597 -extern atomic_t rdma_stat_sq_poll;
70598 -extern atomic_t rdma_stat_sq_prod;
70599 +extern atomic_unchecked_t rdma_stat_recv;
70600 +extern atomic_unchecked_t rdma_stat_read;
70601 +extern atomic_unchecked_t rdma_stat_write;
70602 +extern atomic_unchecked_t rdma_stat_sq_starve;
70603 +extern atomic_unchecked_t rdma_stat_rq_starve;
70604 +extern atomic_unchecked_t rdma_stat_rq_poll;
70605 +extern atomic_unchecked_t rdma_stat_rq_prod;
70606 +extern atomic_unchecked_t rdma_stat_sq_poll;
70607 +extern atomic_unchecked_t rdma_stat_sq_prod;
70608
70609 #define RPCRDMA_VERSION 1
70610
70611 diff --git a/include/linux/suspend.h b/include/linux/suspend.h
70612 index 5e781d8..1e62818 100644
70613 --- a/include/linux/suspend.h
70614 +++ b/include/linux/suspend.h
70615 @@ -104,15 +104,15 @@ typedef int __bitwise suspend_state_t;
70616 * which require special recovery actions in that situation.
70617 */
70618 struct platform_suspend_ops {
70619 - int (*valid)(suspend_state_t state);
70620 - int (*begin)(suspend_state_t state);
70621 - int (*prepare)(void);
70622 - int (*prepare_late)(void);
70623 - int (*enter)(suspend_state_t state);
70624 - void (*wake)(void);
70625 - void (*finish)(void);
70626 - void (*end)(void);
70627 - void (*recover)(void);
70628 + int (* const valid)(suspend_state_t state);
70629 + int (* const begin)(suspend_state_t state);
70630 + int (* const prepare)(void);
70631 + int (* const prepare_late)(void);
70632 + int (* const enter)(suspend_state_t state);
70633 + void (* const wake)(void);
70634 + void (* const finish)(void);
70635 + void (* const end)(void);
70636 + void (* const recover)(void);
70637 };
70638
70639 #ifdef CONFIG_SUSPEND
70640 @@ -120,7 +120,7 @@ struct platform_suspend_ops {
70641 * suspend_set_ops - set platform dependent suspend operations
70642 * @ops: The new suspend operations to set.
70643 */
70644 -extern void suspend_set_ops(struct platform_suspend_ops *ops);
70645 +extern void suspend_set_ops(const struct platform_suspend_ops *ops);
70646 extern int suspend_valid_only_mem(suspend_state_t state);
70647
70648 /**
70649 @@ -145,7 +145,7 @@ extern int pm_suspend(suspend_state_t state);
70650 #else /* !CONFIG_SUSPEND */
70651 #define suspend_valid_only_mem NULL
70652
70653 -static inline void suspend_set_ops(struct platform_suspend_ops *ops) {}
70654 +static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
70655 static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
70656 #endif /* !CONFIG_SUSPEND */
70657
70658 @@ -215,16 +215,16 @@ extern void mark_free_pages(struct zone *zone);
70659 * platforms which require special recovery actions in that situation.
70660 */
70661 struct platform_hibernation_ops {
70662 - int (*begin)(void);
70663 - void (*end)(void);
70664 - int (*pre_snapshot)(void);
70665 - void (*finish)(void);
70666 - int (*prepare)(void);
70667 - int (*enter)(void);
70668 - void (*leave)(void);
70669 - int (*pre_restore)(void);
70670 - void (*restore_cleanup)(void);
70671 - void (*recover)(void);
70672 + int (* const begin)(void);
70673 + void (* const end)(void);
70674 + int (* const pre_snapshot)(void);
70675 + void (* const finish)(void);
70676 + int (* const prepare)(void);
70677 + int (* const enter)(void);
70678 + void (* const leave)(void);
70679 + int (* const pre_restore)(void);
70680 + void (* const restore_cleanup)(void);
70681 + void (* const recover)(void);
70682 };
70683
70684 #ifdef CONFIG_HIBERNATION
70685 @@ -243,7 +243,7 @@ extern void swsusp_set_page_free(struct page *);
70686 extern void swsusp_unset_page_free(struct page *);
70687 extern unsigned long get_safe_page(gfp_t gfp_mask);
70688
70689 -extern void hibernation_set_ops(struct platform_hibernation_ops *ops);
70690 +extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
70691 extern int hibernate(void);
70692 extern bool system_entering_hibernation(void);
70693 #else /* CONFIG_HIBERNATION */
70694 @@ -251,7 +251,7 @@ static inline int swsusp_page_is_forbidden(struct page *p) { return 0; }
70695 static inline void swsusp_set_page_free(struct page *p) {}
70696 static inline void swsusp_unset_page_free(struct page *p) {}
70697
70698 -static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {}
70699 +static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {}
70700 static inline int hibernate(void) { return -ENOSYS; }
70701 static inline bool system_entering_hibernation(void) { return false; }
70702 #endif /* CONFIG_HIBERNATION */
70703 diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
70704 index 0eb6942..a805cb6 100644
70705 --- a/include/linux/sysctl.h
70706 +++ b/include/linux/sysctl.h
70707 @@ -164,7 +164,11 @@ enum
70708 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
70709 };
70710
70711 -
70712 +#ifdef CONFIG_PAX_SOFTMODE
70713 +enum {
70714 + PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
70715 +};
70716 +#endif
70717
70718 /* CTL_VM names: */
70719 enum
70720 @@ -982,6 +986,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
70721
70722 extern int proc_dostring(struct ctl_table *, int,
70723 void __user *, size_t *, loff_t *);
70724 +extern int proc_dostring_modpriv(struct ctl_table *, int,
70725 + void __user *, size_t *, loff_t *);
70726 extern int proc_dointvec(struct ctl_table *, int,
70727 void __user *, size_t *, loff_t *);
70728 extern int proc_dointvec_minmax(struct ctl_table *, int,
70729 @@ -1003,6 +1009,7 @@ extern int do_sysctl (int __user *name, int nlen,
70730
70731 extern ctl_handler sysctl_data;
70732 extern ctl_handler sysctl_string;
70733 +extern ctl_handler sysctl_string_modpriv;
70734 extern ctl_handler sysctl_intvec;
70735 extern ctl_handler sysctl_jiffies;
70736 extern ctl_handler sysctl_ms_jiffies;
70737 diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
70738 index 9d68fed..71f02cc 100644
70739 --- a/include/linux/sysfs.h
70740 +++ b/include/linux/sysfs.h
70741 @@ -75,8 +75,8 @@ struct bin_attribute {
70742 };
70743
70744 struct sysfs_ops {
70745 - ssize_t (*show)(struct kobject *, struct attribute *,char *);
70746 - ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t);
70747 + ssize_t (* const show)(struct kobject *, struct attribute *,char *);
70748 + ssize_t (* const store)(struct kobject *,struct attribute *,const char *, size_t);
70749 };
70750
70751 struct sysfs_dirent;
70752 diff --git a/include/linux/syslog.h b/include/linux/syslog.h
70753 new file mode 100644
70754 index 0000000..3891139
70755 --- /dev/null
70756 +++ b/include/linux/syslog.h
70757 @@ -0,0 +1,52 @@
70758 +/* Syslog internals
70759 + *
70760 + * Copyright 2010 Canonical, Ltd.
70761 + * Author: Kees Cook <kees.cook@canonical.com>
70762 + *
70763 + * This program is free software; you can redistribute it and/or modify
70764 + * it under the terms of the GNU General Public License as published by
70765 + * the Free Software Foundation; either version 2, or (at your option)
70766 + * any later version.
70767 + *
70768 + * This program is distributed in the hope that it will be useful,
70769 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
70770 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
70771 + * GNU General Public License for more details.
70772 + *
70773 + * You should have received a copy of the GNU General Public License
70774 + * along with this program; see the file COPYING. If not, write to
70775 + * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
70776 + */
70777 +
70778 +#ifndef _LINUX_SYSLOG_H
70779 +#define _LINUX_SYSLOG_H
70780 +
70781 +/* Close the log. Currently a NOP. */
70782 +#define SYSLOG_ACTION_CLOSE 0
70783 +/* Open the log. Currently a NOP. */
70784 +#define SYSLOG_ACTION_OPEN 1
70785 +/* Read from the log. */
70786 +#define SYSLOG_ACTION_READ 2
70787 +/* Read all messages remaining in the ring buffer. */
70788 +#define SYSLOG_ACTION_READ_ALL 3
70789 +/* Read and clear all messages remaining in the ring buffer */
70790 +#define SYSLOG_ACTION_READ_CLEAR 4
70791 +/* Clear ring buffer. */
70792 +#define SYSLOG_ACTION_CLEAR 5
70793 +/* Disable printk's to console */
70794 +#define SYSLOG_ACTION_CONSOLE_OFF 6
70795 +/* Enable printk's to console */
70796 +#define SYSLOG_ACTION_CONSOLE_ON 7
70797 +/* Set level of messages printed to console */
70798 +#define SYSLOG_ACTION_CONSOLE_LEVEL 8
70799 +/* Return number of unread characters in the log buffer */
70800 +#define SYSLOG_ACTION_SIZE_UNREAD 9
70801 +/* Return size of the log buffer */
70802 +#define SYSLOG_ACTION_SIZE_BUFFER 10
70803 +
70804 +#define SYSLOG_FROM_CALL 0
70805 +#define SYSLOG_FROM_FILE 1
70806 +
70807 +int do_syslog(int type, char __user *buf, int count, bool from_file);
70808 +
70809 +#endif /* _LINUX_SYSLOG_H */
70810 diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
70811 index a8cc4e1..98d3b85 100644
70812 --- a/include/linux/thread_info.h
70813 +++ b/include/linux/thread_info.h
70814 @@ -23,7 +23,7 @@ struct restart_block {
70815 };
70816 /* For futex_wait and futex_wait_requeue_pi */
70817 struct {
70818 - u32 *uaddr;
70819 + u32 __user *uaddr;
70820 u32 val;
70821 u32 flags;
70822 u32 bitset;
70823 diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
70824 index 1eb44a9..f582df3 100644
70825 --- a/include/linux/tracehook.h
70826 +++ b/include/linux/tracehook.h
70827 @@ -69,12 +69,12 @@ static inline int tracehook_expect_breakpoints(struct task_struct *task)
70828 /*
70829 * ptrace report for syscall entry and exit looks identical.
70830 */
70831 -static inline void ptrace_report_syscall(struct pt_regs *regs)
70832 +static inline int ptrace_report_syscall(struct pt_regs *regs)
70833 {
70834 int ptrace = task_ptrace(current);
70835
70836 if (!(ptrace & PT_PTRACED))
70837 - return;
70838 + return 0;
70839
70840 ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));
70841
70842 @@ -87,6 +87,8 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
70843 send_sig(current->exit_code, current, 1);
70844 current->exit_code = 0;
70845 }
70846 +
70847 + return fatal_signal_pending(current);
70848 }
70849
70850 /**
70851 @@ -111,8 +113,7 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
70852 static inline __must_check int tracehook_report_syscall_entry(
70853 struct pt_regs *regs)
70854 {
70855 - ptrace_report_syscall(regs);
70856 - return 0;
70857 + return ptrace_report_syscall(regs);
70858 }
70859
70860 /**
70861 diff --git a/include/linux/tty.h b/include/linux/tty.h
70862 index e9c57e9..ee6d489 100644
70863 --- a/include/linux/tty.h
70864 +++ b/include/linux/tty.h
70865 @@ -493,7 +493,6 @@ extern void tty_ldisc_begin(void);
70866 /* This last one is just for the tty layer internals and shouldn't be used elsewhere */
70867 extern void tty_ldisc_enable(struct tty_struct *tty);
70868
70869 -
70870 /* n_tty.c */
70871 extern struct tty_ldisc_ops tty_ldisc_N_TTY;
70872
70873 diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
70874 index 0c4ee9b..9f7c426 100644
70875 --- a/include/linux/tty_ldisc.h
70876 +++ b/include/linux/tty_ldisc.h
70877 @@ -139,7 +139,7 @@ struct tty_ldisc_ops {
70878
70879 struct module *owner;
70880
70881 - int refcount;
70882 + atomic_t refcount;
70883 };
70884
70885 struct tty_ldisc {
70886 diff --git a/include/linux/types.h b/include/linux/types.h
70887 index c42724f..d190eee 100644
70888 --- a/include/linux/types.h
70889 +++ b/include/linux/types.h
70890 @@ -191,10 +191,26 @@ typedef struct {
70891 volatile int counter;
70892 } atomic_t;
70893
70894 +#ifdef CONFIG_PAX_REFCOUNT
70895 +typedef struct {
70896 + volatile int counter;
70897 +} atomic_unchecked_t;
70898 +#else
70899 +typedef atomic_t atomic_unchecked_t;
70900 +#endif
70901 +
70902 #ifdef CONFIG_64BIT
70903 typedef struct {
70904 volatile long counter;
70905 } atomic64_t;
70906 +
70907 +#ifdef CONFIG_PAX_REFCOUNT
70908 +typedef struct {
70909 + volatile long counter;
70910 +} atomic64_unchecked_t;
70911 +#else
70912 +typedef atomic64_t atomic64_unchecked_t;
70913 +#endif
70914 #endif
70915
70916 struct ustat {
70917 diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
70918 index 6b58367..53a3e8e 100644
70919 --- a/include/linux/uaccess.h
70920 +++ b/include/linux/uaccess.h
70921 @@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
70922 long ret; \
70923 mm_segment_t old_fs = get_fs(); \
70924 \
70925 - set_fs(KERNEL_DS); \
70926 pagefault_disable(); \
70927 - ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
70928 - pagefault_enable(); \
70929 + set_fs(KERNEL_DS); \
70930 + ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
70931 set_fs(old_fs); \
70932 + pagefault_enable(); \
70933 ret; \
70934 })
70935
70936 @@ -93,7 +93,7 @@ static inline unsigned long __copy_from_user_nocache(void *to,
70937 * Safely read from address @src to the buffer at @dst. If a kernel fault
70938 * happens, handle that and return -EFAULT.
70939 */
70940 -extern long probe_kernel_read(void *dst, void *src, size_t size);
70941 +extern long probe_kernel_read(void *dst, const void *src, size_t size);
70942
70943 /*
70944 * probe_kernel_write(): safely attempt to write to a location
70945 @@ -104,6 +104,6 @@ extern long probe_kernel_read(void *dst, void *src, size_t size);
70946 * Safely write to address @dst from the buffer at @src. If a kernel fault
70947 * happens, handle that and return -EFAULT.
70948 */
70949 -extern long probe_kernel_write(void *dst, void *src, size_t size);
70950 +extern long probe_kernel_write(void *dst, const void *src, size_t size);
70951
70952 #endif /* __LINUX_UACCESS_H__ */
70953 diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
70954 index 99c1b4d..bb94261 100644
70955 --- a/include/linux/unaligned/access_ok.h
70956 +++ b/include/linux/unaligned/access_ok.h
70957 @@ -6,32 +6,32 @@
70958
70959 static inline u16 get_unaligned_le16(const void *p)
70960 {
70961 - return le16_to_cpup((__le16 *)p);
70962 + return le16_to_cpup((const __le16 *)p);
70963 }
70964
70965 static inline u32 get_unaligned_le32(const void *p)
70966 {
70967 - return le32_to_cpup((__le32 *)p);
70968 + return le32_to_cpup((const __le32 *)p);
70969 }
70970
70971 static inline u64 get_unaligned_le64(const void *p)
70972 {
70973 - return le64_to_cpup((__le64 *)p);
70974 + return le64_to_cpup((const __le64 *)p);
70975 }
70976
70977 static inline u16 get_unaligned_be16(const void *p)
70978 {
70979 - return be16_to_cpup((__be16 *)p);
70980 + return be16_to_cpup((const __be16 *)p);
70981 }
70982
70983 static inline u32 get_unaligned_be32(const void *p)
70984 {
70985 - return be32_to_cpup((__be32 *)p);
70986 + return be32_to_cpup((const __be32 *)p);
70987 }
70988
70989 static inline u64 get_unaligned_be64(const void *p)
70990 {
70991 - return be64_to_cpup((__be64 *)p);
70992 + return be64_to_cpup((const __be64 *)p);
70993 }
70994
70995 static inline void put_unaligned_le16(u16 val, void *p)
70996 diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
70997 index 79b9837..b5a56f9 100644
70998 --- a/include/linux/vermagic.h
70999 +++ b/include/linux/vermagic.h
71000 @@ -26,9 +26,35 @@
71001 #define MODULE_ARCH_VERMAGIC ""
71002 #endif
71003
71004 +#ifdef CONFIG_PAX_REFCOUNT
71005 +#define MODULE_PAX_REFCOUNT "REFCOUNT "
71006 +#else
71007 +#define MODULE_PAX_REFCOUNT ""
71008 +#endif
71009 +
71010 +#ifdef CONSTIFY_PLUGIN
71011 +#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
71012 +#else
71013 +#define MODULE_CONSTIFY_PLUGIN ""
71014 +#endif
71015 +
71016 +#ifdef STACKLEAK_PLUGIN
71017 +#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
71018 +#else
71019 +#define MODULE_STACKLEAK_PLUGIN ""
71020 +#endif
71021 +
71022 +#ifdef CONFIG_GRKERNSEC
71023 +#define MODULE_GRSEC "GRSEC "
71024 +#else
71025 +#define MODULE_GRSEC ""
71026 +#endif
71027 +
71028 #define VERMAGIC_STRING \
71029 UTS_RELEASE " " \
71030 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
71031 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
71032 - MODULE_ARCH_VERMAGIC
71033 + MODULE_ARCH_VERMAGIC \
71034 + MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
71035 + MODULE_GRSEC
71036
71037 diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
71038 index 819a634..462ac12 100644
71039 --- a/include/linux/vmalloc.h
71040 +++ b/include/linux/vmalloc.h
71041 @@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
71042 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
71043 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
71044 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
71045 +
71046 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
71047 +#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
71048 +#endif
71049 +
71050 /* bits [20..32] reserved for arch specific ioremap internals */
71051
71052 /*
71053 @@ -124,4 +129,81 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
71054
71055 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
71056
71057 +#define vmalloc(x) \
71058 +({ \
71059 + void *___retval; \
71060 + intoverflow_t ___x = (intoverflow_t)x; \
71061 + if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
71062 + ___retval = NULL; \
71063 + else \
71064 + ___retval = vmalloc((unsigned long)___x); \
71065 + ___retval; \
71066 +})
71067 +
71068 +#define __vmalloc(x, y, z) \
71069 +({ \
71070 + void *___retval; \
71071 + intoverflow_t ___x = (intoverflow_t)x; \
71072 + if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
71073 + ___retval = NULL; \
71074 + else \
71075 + ___retval = __vmalloc((unsigned long)___x, (y), (z));\
71076 + ___retval; \
71077 +})
71078 +
71079 +#define vmalloc_user(x) \
71080 +({ \
71081 + void *___retval; \
71082 + intoverflow_t ___x = (intoverflow_t)x; \
71083 + if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
71084 + ___retval = NULL; \
71085 + else \
71086 + ___retval = vmalloc_user((unsigned long)___x); \
71087 + ___retval; \
71088 +})
71089 +
71090 +#define vmalloc_exec(x) \
71091 +({ \
71092 + void *___retval; \
71093 + intoverflow_t ___x = (intoverflow_t)x; \
71094 + if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
71095 + ___retval = NULL; \
71096 + else \
71097 + ___retval = vmalloc_exec((unsigned long)___x); \
71098 + ___retval; \
71099 +})
71100 +
71101 +#define vmalloc_node(x, y) \
71102 +({ \
71103 + void *___retval; \
71104 + intoverflow_t ___x = (intoverflow_t)x; \
71105 + if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
71106 + ___retval = NULL; \
71107 + else \
71108 + ___retval = vmalloc_node((unsigned long)___x, (y));\
71109 + ___retval; \
71110 +})
71111 +
71112 +#define vmalloc_32(x) \
71113 +({ \
71114 + void *___retval; \
71115 + intoverflow_t ___x = (intoverflow_t)x; \
71116 + if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
71117 + ___retval = NULL; \
71118 + else \
71119 + ___retval = vmalloc_32((unsigned long)___x); \
71120 + ___retval; \
71121 +})
71122 +
71123 +#define vmalloc_32_user(x) \
71124 +({ \
71125 + void *___retval; \
71126 + intoverflow_t ___x = (intoverflow_t)x; \
71127 + if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
71128 + ___retval = NULL; \
71129 + else \
71130 + ___retval = vmalloc_32_user((unsigned long)___x);\
71131 + ___retval; \
71132 +})
71133 +
71134 #endif /* _LINUX_VMALLOC_H */
71135 diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
71136 index 13070d6..aa4159a 100644
71137 --- a/include/linux/vmstat.h
71138 +++ b/include/linux/vmstat.h
71139 @@ -136,18 +136,18 @@ static inline void vm_events_fold_cpu(int cpu)
71140 /*
71141 * Zone based page accounting with per cpu differentials.
71142 */
71143 -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
71144 +extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
71145
71146 static inline void zone_page_state_add(long x, struct zone *zone,
71147 enum zone_stat_item item)
71148 {
71149 - atomic_long_add(x, &zone->vm_stat[item]);
71150 - atomic_long_add(x, &vm_stat[item]);
71151 + atomic_long_add_unchecked(x, &zone->vm_stat[item]);
71152 + atomic_long_add_unchecked(x, &vm_stat[item]);
71153 }
71154
71155 static inline unsigned long global_page_state(enum zone_stat_item item)
71156 {
71157 - long x = atomic_long_read(&vm_stat[item]);
71158 + long x = atomic_long_read_unchecked(&vm_stat[item]);
71159 #ifdef CONFIG_SMP
71160 if (x < 0)
71161 x = 0;
71162 @@ -158,7 +158,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
71163 static inline unsigned long zone_page_state(struct zone *zone,
71164 enum zone_stat_item item)
71165 {
71166 - long x = atomic_long_read(&zone->vm_stat[item]);
71167 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
71168 #ifdef CONFIG_SMP
71169 if (x < 0)
71170 x = 0;
71171 @@ -175,7 +175,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
71172 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
71173 enum zone_stat_item item)
71174 {
71175 - long x = atomic_long_read(&zone->vm_stat[item]);
71176 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
71177
71178 #ifdef CONFIG_SMP
71179 int cpu;
71180 @@ -264,8 +264,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
71181
71182 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
71183 {
71184 - atomic_long_inc(&zone->vm_stat[item]);
71185 - atomic_long_inc(&vm_stat[item]);
71186 + atomic_long_inc_unchecked(&zone->vm_stat[item]);
71187 + atomic_long_inc_unchecked(&vm_stat[item]);
71188 }
71189
71190 static inline void __inc_zone_page_state(struct page *page,
71191 @@ -276,8 +276,8 @@ static inline void __inc_zone_page_state(struct page *page,
71192
71193 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
71194 {
71195 - atomic_long_dec(&zone->vm_stat[item]);
71196 - atomic_long_dec(&vm_stat[item]);
71197 + atomic_long_dec_unchecked(&zone->vm_stat[item]);
71198 + atomic_long_dec_unchecked(&vm_stat[item]);
71199 }
71200
71201 static inline void __dec_zone_page_state(struct page *page,
71202 diff --git a/include/linux/xattr.h b/include/linux/xattr.h
71203 index 5c84af8..1a3b6e2 100644
71204 --- a/include/linux/xattr.h
71205 +++ b/include/linux/xattr.h
71206 @@ -33,6 +33,11 @@
71207 #define XATTR_USER_PREFIX "user."
71208 #define XATTR_USER_PREFIX_LEN (sizeof (XATTR_USER_PREFIX) - 1)
71209
71210 +/* User namespace */
71211 +#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
71212 +#define XATTR_PAX_FLAGS_SUFFIX "flags"
71213 +#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
71214 +
71215 struct inode;
71216 struct dentry;
71217
71218 diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
71219 index eed5fcc..5080d24 100644
71220 --- a/include/media/saa7146_vv.h
71221 +++ b/include/media/saa7146_vv.h
71222 @@ -167,7 +167,7 @@ struct saa7146_ext_vv
71223 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
71224
71225 /* the extension can override this */
71226 - struct v4l2_ioctl_ops ops;
71227 + v4l2_ioctl_ops_no_const ops;
71228 /* pointer to the saa7146 core ops */
71229 const struct v4l2_ioctl_ops *core_ops;
71230
71231 diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
71232 index 73c9867..2da8837 100644
71233 --- a/include/media/v4l2-dev.h
71234 +++ b/include/media/v4l2-dev.h
71235 @@ -34,7 +34,7 @@ struct v4l2_device;
71236 #define V4L2_FL_UNREGISTERED (0)
71237
71238 struct v4l2_file_operations {
71239 - struct module *owner;
71240 + struct module * const owner;
71241 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
71242 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
71243 unsigned int (*poll) (struct file *, struct poll_table_struct *);
71244 @@ -46,6 +46,7 @@ struct v4l2_file_operations {
71245 int (*open) (struct file *);
71246 int (*release) (struct file *);
71247 };
71248 +typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
71249
71250 /*
71251 * Newer version of video_device, handled by videodev2.c
71252 diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
71253 index 5d5d550..f559ef1 100644
71254 --- a/include/media/v4l2-device.h
71255 +++ b/include/media/v4l2-device.h
71256 @@ -71,7 +71,7 @@ int __must_check v4l2_device_register(struct device *dev, struct v4l2_device *v4
71257 this function returns 0. If the name ends with a digit (e.g. cx18),
71258 then the name will be set to cx18-0 since cx180 looks really odd. */
71259 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
71260 - atomic_t *instance);
71261 + atomic_unchecked_t *instance);
71262
71263 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
71264 Since the parent disappears this ensures that v4l2_dev doesn't have an
71265 diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
71266 index 7a4529d..7244290 100644
71267 --- a/include/media/v4l2-ioctl.h
71268 +++ b/include/media/v4l2-ioctl.h
71269 @@ -243,6 +243,7 @@ struct v4l2_ioctl_ops {
71270 long (*vidioc_default) (struct file *file, void *fh,
71271 int cmd, void *arg);
71272 };
71273 +typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
71274
71275
71276 /* v4l debugging and diagnostics */
71277 diff --git a/include/net/flow.h b/include/net/flow.h
71278 index 809970b..c3df4f3 100644
71279 --- a/include/net/flow.h
71280 +++ b/include/net/flow.h
71281 @@ -92,7 +92,7 @@ typedef int (*flow_resolve_t)(struct net *net, struct flowi *key, u16 family,
71282 extern void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family,
71283 u8 dir, flow_resolve_t resolver);
71284 extern void flow_cache_flush(void);
71285 -extern atomic_t flow_cache_genid;
71286 +extern atomic_unchecked_t flow_cache_genid;
71287
71288 static inline int flow_cache_uli_match(struct flowi *fl1, struct flowi *fl2)
71289 {
71290 diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
71291 index 15e1f8fe..668837c 100644
71292 --- a/include/net/inetpeer.h
71293 +++ b/include/net/inetpeer.h
71294 @@ -24,7 +24,7 @@ struct inet_peer
71295 __u32 dtime; /* the time of last use of not
71296 * referenced entries */
71297 atomic_t refcnt;
71298 - atomic_t rid; /* Frag reception counter */
71299 + atomic_unchecked_t rid; /* Frag reception counter */
71300 __u32 tcp_ts;
71301 unsigned long tcp_ts_stamp;
71302 };
71303 diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
71304 index 98978e7..2243a3d 100644
71305 --- a/include/net/ip_vs.h
71306 +++ b/include/net/ip_vs.h
71307 @@ -365,7 +365,7 @@ struct ip_vs_conn {
71308 struct ip_vs_conn *control; /* Master control connection */
71309 atomic_t n_control; /* Number of controlled ones */
71310 struct ip_vs_dest *dest; /* real server */
71311 - atomic_t in_pkts; /* incoming packet counter */
71312 + atomic_unchecked_t in_pkts; /* incoming packet counter */
71313
71314 /* packet transmitter for different forwarding methods. If it
71315 mangles the packet, it must return NF_DROP or better NF_STOLEN,
71316 @@ -466,7 +466,7 @@ struct ip_vs_dest {
71317 union nf_inet_addr addr; /* IP address of the server */
71318 __be16 port; /* port number of the server */
71319 volatile unsigned flags; /* dest status flags */
71320 - atomic_t conn_flags; /* flags to copy to conn */
71321 + atomic_unchecked_t conn_flags; /* flags to copy to conn */
71322 atomic_t weight; /* server weight */
71323
71324 atomic_t refcnt; /* reference counter */
71325 diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
71326 index 69b610a..fe3962c 100644
71327 --- a/include/net/irda/ircomm_core.h
71328 +++ b/include/net/irda/ircomm_core.h
71329 @@ -51,7 +51,7 @@ typedef struct {
71330 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
71331 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
71332 struct ircomm_info *);
71333 -} call_t;
71334 +} __no_const call_t;
71335
71336 struct ircomm_cb {
71337 irda_queue_t queue;
71338 diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
71339 index eea2e61..08c692d 100644
71340 --- a/include/net/irda/ircomm_tty.h
71341 +++ b/include/net/irda/ircomm_tty.h
71342 @@ -35,6 +35,7 @@
71343 #include <linux/termios.h>
71344 #include <linux/timer.h>
71345 #include <linux/tty.h> /* struct tty_struct */
71346 +#include <asm/local.h>
71347
71348 #include <net/irda/irias_object.h>
71349 #include <net/irda/ircomm_core.h>
71350 @@ -105,8 +106,8 @@ struct ircomm_tty_cb {
71351 unsigned short close_delay;
71352 unsigned short closing_wait; /* time to wait before closing */
71353
71354 - int open_count;
71355 - int blocked_open; /* # of blocked opens */
71356 + local_t open_count;
71357 + local_t blocked_open; /* # of blocked opens */
71358
71359 /* Protect concurent access to :
71360 * o self->open_count
71361 diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
71362 index f82a1e8..82d81e8 100644
71363 --- a/include/net/iucv/af_iucv.h
71364 +++ b/include/net/iucv/af_iucv.h
71365 @@ -87,7 +87,7 @@ struct iucv_sock {
71366 struct iucv_sock_list {
71367 struct hlist_head head;
71368 rwlock_t lock;
71369 - atomic_t autobind_name;
71370 + atomic_unchecked_t autobind_name;
71371 };
71372
71373 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
71374 diff --git a/include/net/lapb.h b/include/net/lapb.h
71375 index 96cb5dd..25e8d4f 100644
71376 --- a/include/net/lapb.h
71377 +++ b/include/net/lapb.h
71378 @@ -95,7 +95,7 @@ struct lapb_cb {
71379 struct sk_buff_head write_queue;
71380 struct sk_buff_head ack_queue;
71381 unsigned char window;
71382 - struct lapb_register_struct callbacks;
71383 + struct lapb_register_struct *callbacks;
71384
71385 /* FRMR control information */
71386 struct lapb_frame frmr_data;
71387 diff --git a/include/net/neighbour.h b/include/net/neighbour.h
71388 index 3817fda..cdb2343 100644
71389 --- a/include/net/neighbour.h
71390 +++ b/include/net/neighbour.h
71391 @@ -131,7 +131,7 @@ struct neigh_ops
71392 int (*connected_output)(struct sk_buff*);
71393 int (*hh_output)(struct sk_buff*);
71394 int (*queue_xmit)(struct sk_buff*);
71395 -};
71396 +} __do_const;
71397
71398 struct pneigh_entry
71399 {
71400 diff --git a/include/net/netlink.h b/include/net/netlink.h
71401 index c344646..4778c71 100644
71402 --- a/include/net/netlink.h
71403 +++ b/include/net/netlink.h
71404 @@ -335,7 +335,7 @@ static inline int nlmsg_ok(const struct nlmsghdr *nlh, int remaining)
71405 {
71406 return (remaining >= (int) sizeof(struct nlmsghdr) &&
71407 nlh->nlmsg_len >= sizeof(struct nlmsghdr) &&
71408 - nlh->nlmsg_len <= remaining);
71409 + nlh->nlmsg_len <= (unsigned int)remaining);
71410 }
71411
71412 /**
71413 @@ -558,7 +558,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
71414 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
71415 {
71416 if (mark)
71417 - skb_trim(skb, (unsigned char *) mark - skb->data);
71418 + skb_trim(skb, (const unsigned char *) mark - skb->data);
71419 }
71420
71421 /**
71422 diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
71423 index 9a4b8b7..e49e077 100644
71424 --- a/include/net/netns/ipv4.h
71425 +++ b/include/net/netns/ipv4.h
71426 @@ -54,7 +54,7 @@ struct netns_ipv4 {
71427 int current_rt_cache_rebuild_count;
71428
71429 struct timer_list rt_secret_timer;
71430 - atomic_t rt_genid;
71431 + atomic_unchecked_t rt_genid;
71432
71433 #ifdef CONFIG_IP_MROUTE
71434 struct sock *mroute_sk;
71435 diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
71436 index 8a6d529..171f401 100644
71437 --- a/include/net/sctp/sctp.h
71438 +++ b/include/net/sctp/sctp.h
71439 @@ -305,8 +305,8 @@ extern int sctp_debug_flag;
71440
71441 #else /* SCTP_DEBUG */
71442
71443 -#define SCTP_DEBUG_PRINTK(whatever...)
71444 -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
71445 +#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
71446 +#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
71447 #define SCTP_ENABLE_DEBUG
71448 #define SCTP_DISABLE_DEBUG
71449 #define SCTP_ASSERT(expr, str, func)
71450 diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h
71451 index d97f689..f3b90ab 100644
71452 --- a/include/net/secure_seq.h
71453 +++ b/include/net/secure_seq.h
71454 @@ -7,14 +7,14 @@ extern __u32 secure_ip_id(__be32 daddr);
71455 extern __u32 secure_ipv6_id(const __be32 daddr[4]);
71456 extern u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
71457 extern u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
71458 - __be16 dport);
71459 + __be16 dport);
71460 extern __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
71461 __be16 sport, __be16 dport);
71462 extern __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
71463 - __be16 sport, __be16 dport);
71464 + __be16 sport, __be16 dport);
71465 extern u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
71466 - __be16 sport, __be16 dport);
71467 + __be16 sport, __be16 dport);
71468 extern u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
71469 - __be16 sport, __be16 dport);
71470 + __be16 sport, __be16 dport);
71471
71472 #endif /* _NET_SECURE_SEQ */
71473 diff --git a/include/net/sock.h b/include/net/sock.h
71474 index 78adf52..99afd29 100644
71475 --- a/include/net/sock.h
71476 +++ b/include/net/sock.h
71477 @@ -272,7 +272,7 @@ struct sock {
71478 rwlock_t sk_callback_lock;
71479 int sk_err,
71480 sk_err_soft;
71481 - atomic_t sk_drops;
71482 + atomic_unchecked_t sk_drops;
71483 unsigned short sk_ack_backlog;
71484 unsigned short sk_max_ack_backlog;
71485 __u32 sk_priority;
71486 @@ -737,7 +737,7 @@ static inline void sk_refcnt_debug_release(const struct sock *sk)
71487 extern void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
71488 extern int sock_prot_inuse_get(struct net *net, struct proto *proto);
71489 #else
71490 -static void inline sock_prot_inuse_add(struct net *net, struct proto *prot,
71491 +static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
71492 int inc)
71493 {
71494 }
71495 diff --git a/include/net/tcp.h b/include/net/tcp.h
71496 index 6cfe18b..dd21acb 100644
71497 --- a/include/net/tcp.h
71498 +++ b/include/net/tcp.h
71499 @@ -1444,8 +1444,8 @@ enum tcp_seq_states {
71500 struct tcp_seq_afinfo {
71501 char *name;
71502 sa_family_t family;
71503 - struct file_operations seq_fops;
71504 - struct seq_operations seq_ops;
71505 + file_operations_no_const seq_fops;
71506 + seq_operations_no_const seq_ops;
71507 };
71508
71509 struct tcp_iter_state {
71510 diff --git a/include/net/udp.h b/include/net/udp.h
71511 index f98abd2..b4b042f 100644
71512 --- a/include/net/udp.h
71513 +++ b/include/net/udp.h
71514 @@ -187,8 +187,8 @@ struct udp_seq_afinfo {
71515 char *name;
71516 sa_family_t family;
71517 struct udp_table *udp_table;
71518 - struct file_operations seq_fops;
71519 - struct seq_operations seq_ops;
71520 + file_operations_no_const seq_fops;
71521 + seq_operations_no_const seq_ops;
71522 };
71523
71524 struct udp_iter_state {
71525 diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
71526 index cbb822e..e9c1cbe 100644
71527 --- a/include/rdma/iw_cm.h
71528 +++ b/include/rdma/iw_cm.h
71529 @@ -129,7 +129,7 @@ struct iw_cm_verbs {
71530 int backlog);
71531
71532 int (*destroy_listen)(struct iw_cm_id *cm_id);
71533 -};
71534 +} __no_const;
71535
71536 /**
71537 * iw_create_cm_id - Create an IW CM identifier.
71538 diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
71539 index 09a124b..caa8ca8 100644
71540 --- a/include/scsi/libfc.h
71541 +++ b/include/scsi/libfc.h
71542 @@ -675,6 +675,7 @@ struct libfc_function_template {
71543 */
71544 void (*disc_stop_final) (struct fc_lport *);
71545 };
71546 +typedef struct libfc_function_template __no_const libfc_function_template_no_const;
71547
71548 /* information used by the discovery layer */
71549 struct fc_disc {
71550 @@ -707,7 +708,7 @@ struct fc_lport {
71551 struct fc_disc disc;
71552
71553 /* Operational Information */
71554 - struct libfc_function_template tt;
71555 + libfc_function_template_no_const tt;
71556 u8 link_up;
71557 u8 qfull;
71558 enum fc_lport_state state;
71559 diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
71560 index de8e180..f15e0d7 100644
71561 --- a/include/scsi/scsi_device.h
71562 +++ b/include/scsi/scsi_device.h
71563 @@ -156,9 +156,9 @@ struct scsi_device {
71564 unsigned int max_device_blocked; /* what device_blocked counts down from */
71565 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
71566
71567 - atomic_t iorequest_cnt;
71568 - atomic_t iodone_cnt;
71569 - atomic_t ioerr_cnt;
71570 + atomic_unchecked_t iorequest_cnt;
71571 + atomic_unchecked_t iodone_cnt;
71572 + atomic_unchecked_t ioerr_cnt;
71573
71574 struct device sdev_gendev,
71575 sdev_dev;
71576 diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
71577 index fc50bd6..81ba9cb 100644
71578 --- a/include/scsi/scsi_transport_fc.h
71579 +++ b/include/scsi/scsi_transport_fc.h
71580 @@ -708,7 +708,7 @@ struct fc_function_template {
71581 unsigned long show_host_system_hostname:1;
71582
71583 unsigned long disable_target_scan:1;
71584 -};
71585 +} __do_const;
71586
71587
71588 /**
71589 diff --git a/include/sound/ac97_codec.h b/include/sound/ac97_codec.h
71590 index 3dae3f7..8440d6f 100644
71591 --- a/include/sound/ac97_codec.h
71592 +++ b/include/sound/ac97_codec.h
71593 @@ -419,15 +419,15 @@
71594 struct snd_ac97;
71595
71596 struct snd_ac97_build_ops {
71597 - int (*build_3d) (struct snd_ac97 *ac97);
71598 - int (*build_specific) (struct snd_ac97 *ac97);
71599 - int (*build_spdif) (struct snd_ac97 *ac97);
71600 - int (*build_post_spdif) (struct snd_ac97 *ac97);
71601 + int (* const build_3d) (struct snd_ac97 *ac97);
71602 + int (* const build_specific) (struct snd_ac97 *ac97);
71603 + int (* const build_spdif) (struct snd_ac97 *ac97);
71604 + int (* const build_post_spdif) (struct snd_ac97 *ac97);
71605 #ifdef CONFIG_PM
71606 - void (*suspend) (struct snd_ac97 *ac97);
71607 - void (*resume) (struct snd_ac97 *ac97);
71608 + void (* const suspend) (struct snd_ac97 *ac97);
71609 + void (* const resume) (struct snd_ac97 *ac97);
71610 #endif
71611 - void (*update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
71612 + void (* const update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
71613 };
71614
71615 struct snd_ac97_bus_ops {
71616 @@ -477,7 +477,7 @@ struct snd_ac97_template {
71617
71618 struct snd_ac97 {
71619 /* -- lowlevel (hardware) driver specific -- */
71620 - struct snd_ac97_build_ops * build_ops;
71621 + const struct snd_ac97_build_ops * build_ops;
71622 void *private_data;
71623 void (*private_free) (struct snd_ac97 *ac97);
71624 /* --- */
71625 diff --git a/include/sound/ak4xxx-adda.h b/include/sound/ak4xxx-adda.h
71626 index 891cf1a..a94ba2b 100644
71627 --- a/include/sound/ak4xxx-adda.h
71628 +++ b/include/sound/ak4xxx-adda.h
71629 @@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
71630 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
71631 unsigned char val);
71632 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
71633 -};
71634 +} __no_const;
71635
71636 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
71637
71638 diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
71639 index 8c05e47..2b5df97 100644
71640 --- a/include/sound/hwdep.h
71641 +++ b/include/sound/hwdep.h
71642 @@ -49,7 +49,7 @@ struct snd_hwdep_ops {
71643 struct snd_hwdep_dsp_status *status);
71644 int (*dsp_load)(struct snd_hwdep *hw,
71645 struct snd_hwdep_dsp_image *image);
71646 -};
71647 +} __no_const;
71648
71649 struct snd_hwdep {
71650 struct snd_card *card;
71651 diff --git a/include/sound/info.h b/include/sound/info.h
71652 index 112e894..6fda5b5 100644
71653 --- a/include/sound/info.h
71654 +++ b/include/sound/info.h
71655 @@ -44,7 +44,7 @@ struct snd_info_entry_text {
71656 struct snd_info_buffer *buffer);
71657 void (*write)(struct snd_info_entry *entry,
71658 struct snd_info_buffer *buffer);
71659 -};
71660 +} __no_const;
71661
71662 struct snd_info_entry_ops {
71663 int (*open)(struct snd_info_entry *entry,
71664 diff --git a/include/sound/pcm.h b/include/sound/pcm.h
71665 index de6d981..590a550 100644
71666 --- a/include/sound/pcm.h
71667 +++ b/include/sound/pcm.h
71668 @@ -80,6 +80,7 @@ struct snd_pcm_ops {
71669 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
71670 int (*ack)(struct snd_pcm_substream *substream);
71671 };
71672 +typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
71673
71674 /*
71675 *
71676 diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
71677 index 736eac7..fe8a80f 100644
71678 --- a/include/sound/sb16_csp.h
71679 +++ b/include/sound/sb16_csp.h
71680 @@ -139,7 +139,7 @@ struct snd_sb_csp_ops {
71681 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
71682 int (*csp_stop) (struct snd_sb_csp * p);
71683 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
71684 -};
71685 +} __no_const;
71686
71687 /*
71688 * CSP private data
71689 diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
71690 index 444cd6b..3327cc5 100644
71691 --- a/include/sound/ymfpci.h
71692 +++ b/include/sound/ymfpci.h
71693 @@ -358,7 +358,7 @@ struct snd_ymfpci {
71694 spinlock_t reg_lock;
71695 spinlock_t voice_lock;
71696 wait_queue_head_t interrupt_sleep;
71697 - atomic_t interrupt_sleep_count;
71698 + atomic_unchecked_t interrupt_sleep_count;
71699 struct snd_info_entry *proc_entry;
71700 const struct firmware *dsp_microcode;
71701 const struct firmware *controller_microcode;
71702 diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
71703 index b89f9db..f097b38 100644
71704 --- a/include/trace/events/irq.h
71705 +++ b/include/trace/events/irq.h
71706 @@ -34,7 +34,7 @@
71707 */
71708 TRACE_EVENT(irq_handler_entry,
71709
71710 - TP_PROTO(int irq, struct irqaction *action),
71711 + TP_PROTO(int irq, const struct irqaction *action),
71712
71713 TP_ARGS(irq, action),
71714
71715 @@ -64,7 +64,7 @@ TRACE_EVENT(irq_handler_entry,
71716 */
71717 TRACE_EVENT(irq_handler_exit,
71718
71719 - TP_PROTO(int irq, struct irqaction *action, int ret),
71720 + TP_PROTO(int irq, const struct irqaction *action, int ret),
71721
71722 TP_ARGS(irq, action, ret),
71723
71724 @@ -95,7 +95,7 @@ TRACE_EVENT(irq_handler_exit,
71725 */
71726 TRACE_EVENT(softirq_entry,
71727
71728 - TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
71729 + TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
71730
71731 TP_ARGS(h, vec),
71732
71733 @@ -124,7 +124,7 @@ TRACE_EVENT(softirq_entry,
71734 */
71735 TRACE_EVENT(softirq_exit,
71736
71737 - TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
71738 + TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
71739
71740 TP_ARGS(h, vec),
71741
71742 diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
71743 index 0993a22..32ba2fe 100644
71744 --- a/include/video/uvesafb.h
71745 +++ b/include/video/uvesafb.h
71746 @@ -177,6 +177,7 @@ struct uvesafb_par {
71747 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
71748 u8 pmi_setpal; /* PMI for palette changes */
71749 u16 *pmi_base; /* protected mode interface location */
71750 + u8 *pmi_code; /* protected mode code location */
71751 void *pmi_start;
71752 void *pmi_pal;
71753 u8 *vbe_state_orig; /*
71754 diff --git a/init/Kconfig b/init/Kconfig
71755 index d72691b..3996e54 100644
71756 --- a/init/Kconfig
71757 +++ b/init/Kconfig
71758 @@ -1004,7 +1004,7 @@ config SLUB_DEBUG
71759
71760 config COMPAT_BRK
71761 bool "Disable heap randomization"
71762 - default y
71763 + default n
71764 help
71765 Randomizing heap placement makes heap exploits harder, but it
71766 also breaks ancient binaries (including anything libc5 based).
71767 diff --git a/init/do_mounts.c b/init/do_mounts.c
71768 index bb008d0..4fa3933 100644
71769 --- a/init/do_mounts.c
71770 +++ b/init/do_mounts.c
71771 @@ -216,11 +216,11 @@ static void __init get_fs_names(char *page)
71772
71773 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
71774 {
71775 - int err = sys_mount(name, "/root", fs, flags, data);
71776 + int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data);
71777 if (err)
71778 return err;
71779
71780 - sys_chdir("/root");
71781 + sys_chdir((__force const char __user *)"/root");
71782 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
71783 printk("VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
71784 current->fs->pwd.mnt->mnt_sb->s_type->name,
71785 @@ -311,18 +311,18 @@ void __init change_floppy(char *fmt, ...)
71786 va_start(args, fmt);
71787 vsprintf(buf, fmt, args);
71788 va_end(args);
71789 - fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
71790 + fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
71791 if (fd >= 0) {
71792 sys_ioctl(fd, FDEJECT, 0);
71793 sys_close(fd);
71794 }
71795 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
71796 - fd = sys_open("/dev/console", O_RDWR, 0);
71797 + fd = sys_open((char __user *)"/dev/console", O_RDWR, 0);
71798 if (fd >= 0) {
71799 sys_ioctl(fd, TCGETS, (long)&termios);
71800 termios.c_lflag &= ~ICANON;
71801 sys_ioctl(fd, TCSETSF, (long)&termios);
71802 - sys_read(fd, &c, 1);
71803 + sys_read(fd, (char __user *)&c, 1);
71804 termios.c_lflag |= ICANON;
71805 sys_ioctl(fd, TCSETSF, (long)&termios);
71806 sys_close(fd);
71807 @@ -416,6 +416,6 @@ void __init prepare_namespace(void)
71808 mount_root();
71809 out:
71810 devtmpfs_mount("dev");
71811 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
71812 - sys_chroot(".");
71813 + sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
71814 + sys_chroot((__force char __user *)".");
71815 }
71816 diff --git a/init/do_mounts.h b/init/do_mounts.h
71817 index f5b978a..69dbfe8 100644
71818 --- a/init/do_mounts.h
71819 +++ b/init/do_mounts.h
71820 @@ -15,15 +15,15 @@ extern int root_mountflags;
71821
71822 static inline int create_dev(char *name, dev_t dev)
71823 {
71824 - sys_unlink(name);
71825 - return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
71826 + sys_unlink((char __force_user *)name);
71827 + return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
71828 }
71829
71830 #if BITS_PER_LONG == 32
71831 static inline u32 bstat(char *name)
71832 {
71833 struct stat64 stat;
71834 - if (sys_stat64(name, &stat) != 0)
71835 + if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
71836 return 0;
71837 if (!S_ISBLK(stat.st_mode))
71838 return 0;
71839 @@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
71840 static inline u32 bstat(char *name)
71841 {
71842 struct stat stat;
71843 - if (sys_newstat(name, &stat) != 0)
71844 + if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
71845 return 0;
71846 if (!S_ISBLK(stat.st_mode))
71847 return 0;
71848 diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
71849 index 614241b..4da046b 100644
71850 --- a/init/do_mounts_initrd.c
71851 +++ b/init/do_mounts_initrd.c
71852 @@ -32,7 +32,7 @@ static int __init do_linuxrc(void * shell)
71853 sys_close(old_fd);sys_close(root_fd);
71854 sys_close(0);sys_close(1);sys_close(2);
71855 sys_setsid();
71856 - (void) sys_open("/dev/console",O_RDWR,0);
71857 + (void) sys_open((__force const char __user *)"/dev/console",O_RDWR,0);
71858 (void) sys_dup(0);
71859 (void) sys_dup(0);
71860 return kernel_execve(shell, argv, envp_init);
71861 @@ -47,13 +47,13 @@ static void __init handle_initrd(void)
71862 create_dev("/dev/root.old", Root_RAM0);
71863 /* mount initrd on rootfs' /root */
71864 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
71865 - sys_mkdir("/old", 0700);
71866 - root_fd = sys_open("/", 0, 0);
71867 - old_fd = sys_open("/old", 0, 0);
71868 + sys_mkdir((const char __force_user *)"/old", 0700);
71869 + root_fd = sys_open((const char __force_user *)"/", 0, 0);
71870 + old_fd = sys_open((const char __force_user *)"/old", 0, 0);
71871 /* move initrd over / and chdir/chroot in initrd root */
71872 - sys_chdir("/root");
71873 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
71874 - sys_chroot(".");
71875 + sys_chdir((const char __force_user *)"/root");
71876 + sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
71877 + sys_chroot((const char __force_user *)".");
71878
71879 /*
71880 * In case that a resume from disk is carried out by linuxrc or one of
71881 @@ -70,15 +70,15 @@ static void __init handle_initrd(void)
71882
71883 /* move initrd to rootfs' /old */
71884 sys_fchdir(old_fd);
71885 - sys_mount("/", ".", NULL, MS_MOVE, NULL);
71886 + sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
71887 /* switch root and cwd back to / of rootfs */
71888 sys_fchdir(root_fd);
71889 - sys_chroot(".");
71890 + sys_chroot((const char __force_user *)".");
71891 sys_close(old_fd);
71892 sys_close(root_fd);
71893
71894 if (new_decode_dev(real_root_dev) == Root_RAM0) {
71895 - sys_chdir("/old");
71896 + sys_chdir((const char __force_user *)"/old");
71897 return;
71898 }
71899
71900 @@ -86,17 +86,17 @@ static void __init handle_initrd(void)
71901 mount_root();
71902
71903 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
71904 - error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
71905 + error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
71906 if (!error)
71907 printk("okay\n");
71908 else {
71909 - int fd = sys_open("/dev/root.old", O_RDWR, 0);
71910 + int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
71911 if (error == -ENOENT)
71912 printk("/initrd does not exist. Ignored.\n");
71913 else
71914 printk("failed\n");
71915 printk(KERN_NOTICE "Unmounting old root\n");
71916 - sys_umount("/old", MNT_DETACH);
71917 + sys_umount((char __force_user *)"/old", MNT_DETACH);
71918 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
71919 if (fd < 0) {
71920 error = fd;
71921 @@ -119,11 +119,11 @@ int __init initrd_load(void)
71922 * mounted in the normal path.
71923 */
71924 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
71925 - sys_unlink("/initrd.image");
71926 + sys_unlink((const char __force_user *)"/initrd.image");
71927 handle_initrd();
71928 return 1;
71929 }
71930 }
71931 - sys_unlink("/initrd.image");
71932 + sys_unlink((const char __force_user *)"/initrd.image");
71933 return 0;
71934 }
71935 diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
71936 index 69aebbf..c0bf6a7 100644
71937 --- a/init/do_mounts_md.c
71938 +++ b/init/do_mounts_md.c
71939 @@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
71940 partitioned ? "_d" : "", minor,
71941 md_setup_args[ent].device_names);
71942
71943 - fd = sys_open(name, 0, 0);
71944 + fd = sys_open((char __force_user *)name, 0, 0);
71945 if (fd < 0) {
71946 printk(KERN_ERR "md: open failed - cannot start "
71947 "array %s\n", name);
71948 @@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
71949 * array without it
71950 */
71951 sys_close(fd);
71952 - fd = sys_open(name, 0, 0);
71953 + fd = sys_open((char __force_user *)name, 0, 0);
71954 sys_ioctl(fd, BLKRRPART, 0);
71955 }
71956 sys_close(fd);
71957 @@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
71958
71959 wait_for_device_probe();
71960
71961 - fd = sys_open("/dev/md0", 0, 0);
71962 + fd = sys_open((__force char __user *)"/dev/md0", 0, 0);
71963 if (fd >= 0) {
71964 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
71965 sys_close(fd);
71966 diff --git a/init/initramfs.c b/init/initramfs.c
71967 index 1fd59b8..a01b079 100644
71968 --- a/init/initramfs.c
71969 +++ b/init/initramfs.c
71970 @@ -74,7 +74,7 @@ static void __init free_hash(void)
71971 }
71972 }
71973
71974 -static long __init do_utime(char __user *filename, time_t mtime)
71975 +static long __init do_utime(__force char __user *filename, time_t mtime)
71976 {
71977 struct timespec t[2];
71978
71979 @@ -109,7 +109,7 @@ static void __init dir_utime(void)
71980 struct dir_entry *de, *tmp;
71981 list_for_each_entry_safe(de, tmp, &dir_list, list) {
71982 list_del(&de->list);
71983 - do_utime(de->name, de->mtime);
71984 + do_utime((char __force_user *)de->name, de->mtime);
71985 kfree(de->name);
71986 kfree(de);
71987 }
71988 @@ -271,7 +271,7 @@ static int __init maybe_link(void)
71989 if (nlink >= 2) {
71990 char *old = find_link(major, minor, ino, mode, collected);
71991 if (old)
71992 - return (sys_link(old, collected) < 0) ? -1 : 1;
71993 + return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
71994 }
71995 return 0;
71996 }
71997 @@ -280,11 +280,11 @@ static void __init clean_path(char *path, mode_t mode)
71998 {
71999 struct stat st;
72000
72001 - if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
72002 + if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
72003 if (S_ISDIR(st.st_mode))
72004 - sys_rmdir(path);
72005 + sys_rmdir((char __force_user *)path);
72006 else
72007 - sys_unlink(path);
72008 + sys_unlink((char __force_user *)path);
72009 }
72010 }
72011
72012 @@ -305,7 +305,7 @@ static int __init do_name(void)
72013 int openflags = O_WRONLY|O_CREAT;
72014 if (ml != 1)
72015 openflags |= O_TRUNC;
72016 - wfd = sys_open(collected, openflags, mode);
72017 + wfd = sys_open((char __force_user *)collected, openflags, mode);
72018
72019 if (wfd >= 0) {
72020 sys_fchown(wfd, uid, gid);
72021 @@ -317,17 +317,17 @@ static int __init do_name(void)
72022 }
72023 }
72024 } else if (S_ISDIR(mode)) {
72025 - sys_mkdir(collected, mode);
72026 - sys_chown(collected, uid, gid);
72027 - sys_chmod(collected, mode);
72028 + sys_mkdir((char __force_user *)collected, mode);
72029 + sys_chown((char __force_user *)collected, uid, gid);
72030 + sys_chmod((char __force_user *)collected, mode);
72031 dir_add(collected, mtime);
72032 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
72033 S_ISFIFO(mode) || S_ISSOCK(mode)) {
72034 if (maybe_link() == 0) {
72035 - sys_mknod(collected, mode, rdev);
72036 - sys_chown(collected, uid, gid);
72037 - sys_chmod(collected, mode);
72038 - do_utime(collected, mtime);
72039 + sys_mknod((char __force_user *)collected, mode, rdev);
72040 + sys_chown((char __force_user *)collected, uid, gid);
72041 + sys_chmod((char __force_user *)collected, mode);
72042 + do_utime((char __force_user *)collected, mtime);
72043 }
72044 }
72045 return 0;
72046 @@ -336,15 +336,15 @@ static int __init do_name(void)
72047 static int __init do_copy(void)
72048 {
72049 if (count >= body_len) {
72050 - sys_write(wfd, victim, body_len);
72051 + sys_write(wfd, (char __force_user *)victim, body_len);
72052 sys_close(wfd);
72053 - do_utime(vcollected, mtime);
72054 + do_utime((char __force_user *)vcollected, mtime);
72055 kfree(vcollected);
72056 eat(body_len);
72057 state = SkipIt;
72058 return 0;
72059 } else {
72060 - sys_write(wfd, victim, count);
72061 + sys_write(wfd, (char __force_user *)victim, count);
72062 body_len -= count;
72063 eat(count);
72064 return 1;
72065 @@ -355,9 +355,9 @@ static int __init do_symlink(void)
72066 {
72067 collected[N_ALIGN(name_len) + body_len] = '\0';
72068 clean_path(collected, 0);
72069 - sys_symlink(collected + N_ALIGN(name_len), collected);
72070 - sys_lchown(collected, uid, gid);
72071 - do_utime(collected, mtime);
72072 + sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
72073 + sys_lchown((char __force_user *)collected, uid, gid);
72074 + do_utime((char __force_user *)collected, mtime);
72075 state = SkipIt;
72076 next_state = Reset;
72077 return 0;
72078 diff --git a/init/main.c b/init/main.c
72079 index 1eb4bd5..fea5bbe 100644
72080 --- a/init/main.c
72081 +++ b/init/main.c
72082 @@ -97,6 +97,7 @@ static inline void mark_rodata_ro(void) { }
72083 #ifdef CONFIG_TC
72084 extern void tc_init(void);
72085 #endif
72086 +extern void grsecurity_init(void);
72087
72088 enum system_states system_state __read_mostly;
72089 EXPORT_SYMBOL(system_state);
72090 @@ -183,6 +184,49 @@ static int __init set_reset_devices(char *str)
72091
72092 __setup("reset_devices", set_reset_devices);
72093
72094 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
72095 +extern char pax_enter_kernel_user[];
72096 +extern char pax_exit_kernel_user[];
72097 +extern pgdval_t clone_pgd_mask;
72098 +#endif
72099 +
72100 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
72101 +static int __init setup_pax_nouderef(char *str)
72102 +{
72103 +#ifdef CONFIG_X86_32
72104 + unsigned int cpu;
72105 + struct desc_struct *gdt;
72106 +
72107 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
72108 + gdt = get_cpu_gdt_table(cpu);
72109 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
72110 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
72111 + gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
72112 + gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
72113 + }
72114 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
72115 +#else
72116 + memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
72117 + memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
72118 + clone_pgd_mask = ~(pgdval_t)0UL;
72119 +#endif
72120 +
72121 + return 0;
72122 +}
72123 +early_param("pax_nouderef", setup_pax_nouderef);
72124 +#endif
72125 +
72126 +#ifdef CONFIG_PAX_SOFTMODE
72127 +int pax_softmode;
72128 +
72129 +static int __init setup_pax_softmode(char *str)
72130 +{
72131 + get_option(&str, &pax_softmode);
72132 + return 1;
72133 +}
72134 +__setup("pax_softmode=", setup_pax_softmode);
72135 +#endif
72136 +
72137 static char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
72138 char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
72139 static const char *panic_later, *panic_param;
72140 @@ -705,52 +749,53 @@ int initcall_debug;
72141 core_param(initcall_debug, initcall_debug, bool, 0644);
72142
72143 static char msgbuf[64];
72144 -static struct boot_trace_call call;
72145 -static struct boot_trace_ret ret;
72146 +static struct boot_trace_call trace_call;
72147 +static struct boot_trace_ret trace_ret;
72148
72149 int do_one_initcall(initcall_t fn)
72150 {
72151 int count = preempt_count();
72152 ktime_t calltime, delta, rettime;
72153 + const char *msg1 = "", *msg2 = "";
72154
72155 if (initcall_debug) {
72156 - call.caller = task_pid_nr(current);
72157 - printk("calling %pF @ %i\n", fn, call.caller);
72158 + trace_call.caller = task_pid_nr(current);
72159 + printk("calling %pF @ %i\n", fn, trace_call.caller);
72160 calltime = ktime_get();
72161 - trace_boot_call(&call, fn);
72162 + trace_boot_call(&trace_call, fn);
72163 enable_boot_trace();
72164 }
72165
72166 - ret.result = fn();
72167 + trace_ret.result = fn();
72168
72169 if (initcall_debug) {
72170 disable_boot_trace();
72171 rettime = ktime_get();
72172 delta = ktime_sub(rettime, calltime);
72173 - ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
72174 - trace_boot_ret(&ret, fn);
72175 + trace_ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
72176 + trace_boot_ret(&trace_ret, fn);
72177 printk("initcall %pF returned %d after %Ld usecs\n", fn,
72178 - ret.result, ret.duration);
72179 + trace_ret.result, trace_ret.duration);
72180 }
72181
72182 msgbuf[0] = 0;
72183
72184 - if (ret.result && ret.result != -ENODEV && initcall_debug)
72185 - sprintf(msgbuf, "error code %d ", ret.result);
72186 + if (trace_ret.result && trace_ret.result != -ENODEV && initcall_debug)
72187 + sprintf(msgbuf, "error code %d ", trace_ret.result);
72188
72189 if (preempt_count() != count) {
72190 - strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
72191 + msg1 = " preemption imbalance";
72192 preempt_count() = count;
72193 }
72194 if (irqs_disabled()) {
72195 - strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
72196 + msg2 = " disabled interrupts";
72197 local_irq_enable();
72198 }
72199 - if (msgbuf[0]) {
72200 - printk("initcall %pF returned with %s\n", fn, msgbuf);
72201 + if (msgbuf[0] || *msg1 || *msg2) {
72202 + printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
72203 }
72204
72205 - return ret.result;
72206 + return trace_ret.result;
72207 }
72208
72209
72210 @@ -893,11 +938,13 @@ static int __init kernel_init(void * unused)
72211 if (!ramdisk_execute_command)
72212 ramdisk_execute_command = "/init";
72213
72214 - if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
72215 + if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
72216 ramdisk_execute_command = NULL;
72217 prepare_namespace();
72218 }
72219
72220 + grsecurity_init();
72221 +
72222 /*
72223 * Ok, we have completed the initial bootup, and
72224 * we're essentially up and running. Get rid of the
72225 diff --git a/init/noinitramfs.c b/init/noinitramfs.c
72226 index f4c1a3a..96c19bd 100644
72227 --- a/init/noinitramfs.c
72228 +++ b/init/noinitramfs.c
72229 @@ -29,7 +29,7 @@ static int __init default_rootfs(void)
72230 {
72231 int err;
72232
72233 - err = sys_mkdir("/dev", 0755);
72234 + err = sys_mkdir((const char __user *)"/dev", 0755);
72235 if (err < 0)
72236 goto out;
72237
72238 @@ -39,7 +39,7 @@ static int __init default_rootfs(void)
72239 if (err < 0)
72240 goto out;
72241
72242 - err = sys_mkdir("/root", 0700);
72243 + err = sys_mkdir((const char __user *)"/root", 0700);
72244 if (err < 0)
72245 goto out;
72246
72247 diff --git a/ipc/mqueue.c b/ipc/mqueue.c
72248 index d01bc14..8df81db 100644
72249 --- a/ipc/mqueue.c
72250 +++ b/ipc/mqueue.c
72251 @@ -150,6 +150,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
72252 mq_bytes = (mq_msg_tblsz +
72253 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
72254
72255 + gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
72256 spin_lock(&mq_lock);
72257 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
72258 u->mq_bytes + mq_bytes >
72259 diff --git a/ipc/msg.c b/ipc/msg.c
72260 index 779f762..4af9e36 100644
72261 --- a/ipc/msg.c
72262 +++ b/ipc/msg.c
72263 @@ -310,18 +310,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
72264 return security_msg_queue_associate(msq, msgflg);
72265 }
72266
72267 +static struct ipc_ops msg_ops = {
72268 + .getnew = newque,
72269 + .associate = msg_security,
72270 + .more_checks = NULL
72271 +};
72272 +
72273 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
72274 {
72275 struct ipc_namespace *ns;
72276 - struct ipc_ops msg_ops;
72277 struct ipc_params msg_params;
72278
72279 ns = current->nsproxy->ipc_ns;
72280
72281 - msg_ops.getnew = newque;
72282 - msg_ops.associate = msg_security;
72283 - msg_ops.more_checks = NULL;
72284 -
72285 msg_params.key = key;
72286 msg_params.flg = msgflg;
72287
72288 diff --git a/ipc/sem.c b/ipc/sem.c
72289 index b781007..f738b04 100644
72290 --- a/ipc/sem.c
72291 +++ b/ipc/sem.c
72292 @@ -309,10 +309,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
72293 return 0;
72294 }
72295
72296 +static struct ipc_ops sem_ops = {
72297 + .getnew = newary,
72298 + .associate = sem_security,
72299 + .more_checks = sem_more_checks
72300 +};
72301 +
72302 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
72303 {
72304 struct ipc_namespace *ns;
72305 - struct ipc_ops sem_ops;
72306 struct ipc_params sem_params;
72307
72308 ns = current->nsproxy->ipc_ns;
72309 @@ -320,10 +325,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
72310 if (nsems < 0 || nsems > ns->sc_semmsl)
72311 return -EINVAL;
72312
72313 - sem_ops.getnew = newary;
72314 - sem_ops.associate = sem_security;
72315 - sem_ops.more_checks = sem_more_checks;
72316 -
72317 sem_params.key = key;
72318 sem_params.flg = semflg;
72319 sem_params.u.nsems = nsems;
72320 @@ -671,6 +672,8 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
72321 ushort* sem_io = fast_sem_io;
72322 int nsems;
72323
72324 + pax_track_stack();
72325 +
72326 sma = sem_lock_check(ns, semid);
72327 if (IS_ERR(sma))
72328 return PTR_ERR(sma);
72329 @@ -1071,6 +1074,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
72330 unsigned long jiffies_left = 0;
72331 struct ipc_namespace *ns;
72332
72333 + pax_track_stack();
72334 +
72335 ns = current->nsproxy->ipc_ns;
72336
72337 if (nsops < 1 || semid < 0)
72338 diff --git a/ipc/shm.c b/ipc/shm.c
72339 index d30732c..e4992cd 100644
72340 --- a/ipc/shm.c
72341 +++ b/ipc/shm.c
72342 @@ -70,6 +70,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
72343 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
72344 #endif
72345
72346 +#ifdef CONFIG_GRKERNSEC
72347 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
72348 + const time_t shm_createtime, const uid_t cuid,
72349 + const int shmid);
72350 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
72351 + const time_t shm_createtime);
72352 +#endif
72353 +
72354 void shm_init_ns(struct ipc_namespace *ns)
72355 {
72356 ns->shm_ctlmax = SHMMAX;
72357 @@ -396,6 +404,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
72358 shp->shm_lprid = 0;
72359 shp->shm_atim = shp->shm_dtim = 0;
72360 shp->shm_ctim = get_seconds();
72361 +#ifdef CONFIG_GRKERNSEC
72362 + {
72363 + struct timespec timeval;
72364 + do_posix_clock_monotonic_gettime(&timeval);
72365 +
72366 + shp->shm_createtime = timeval.tv_sec;
72367 + }
72368 +#endif
72369 shp->shm_segsz = size;
72370 shp->shm_nattch = 0;
72371 shp->shm_file = file;
72372 @@ -446,18 +462,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
72373 return 0;
72374 }
72375
72376 +static struct ipc_ops shm_ops = {
72377 + .getnew = newseg,
72378 + .associate = shm_security,
72379 + .more_checks = shm_more_checks
72380 +};
72381 +
72382 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
72383 {
72384 struct ipc_namespace *ns;
72385 - struct ipc_ops shm_ops;
72386 struct ipc_params shm_params;
72387
72388 ns = current->nsproxy->ipc_ns;
72389
72390 - shm_ops.getnew = newseg;
72391 - shm_ops.associate = shm_security;
72392 - shm_ops.more_checks = shm_more_checks;
72393 -
72394 shm_params.key = key;
72395 shm_params.flg = shmflg;
72396 shm_params.u.size = size;
72397 @@ -857,6 +874,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
72398 f_mode = FMODE_READ | FMODE_WRITE;
72399 }
72400 if (shmflg & SHM_EXEC) {
72401 +
72402 +#ifdef CONFIG_PAX_MPROTECT
72403 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
72404 + goto out;
72405 +#endif
72406 +
72407 prot |= PROT_EXEC;
72408 acc_mode |= S_IXUGO;
72409 }
72410 @@ -880,9 +903,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
72411 if (err)
72412 goto out_unlock;
72413
72414 +#ifdef CONFIG_GRKERNSEC
72415 + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
72416 + shp->shm_perm.cuid, shmid) ||
72417 + !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
72418 + err = -EACCES;
72419 + goto out_unlock;
72420 + }
72421 +#endif
72422 +
72423 path.dentry = dget(shp->shm_file->f_path.dentry);
72424 path.mnt = shp->shm_file->f_path.mnt;
72425 shp->shm_nattch++;
72426 +#ifdef CONFIG_GRKERNSEC
72427 + shp->shm_lapid = current->pid;
72428 +#endif
72429 size = i_size_read(path.dentry->d_inode);
72430 shm_unlock(shp);
72431
72432 diff --git a/kernel/acct.c b/kernel/acct.c
72433 index a6605ca..ca91111 100644
72434 --- a/kernel/acct.c
72435 +++ b/kernel/acct.c
72436 @@ -579,7 +579,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
72437 */
72438 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
72439 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
72440 - file->f_op->write(file, (char *)&ac,
72441 + file->f_op->write(file, (char __force_user *)&ac,
72442 sizeof(acct_t), &file->f_pos);
72443 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
72444 set_fs(fs);
72445 diff --git a/kernel/audit.c b/kernel/audit.c
72446 index 5feed23..48415fd 100644
72447 --- a/kernel/audit.c
72448 +++ b/kernel/audit.c
72449 @@ -110,7 +110,7 @@ u32 audit_sig_sid = 0;
72450 3) suppressed due to audit_rate_limit
72451 4) suppressed due to audit_backlog_limit
72452 */
72453 -static atomic_t audit_lost = ATOMIC_INIT(0);
72454 +static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
72455
72456 /* The netlink socket. */
72457 static struct sock *audit_sock;
72458 @@ -232,7 +232,7 @@ void audit_log_lost(const char *message)
72459 unsigned long now;
72460 int print;
72461
72462 - atomic_inc(&audit_lost);
72463 + atomic_inc_unchecked(&audit_lost);
72464
72465 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
72466
72467 @@ -251,7 +251,7 @@ void audit_log_lost(const char *message)
72468 printk(KERN_WARNING
72469 "audit: audit_lost=%d audit_rate_limit=%d "
72470 "audit_backlog_limit=%d\n",
72471 - atomic_read(&audit_lost),
72472 + atomic_read_unchecked(&audit_lost),
72473 audit_rate_limit,
72474 audit_backlog_limit);
72475 audit_panic(message);
72476 @@ -691,7 +691,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
72477 status_set.pid = audit_pid;
72478 status_set.rate_limit = audit_rate_limit;
72479 status_set.backlog_limit = audit_backlog_limit;
72480 - status_set.lost = atomic_read(&audit_lost);
72481 + status_set.lost = atomic_read_unchecked(&audit_lost);
72482 status_set.backlog = skb_queue_len(&audit_skb_queue);
72483 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
72484 &status_set, sizeof(status_set));
72485 @@ -891,8 +891,10 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
72486 spin_unlock_irq(&tsk->sighand->siglock);
72487 }
72488 read_unlock(&tasklist_lock);
72489 - audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_TTY_GET, 0, 0,
72490 - &s, sizeof(s));
72491 +
72492 + if (!err)
72493 + audit_send_reply(NETLINK_CB(skb).pid, seq,
72494 + AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
72495 break;
72496 }
72497 case AUDIT_TTY_SET: {
72498 @@ -1262,12 +1264,13 @@ static void audit_log_vformat(struct audit_buffer *ab, const char *fmt,
72499 avail = audit_expand(ab,
72500 max_t(unsigned, AUDIT_BUFSIZ, 1+len-avail));
72501 if (!avail)
72502 - goto out;
72503 + goto out_va_end;
72504 len = vsnprintf(skb_tail_pointer(skb), avail, fmt, args2);
72505 }
72506 - va_end(args2);
72507 if (len > 0)
72508 skb_put(skb, len);
72509 +out_va_end:
72510 + va_end(args2);
72511 out:
72512 return;
72513 }
72514 diff --git a/kernel/auditsc.c b/kernel/auditsc.c
72515 index 267e484..ac41bc3 100644
72516 --- a/kernel/auditsc.c
72517 +++ b/kernel/auditsc.c
72518 @@ -1157,8 +1157,8 @@ static void audit_log_execve_info(struct audit_context *context,
72519 struct audit_buffer **ab,
72520 struct audit_aux_data_execve *axi)
72521 {
72522 - int i;
72523 - size_t len, len_sent = 0;
72524 + int i, len;
72525 + size_t len_sent = 0;
72526 const char __user *p;
72527 char *buf;
72528
72529 @@ -2113,7 +2113,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
72530 }
72531
72532 /* global counter which is incremented every time something logs in */
72533 -static atomic_t session_id = ATOMIC_INIT(0);
72534 +static atomic_unchecked_t session_id = ATOMIC_INIT(0);
72535
72536 /**
72537 * audit_set_loginuid - set a task's audit_context loginuid
72538 @@ -2126,7 +2126,7 @@ static atomic_t session_id = ATOMIC_INIT(0);
72539 */
72540 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
72541 {
72542 - unsigned int sessionid = atomic_inc_return(&session_id);
72543 + unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
72544 struct audit_context *context = task->audit_context;
72545
72546 if (context && context->in_syscall) {
72547 diff --git a/kernel/capability.c b/kernel/capability.c
72548 index 8a944f5..db5001e 100644
72549 --- a/kernel/capability.c
72550 +++ b/kernel/capability.c
72551 @@ -305,10 +305,26 @@ int capable(int cap)
72552 BUG();
72553 }
72554
72555 - if (security_capable(cap) == 0) {
72556 + if (security_capable(cap) == 0 && gr_is_capable(cap)) {
72557 current->flags |= PF_SUPERPRIV;
72558 return 1;
72559 }
72560 return 0;
72561 }
72562 +
72563 +int capable_nolog(int cap)
72564 +{
72565 + if (unlikely(!cap_valid(cap))) {
72566 + printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
72567 + BUG();
72568 + }
72569 +
72570 + if (security_capable(cap) == 0 && gr_is_capable_nolog(cap)) {
72571 + current->flags |= PF_SUPERPRIV;
72572 + return 1;
72573 + }
72574 + return 0;
72575 +}
72576 +
72577 EXPORT_SYMBOL(capable);
72578 +EXPORT_SYMBOL(capable_nolog);
72579 diff --git a/kernel/cgroup.c b/kernel/cgroup.c
72580 index 1fbcc74..7000012 100644
72581 --- a/kernel/cgroup.c
72582 +++ b/kernel/cgroup.c
72583 @@ -536,6 +536,8 @@ static struct css_set *find_css_set(
72584 struct hlist_head *hhead;
72585 struct cg_cgroup_link *link;
72586
72587 + pax_track_stack();
72588 +
72589 /* First see if we already have a cgroup group that matches
72590 * the desired set */
72591 read_lock(&css_set_lock);
72592 diff --git a/kernel/compat.c b/kernel/compat.c
72593 index 8bc5578..186e44a 100644
72594 --- a/kernel/compat.c
72595 +++ b/kernel/compat.c
72596 @@ -108,7 +108,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
72597 mm_segment_t oldfs;
72598 long ret;
72599
72600 - restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
72601 + restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
72602 oldfs = get_fs();
72603 set_fs(KERNEL_DS);
72604 ret = hrtimer_nanosleep_restart(restart);
72605 @@ -140,7 +140,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
72606 oldfs = get_fs();
72607 set_fs(KERNEL_DS);
72608 ret = hrtimer_nanosleep(&tu,
72609 - rmtp ? (struct timespec __user *)&rmt : NULL,
72610 + rmtp ? (struct timespec __force_user *)&rmt : NULL,
72611 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
72612 set_fs(oldfs);
72613
72614 @@ -247,7 +247,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
72615 mm_segment_t old_fs = get_fs();
72616
72617 set_fs(KERNEL_DS);
72618 - ret = sys_sigpending((old_sigset_t __user *) &s);
72619 + ret = sys_sigpending((old_sigset_t __force_user *) &s);
72620 set_fs(old_fs);
72621 if (ret == 0)
72622 ret = put_user(s, set);
72623 @@ -266,8 +266,8 @@ asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
72624 old_fs = get_fs();
72625 set_fs(KERNEL_DS);
72626 ret = sys_sigprocmask(how,
72627 - set ? (old_sigset_t __user *) &s : NULL,
72628 - oset ? (old_sigset_t __user *) &s : NULL);
72629 + set ? (old_sigset_t __force_user *) &s : NULL,
72630 + oset ? (old_sigset_t __force_user *) &s : NULL);
72631 set_fs(old_fs);
72632 if (ret == 0)
72633 if (oset)
72634 @@ -310,7 +310,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
72635 mm_segment_t old_fs = get_fs();
72636
72637 set_fs(KERNEL_DS);
72638 - ret = sys_old_getrlimit(resource, &r);
72639 + ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
72640 set_fs(old_fs);
72641
72642 if (!ret) {
72643 @@ -385,7 +385,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
72644 mm_segment_t old_fs = get_fs();
72645
72646 set_fs(KERNEL_DS);
72647 - ret = sys_getrusage(who, (struct rusage __user *) &r);
72648 + ret = sys_getrusage(who, (struct rusage __force_user *) &r);
72649 set_fs(old_fs);
72650
72651 if (ret)
72652 @@ -412,8 +412,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
72653 set_fs (KERNEL_DS);
72654 ret = sys_wait4(pid,
72655 (stat_addr ?
72656 - (unsigned int __user *) &status : NULL),
72657 - options, (struct rusage __user *) &r);
72658 + (unsigned int __force_user *) &status : NULL),
72659 + options, (struct rusage __force_user *) &r);
72660 set_fs (old_fs);
72661
72662 if (ret > 0) {
72663 @@ -438,8 +438,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
72664 memset(&info, 0, sizeof(info));
72665
72666 set_fs(KERNEL_DS);
72667 - ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
72668 - uru ? (struct rusage __user *)&ru : NULL);
72669 + ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
72670 + uru ? (struct rusage __force_user *)&ru : NULL);
72671 set_fs(old_fs);
72672
72673 if ((ret < 0) || (info.si_signo == 0))
72674 @@ -569,8 +569,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
72675 oldfs = get_fs();
72676 set_fs(KERNEL_DS);
72677 err = sys_timer_settime(timer_id, flags,
72678 - (struct itimerspec __user *) &newts,
72679 - (struct itimerspec __user *) &oldts);
72680 + (struct itimerspec __force_user *) &newts,
72681 + (struct itimerspec __force_user *) &oldts);
72682 set_fs(oldfs);
72683 if (!err && old && put_compat_itimerspec(old, &oldts))
72684 return -EFAULT;
72685 @@ -587,7 +587,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
72686 oldfs = get_fs();
72687 set_fs(KERNEL_DS);
72688 err = sys_timer_gettime(timer_id,
72689 - (struct itimerspec __user *) &ts);
72690 + (struct itimerspec __force_user *) &ts);
72691 set_fs(oldfs);
72692 if (!err && put_compat_itimerspec(setting, &ts))
72693 return -EFAULT;
72694 @@ -606,7 +606,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
72695 oldfs = get_fs();
72696 set_fs(KERNEL_DS);
72697 err = sys_clock_settime(which_clock,
72698 - (struct timespec __user *) &ts);
72699 + (struct timespec __force_user *) &ts);
72700 set_fs(oldfs);
72701 return err;
72702 }
72703 @@ -621,7 +621,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
72704 oldfs = get_fs();
72705 set_fs(KERNEL_DS);
72706 err = sys_clock_gettime(which_clock,
72707 - (struct timespec __user *) &ts);
72708 + (struct timespec __force_user *) &ts);
72709 set_fs(oldfs);
72710 if (!err && put_compat_timespec(&ts, tp))
72711 return -EFAULT;
72712 @@ -638,7 +638,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
72713 oldfs = get_fs();
72714 set_fs(KERNEL_DS);
72715 err = sys_clock_getres(which_clock,
72716 - (struct timespec __user *) &ts);
72717 + (struct timespec __force_user *) &ts);
72718 set_fs(oldfs);
72719 if (!err && tp && put_compat_timespec(&ts, tp))
72720 return -EFAULT;
72721 @@ -650,9 +650,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
72722 long err;
72723 mm_segment_t oldfs;
72724 struct timespec tu;
72725 - struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
72726 + struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
72727
72728 - restart->nanosleep.rmtp = (struct timespec __user *) &tu;
72729 + restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
72730 oldfs = get_fs();
72731 set_fs(KERNEL_DS);
72732 err = clock_nanosleep_restart(restart);
72733 @@ -684,8 +684,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
72734 oldfs = get_fs();
72735 set_fs(KERNEL_DS);
72736 err = sys_clock_nanosleep(which_clock, flags,
72737 - (struct timespec __user *) &in,
72738 - (struct timespec __user *) &out);
72739 + (struct timespec __force_user *) &in,
72740 + (struct timespec __force_user *) &out);
72741 set_fs(oldfs);
72742
72743 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
72744 diff --git a/kernel/configs.c b/kernel/configs.c
72745 index abaee68..047facd 100644
72746 --- a/kernel/configs.c
72747 +++ b/kernel/configs.c
72748 @@ -73,8 +73,19 @@ static int __init ikconfig_init(void)
72749 struct proc_dir_entry *entry;
72750
72751 /* create the current config file */
72752 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
72753 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
72754 + entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
72755 + &ikconfig_file_ops);
72756 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72757 + entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
72758 + &ikconfig_file_ops);
72759 +#endif
72760 +#else
72761 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
72762 &ikconfig_file_ops);
72763 +#endif
72764 +
72765 if (!entry)
72766 return -ENOMEM;
72767
72768 diff --git a/kernel/cpu.c b/kernel/cpu.c
72769 index 3f2f04f..4e53ded 100644
72770 --- a/kernel/cpu.c
72771 +++ b/kernel/cpu.c
72772 @@ -20,7 +20,7 @@
72773 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
72774 static DEFINE_MUTEX(cpu_add_remove_lock);
72775
72776 -static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
72777 +static RAW_NOTIFIER_HEAD(cpu_chain);
72778
72779 /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
72780 * Should always be manipulated under cpu_add_remove_lock
72781 diff --git a/kernel/cred.c b/kernel/cred.c
72782 index 0b5b5fc..f7fe51a 100644
72783 --- a/kernel/cred.c
72784 +++ b/kernel/cred.c
72785 @@ -160,6 +160,8 @@ static void put_cred_rcu(struct rcu_head *rcu)
72786 */
72787 void __put_cred(struct cred *cred)
72788 {
72789 + pax_track_stack();
72790 +
72791 kdebug("__put_cred(%p{%d,%d})", cred,
72792 atomic_read(&cred->usage),
72793 read_cred_subscribers(cred));
72794 @@ -184,6 +186,8 @@ void exit_creds(struct task_struct *tsk)
72795 {
72796 struct cred *cred;
72797
72798 + pax_track_stack();
72799 +
72800 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
72801 atomic_read(&tsk->cred->usage),
72802 read_cred_subscribers(tsk->cred));
72803 @@ -206,6 +210,15 @@ void exit_creds(struct task_struct *tsk)
72804 validate_creds(cred);
72805 put_cred(cred);
72806 }
72807 +
72808 +#ifdef CONFIG_GRKERNSEC_SETXID
72809 + cred = (struct cred *) tsk->delayed_cred;
72810 + if (cred) {
72811 + tsk->delayed_cred = NULL;
72812 + validate_creds(cred);
72813 + put_cred(cred);
72814 + }
72815 +#endif
72816 }
72817
72818 /**
72819 @@ -222,6 +235,8 @@ const struct cred *get_task_cred(struct task_struct *task)
72820 {
72821 const struct cred *cred;
72822
72823 + pax_track_stack();
72824 +
72825 rcu_read_lock();
72826
72827 do {
72828 @@ -241,6 +256,8 @@ struct cred *cred_alloc_blank(void)
72829 {
72830 struct cred *new;
72831
72832 + pax_track_stack();
72833 +
72834 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
72835 if (!new)
72836 return NULL;
72837 @@ -289,6 +306,8 @@ struct cred *prepare_creds(void)
72838 const struct cred *old;
72839 struct cred *new;
72840
72841 + pax_track_stack();
72842 +
72843 validate_process_creds();
72844
72845 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
72846 @@ -335,6 +354,8 @@ struct cred *prepare_exec_creds(void)
72847 struct thread_group_cred *tgcred = NULL;
72848 struct cred *new;
72849
72850 + pax_track_stack();
72851 +
72852 #ifdef CONFIG_KEYS
72853 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
72854 if (!tgcred)
72855 @@ -441,6 +462,8 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags)
72856 struct cred *new;
72857 int ret;
72858
72859 + pax_track_stack();
72860 +
72861 mutex_init(&p->cred_guard_mutex);
72862
72863 if (
72864 @@ -523,11 +546,13 @@ error_put:
72865 * Always returns 0 thus allowing this function to be tail-called at the end
72866 * of, say, sys_setgid().
72867 */
72868 -int commit_creds(struct cred *new)
72869 +static int __commit_creds(struct cred *new)
72870 {
72871 struct task_struct *task = current;
72872 const struct cred *old = task->real_cred;
72873
72874 + pax_track_stack();
72875 +
72876 kdebug("commit_creds(%p{%d,%d})", new,
72877 atomic_read(&new->usage),
72878 read_cred_subscribers(new));
72879 @@ -544,6 +569,8 @@ int commit_creds(struct cred *new)
72880
72881 get_cred(new); /* we will require a ref for the subj creds too */
72882
72883 + gr_set_role_label(task, new->uid, new->gid);
72884 +
72885 /* dumpability changes */
72886 if (old->euid != new->euid ||
72887 old->egid != new->egid ||
72888 @@ -563,10 +590,8 @@ int commit_creds(struct cred *new)
72889 key_fsgid_changed(task);
72890
72891 /* do it
72892 - * - What if a process setreuid()'s and this brings the
72893 - * new uid over his NPROC rlimit? We can check this now
72894 - * cheaply with the new uid cache, so if it matters
72895 - * we should be checking for it. -DaveM
72896 + * RLIMIT_NPROC limits on user->processes have already been checked
72897 + * in set_user().
72898 */
72899 alter_cred_subscribers(new, 2);
72900 if (new->user != old->user)
72901 @@ -595,8 +620,96 @@ int commit_creds(struct cred *new)
72902 put_cred(old);
72903 return 0;
72904 }
72905 +
72906 +#ifdef CONFIG_GRKERNSEC_SETXID
72907 +extern int set_user(struct cred *new);
72908 +
72909 +void gr_delayed_cred_worker(void)
72910 +{
72911 + const struct cred *new = current->delayed_cred;
72912 + struct cred *ncred;
72913 +
72914 + current->delayed_cred = NULL;
72915 +
72916 + if (current_uid() && new != NULL) {
72917 + // from doing get_cred on it when queueing this
72918 + put_cred(new);
72919 + return;
72920 + } else if (new == NULL)
72921 + return;
72922 +
72923 + ncred = prepare_creds();
72924 + if (!ncred)
72925 + goto die;
72926 + // uids
72927 + ncred->uid = new->uid;
72928 + ncred->euid = new->euid;
72929 + ncred->suid = new->suid;
72930 + ncred->fsuid = new->fsuid;
72931 + // gids
72932 + ncred->gid = new->gid;
72933 + ncred->egid = new->egid;
72934 + ncred->sgid = new->sgid;
72935 + ncred->fsgid = new->fsgid;
72936 + // groups
72937 + if (set_groups(ncred, new->group_info) < 0) {
72938 + abort_creds(ncred);
72939 + goto die;
72940 + }
72941 + // caps
72942 + ncred->securebits = new->securebits;
72943 + ncred->cap_inheritable = new->cap_inheritable;
72944 + ncred->cap_permitted = new->cap_permitted;
72945 + ncred->cap_effective = new->cap_effective;
72946 + ncred->cap_bset = new->cap_bset;
72947 +
72948 + if (set_user(ncred)) {
72949 + abort_creds(ncred);
72950 + goto die;
72951 + }
72952 +
72953 + // from doing get_cred on it when queueing this
72954 + put_cred(new);
72955 +
72956 + __commit_creds(ncred);
72957 + return;
72958 +die:
72959 + // from doing get_cred on it when queueing this
72960 + put_cred(new);
72961 + do_group_exit(SIGKILL);
72962 +}
72963 +#endif
72964 +
72965 +int commit_creds(struct cred *new)
72966 +{
72967 +#ifdef CONFIG_GRKERNSEC_SETXID
72968 + struct task_struct *t;
72969 +
72970 + /* we won't get called with tasklist_lock held for writing
72971 + and interrupts disabled as the cred struct in that case is
72972 + init_cred
72973 + */
72974 + if (grsec_enable_setxid && !current_is_single_threaded() &&
72975 + !current_uid() && new->uid) {
72976 + rcu_read_lock();
72977 + read_lock(&tasklist_lock);
72978 + for (t = next_thread(current); t != current;
72979 + t = next_thread(t)) {
72980 + if (t->delayed_cred == NULL) {
72981 + t->delayed_cred = get_cred(new);
72982 + set_tsk_need_resched(t);
72983 + }
72984 + }
72985 + read_unlock(&tasklist_lock);
72986 + rcu_read_unlock();
72987 + }
72988 +#endif
72989 + return __commit_creds(new);
72990 +}
72991 +
72992 EXPORT_SYMBOL(commit_creds);
72993
72994 +
72995 /**
72996 * abort_creds - Discard a set of credentials and unlock the current task
72997 * @new: The credentials that were going to be applied
72998 @@ -606,6 +719,8 @@ EXPORT_SYMBOL(commit_creds);
72999 */
73000 void abort_creds(struct cred *new)
73001 {
73002 + pax_track_stack();
73003 +
73004 kdebug("abort_creds(%p{%d,%d})", new,
73005 atomic_read(&new->usage),
73006 read_cred_subscribers(new));
73007 @@ -629,6 +744,8 @@ const struct cred *override_creds(const struct cred *new)
73008 {
73009 const struct cred *old = current->cred;
73010
73011 + pax_track_stack();
73012 +
73013 kdebug("override_creds(%p{%d,%d})", new,
73014 atomic_read(&new->usage),
73015 read_cred_subscribers(new));
73016 @@ -658,6 +775,8 @@ void revert_creds(const struct cred *old)
73017 {
73018 const struct cred *override = current->cred;
73019
73020 + pax_track_stack();
73021 +
73022 kdebug("revert_creds(%p{%d,%d})", old,
73023 atomic_read(&old->usage),
73024 read_cred_subscribers(old));
73025 @@ -704,6 +823,8 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
73026 const struct cred *old;
73027 struct cred *new;
73028
73029 + pax_track_stack();
73030 +
73031 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
73032 if (!new)
73033 return NULL;
73034 @@ -758,6 +879,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
73035 */
73036 int set_security_override(struct cred *new, u32 secid)
73037 {
73038 + pax_track_stack();
73039 +
73040 return security_kernel_act_as(new, secid);
73041 }
73042 EXPORT_SYMBOL(set_security_override);
73043 @@ -777,6 +900,8 @@ int set_security_override_from_ctx(struct cred *new, const char *secctx)
73044 u32 secid;
73045 int ret;
73046
73047 + pax_track_stack();
73048 +
73049 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
73050 if (ret < 0)
73051 return ret;
73052 diff --git a/kernel/exit.c b/kernel/exit.c
73053 index 0f8fae3..7916abf 100644
73054 --- a/kernel/exit.c
73055 +++ b/kernel/exit.c
73056 @@ -55,6 +55,10 @@
73057 #include <asm/pgtable.h>
73058 #include <asm/mmu_context.h>
73059
73060 +#ifdef CONFIG_GRKERNSEC
73061 +extern rwlock_t grsec_exec_file_lock;
73062 +#endif
73063 +
73064 static void exit_mm(struct task_struct * tsk);
73065
73066 static void __unhash_process(struct task_struct *p)
73067 @@ -174,6 +178,10 @@ void release_task(struct task_struct * p)
73068 struct task_struct *leader;
73069 int zap_leader;
73070 repeat:
73071 +#ifdef CONFIG_NET
73072 + gr_del_task_from_ip_table(p);
73073 +#endif
73074 +
73075 tracehook_prepare_release_task(p);
73076 /* don't need to get the RCU readlock here - the process is dead and
73077 * can't be modifying its own credentials */
73078 @@ -397,7 +405,7 @@ int allow_signal(int sig)
73079 * know it'll be handled, so that they don't get converted to
73080 * SIGKILL or just silently dropped.
73081 */
73082 - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
73083 + current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
73084 recalc_sigpending();
73085 spin_unlock_irq(&current->sighand->siglock);
73086 return 0;
73087 @@ -433,6 +441,17 @@ void daemonize(const char *name, ...)
73088 vsnprintf(current->comm, sizeof(current->comm), name, args);
73089 va_end(args);
73090
73091 +#ifdef CONFIG_GRKERNSEC
73092 + write_lock(&grsec_exec_file_lock);
73093 + if (current->exec_file) {
73094 + fput(current->exec_file);
73095 + current->exec_file = NULL;
73096 + }
73097 + write_unlock(&grsec_exec_file_lock);
73098 +#endif
73099 +
73100 + gr_set_kernel_label(current);
73101 +
73102 /*
73103 * If we were started as result of loading a module, close all of the
73104 * user space pages. We don't need them, and if we didn't close them
73105 @@ -897,17 +916,17 @@ NORET_TYPE void do_exit(long code)
73106 struct task_struct *tsk = current;
73107 int group_dead;
73108
73109 - profile_task_exit(tsk);
73110 -
73111 - WARN_ON(atomic_read(&tsk->fs_excl));
73112 -
73113 + /*
73114 + * Check this first since set_fs() below depends on
73115 + * current_thread_info(), which we better not access when we're in
73116 + * interrupt context. Other than that, we want to do the set_fs()
73117 + * as early as possible.
73118 + */
73119 if (unlikely(in_interrupt()))
73120 panic("Aiee, killing interrupt handler!");
73121 - if (unlikely(!tsk->pid))
73122 - panic("Attempted to kill the idle task!");
73123
73124 /*
73125 - * If do_exit is called because this processes oopsed, it's possible
73126 + * If do_exit is called because this processes Oops'ed, it's possible
73127 * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
73128 * continuing. Amongst other possible reasons, this is to prevent
73129 * mm_release()->clear_child_tid() from writing to a user-controlled
73130 @@ -915,6 +934,13 @@ NORET_TYPE void do_exit(long code)
73131 */
73132 set_fs(USER_DS);
73133
73134 + profile_task_exit(tsk);
73135 +
73136 + WARN_ON(atomic_read(&tsk->fs_excl));
73137 +
73138 + if (unlikely(!tsk->pid))
73139 + panic("Attempted to kill the idle task!");
73140 +
73141 tracehook_report_exit(&code);
73142
73143 validate_creds_for_do_exit(tsk);
73144 @@ -973,6 +999,9 @@ NORET_TYPE void do_exit(long code)
73145 tsk->exit_code = code;
73146 taskstats_exit(tsk, group_dead);
73147
73148 + gr_acl_handle_psacct(tsk, code);
73149 + gr_acl_handle_exit();
73150 +
73151 exit_mm(tsk);
73152
73153 if (group_dead)
73154 @@ -1020,7 +1049,7 @@ NORET_TYPE void do_exit(long code)
73155 tsk->flags |= PF_EXITPIDONE;
73156
73157 if (tsk->io_context)
73158 - exit_io_context();
73159 + exit_io_context(tsk);
73160
73161 if (tsk->splice_pipe)
73162 __free_pipe_info(tsk->splice_pipe);
73163 @@ -1188,7 +1217,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
73164
73165 if (unlikely(wo->wo_flags & WNOWAIT)) {
73166 int exit_code = p->exit_code;
73167 - int why, status;
73168 + int why;
73169
73170 get_task_struct(p);
73171 read_unlock(&tasklist_lock);
73172 diff --git a/kernel/fork.c b/kernel/fork.c
73173 index 4bde56f..8976a8f 100644
73174 --- a/kernel/fork.c
73175 +++ b/kernel/fork.c
73176 @@ -253,7 +253,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
73177 *stackend = STACK_END_MAGIC; /* for overflow detection */
73178
73179 #ifdef CONFIG_CC_STACKPROTECTOR
73180 - tsk->stack_canary = get_random_int();
73181 + tsk->stack_canary = pax_get_random_long();
73182 #endif
73183
73184 /* One for us, one for whoever does the "release_task()" (usually parent) */
73185 @@ -293,8 +293,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
73186 mm->locked_vm = 0;
73187 mm->mmap = NULL;
73188 mm->mmap_cache = NULL;
73189 - mm->free_area_cache = oldmm->mmap_base;
73190 - mm->cached_hole_size = ~0UL;
73191 + mm->free_area_cache = oldmm->free_area_cache;
73192 + mm->cached_hole_size = oldmm->cached_hole_size;
73193 mm->map_count = 0;
73194 cpumask_clear(mm_cpumask(mm));
73195 mm->mm_rb = RB_ROOT;
73196 @@ -335,6 +335,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
73197 tmp->vm_flags &= ~VM_LOCKED;
73198 tmp->vm_mm = mm;
73199 tmp->vm_next = tmp->vm_prev = NULL;
73200 + tmp->vm_mirror = NULL;
73201 anon_vma_link(tmp);
73202 file = tmp->vm_file;
73203 if (file) {
73204 @@ -384,6 +385,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
73205 if (retval)
73206 goto out;
73207 }
73208 +
73209 +#ifdef CONFIG_PAX_SEGMEXEC
73210 + if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
73211 + struct vm_area_struct *mpnt_m;
73212 +
73213 + for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
73214 + BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
73215 +
73216 + if (!mpnt->vm_mirror)
73217 + continue;
73218 +
73219 + if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
73220 + BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
73221 + mpnt->vm_mirror = mpnt_m;
73222 + } else {
73223 + BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
73224 + mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
73225 + mpnt_m->vm_mirror->vm_mirror = mpnt_m;
73226 + mpnt->vm_mirror->vm_mirror = mpnt;
73227 + }
73228 + }
73229 + BUG_ON(mpnt_m);
73230 + }
73231 +#endif
73232 +
73233 /* a new mm has just been created */
73234 arch_dup_mmap(oldmm, mm);
73235 retval = 0;
73236 @@ -734,13 +760,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
73237 write_unlock(&fs->lock);
73238 return -EAGAIN;
73239 }
73240 - fs->users++;
73241 + atomic_inc(&fs->users);
73242 write_unlock(&fs->lock);
73243 return 0;
73244 }
73245 tsk->fs = copy_fs_struct(fs);
73246 if (!tsk->fs)
73247 return -ENOMEM;
73248 + gr_set_chroot_entries(tsk, &tsk->fs->root);
73249 return 0;
73250 }
73251
73252 @@ -1033,12 +1060,16 @@ static struct task_struct *copy_process(unsigned long clone_flags,
73253 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
73254 #endif
73255 retval = -EAGAIN;
73256 +
73257 + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
73258 +
73259 if (atomic_read(&p->real_cred->user->processes) >=
73260 p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
73261 - if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
73262 - p->real_cred->user != INIT_USER)
73263 + if (p->real_cred->user != INIT_USER &&
73264 + !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
73265 goto bad_fork_free;
73266 }
73267 + current->flags &= ~PF_NPROC_EXCEEDED;
73268
73269 retval = copy_creds(p, clone_flags);
73270 if (retval < 0)
73271 @@ -1183,6 +1214,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
73272 goto bad_fork_free_pid;
73273 }
73274
73275 + gr_copy_label(p);
73276 +
73277 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
73278 /*
73279 * Clear TID on mm_release()?
73280 @@ -1299,7 +1332,8 @@ bad_fork_free_pid:
73281 if (pid != &init_struct_pid)
73282 free_pid(pid);
73283 bad_fork_cleanup_io:
73284 - put_io_context(p->io_context);
73285 + if (p->io_context)
73286 + exit_io_context(p);
73287 bad_fork_cleanup_namespaces:
73288 exit_task_namespaces(p);
73289 bad_fork_cleanup_mm:
73290 @@ -1333,6 +1367,8 @@ bad_fork_cleanup_count:
73291 bad_fork_free:
73292 free_task(p);
73293 fork_out:
73294 + gr_log_forkfail(retval);
73295 +
73296 return ERR_PTR(retval);
73297 }
73298
73299 @@ -1426,6 +1462,8 @@ long do_fork(unsigned long clone_flags,
73300 if (clone_flags & CLONE_PARENT_SETTID)
73301 put_user(nr, parent_tidptr);
73302
73303 + gr_handle_brute_check();
73304 +
73305 if (clone_flags & CLONE_VFORK) {
73306 p->vfork_done = &vfork;
73307 init_completion(&vfork);
73308 @@ -1558,7 +1596,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
73309 return 0;
73310
73311 /* don't need lock here; in the worst case we'll do useless copy */
73312 - if (fs->users == 1)
73313 + if (atomic_read(&fs->users) == 1)
73314 return 0;
73315
73316 *new_fsp = copy_fs_struct(fs);
73317 @@ -1681,7 +1719,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
73318 fs = current->fs;
73319 write_lock(&fs->lock);
73320 current->fs = new_fs;
73321 - if (--fs->users)
73322 + gr_set_chroot_entries(current, &current->fs->root);
73323 + if (atomic_dec_return(&fs->users))
73324 new_fs = NULL;
73325 else
73326 new_fs = fs;
73327 diff --git a/kernel/futex.c b/kernel/futex.c
73328 index fb98c9f..333faec 100644
73329 --- a/kernel/futex.c
73330 +++ b/kernel/futex.c
73331 @@ -54,6 +54,7 @@
73332 #include <linux/mount.h>
73333 #include <linux/pagemap.h>
73334 #include <linux/syscalls.h>
73335 +#include <linux/ptrace.h>
73336 #include <linux/signal.h>
73337 #include <linux/module.h>
73338 #include <linux/magic.h>
73339 @@ -223,6 +224,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
73340 struct page *page;
73341 int err, ro = 0;
73342
73343 +#ifdef CONFIG_PAX_SEGMEXEC
73344 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
73345 + return -EFAULT;
73346 +#endif
73347 +
73348 /*
73349 * The futex address must be "naturally" aligned.
73350 */
73351 @@ -1819,6 +1825,8 @@ static int futex_wait(u32 __user *uaddr, int fshared,
73352 struct futex_q q;
73353 int ret;
73354
73355 + pax_track_stack();
73356 +
73357 if (!bitset)
73358 return -EINVAL;
73359
73360 @@ -1871,7 +1879,7 @@ retry:
73361
73362 restart = &current_thread_info()->restart_block;
73363 restart->fn = futex_wait_restart;
73364 - restart->futex.uaddr = (u32 *)uaddr;
73365 + restart->futex.uaddr = uaddr;
73366 restart->futex.val = val;
73367 restart->futex.time = abs_time->tv64;
73368 restart->futex.bitset = bitset;
73369 @@ -2233,6 +2241,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared,
73370 struct futex_q q;
73371 int res, ret;
73372
73373 + pax_track_stack();
73374 +
73375 if (!bitset)
73376 return -EINVAL;
73377
73378 @@ -2423,6 +2433,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
73379 if (!p)
73380 goto err_unlock;
73381 ret = -EPERM;
73382 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
73383 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
73384 + goto err_unlock;
73385 +#endif
73386 pcred = __task_cred(p);
73387 if (cred->euid != pcred->euid &&
73388 cred->euid != pcred->uid &&
73389 @@ -2489,7 +2503,7 @@ retry:
73390 */
73391 static inline int fetch_robust_entry(struct robust_list __user **entry,
73392 struct robust_list __user * __user *head,
73393 - int *pi)
73394 + unsigned int *pi)
73395 {
73396 unsigned long uentry;
73397
73398 @@ -2670,6 +2684,7 @@ static int __init futex_init(void)
73399 {
73400 u32 curval;
73401 int i;
73402 + mm_segment_t oldfs;
73403
73404 /*
73405 * This will fail and we want it. Some arch implementations do
73406 @@ -2681,7 +2696,10 @@ static int __init futex_init(void)
73407 * implementation, the non functional ones will return
73408 * -ENOSYS.
73409 */
73410 + oldfs = get_fs();
73411 + set_fs(USER_DS);
73412 curval = cmpxchg_futex_value_locked(NULL, 0, 0);
73413 + set_fs(oldfs);
73414 if (curval == -EFAULT)
73415 futex_cmpxchg_enabled = 1;
73416
73417 diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
73418 index 2357165..eb25501 100644
73419 --- a/kernel/futex_compat.c
73420 +++ b/kernel/futex_compat.c
73421 @@ -10,6 +10,7 @@
73422 #include <linux/compat.h>
73423 #include <linux/nsproxy.h>
73424 #include <linux/futex.h>
73425 +#include <linux/ptrace.h>
73426
73427 #include <asm/uaccess.h>
73428
73429 @@ -135,7 +136,8 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
73430 {
73431 struct compat_robust_list_head __user *head;
73432 unsigned long ret;
73433 - const struct cred *cred = current_cred(), *pcred;
73434 + const struct cred *cred = current_cred();
73435 + const struct cred *pcred;
73436
73437 if (!futex_cmpxchg_enabled)
73438 return -ENOSYS;
73439 @@ -151,6 +153,10 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
73440 if (!p)
73441 goto err_unlock;
73442 ret = -EPERM;
73443 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
73444 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
73445 + goto err_unlock;
73446 +#endif
73447 pcred = __task_cred(p);
73448 if (cred->euid != pcred->euid &&
73449 cred->euid != pcred->uid &&
73450 diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
73451 index 9b22d03..6295b62 100644
73452 --- a/kernel/gcov/base.c
73453 +++ b/kernel/gcov/base.c
73454 @@ -102,11 +102,6 @@ void gcov_enable_events(void)
73455 }
73456
73457 #ifdef CONFIG_MODULES
73458 -static inline int within(void *addr, void *start, unsigned long size)
73459 -{
73460 - return ((addr >= start) && (addr < start + size));
73461 -}
73462 -
73463 /* Update list and generate events when modules are unloaded. */
73464 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
73465 void *data)
73466 @@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
73467 prev = NULL;
73468 /* Remove entries located in module from linked list. */
73469 for (info = gcov_info_head; info; info = info->next) {
73470 - if (within(info, mod->module_core, mod->core_size)) {
73471 + if (within_module_core_rw((unsigned long)info, mod)) {
73472 if (prev)
73473 prev->next = info->next;
73474 else
73475 diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
73476 index a6e9d00..a0da4f9 100644
73477 --- a/kernel/hrtimer.c
73478 +++ b/kernel/hrtimer.c
73479 @@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
73480 local_irq_restore(flags);
73481 }
73482
73483 -static void run_hrtimer_softirq(struct softirq_action *h)
73484 +static void run_hrtimer_softirq(void)
73485 {
73486 hrtimer_peek_ahead_timers();
73487 }
73488 diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
73489 index 8b6b8b6..6bc87df 100644
73490 --- a/kernel/kallsyms.c
73491 +++ b/kernel/kallsyms.c
73492 @@ -11,6 +11,9 @@
73493 * Changed the compression method from stem compression to "table lookup"
73494 * compression (see scripts/kallsyms.c for a more complete description)
73495 */
73496 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73497 +#define __INCLUDED_BY_HIDESYM 1
73498 +#endif
73499 #include <linux/kallsyms.h>
73500 #include <linux/module.h>
73501 #include <linux/init.h>
73502 @@ -51,12 +54,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
73503
73504 static inline int is_kernel_inittext(unsigned long addr)
73505 {
73506 + if (system_state != SYSTEM_BOOTING)
73507 + return 0;
73508 +
73509 if (addr >= (unsigned long)_sinittext
73510 && addr <= (unsigned long)_einittext)
73511 return 1;
73512 return 0;
73513 }
73514
73515 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
73516 +#ifdef CONFIG_MODULES
73517 +static inline int is_module_text(unsigned long addr)
73518 +{
73519 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
73520 + return 1;
73521 +
73522 + addr = ktla_ktva(addr);
73523 + return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
73524 +}
73525 +#else
73526 +static inline int is_module_text(unsigned long addr)
73527 +{
73528 + return 0;
73529 +}
73530 +#endif
73531 +#endif
73532 +
73533 static inline int is_kernel_text(unsigned long addr)
73534 {
73535 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
73536 @@ -67,13 +91,28 @@ static inline int is_kernel_text(unsigned long addr)
73537
73538 static inline int is_kernel(unsigned long addr)
73539 {
73540 +
73541 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
73542 + if (is_kernel_text(addr) || is_kernel_inittext(addr))
73543 + return 1;
73544 +
73545 + if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
73546 +#else
73547 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
73548 +#endif
73549 +
73550 return 1;
73551 return in_gate_area_no_task(addr);
73552 }
73553
73554 static int is_ksym_addr(unsigned long addr)
73555 {
73556 +
73557 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
73558 + if (is_module_text(addr))
73559 + return 0;
73560 +#endif
73561 +
73562 if (all_var)
73563 return is_kernel(addr);
73564
73565 @@ -413,7 +452,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
73566
73567 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
73568 {
73569 - iter->name[0] = '\0';
73570 iter->nameoff = get_symbol_offset(new_pos);
73571 iter->pos = new_pos;
73572 }
73573 @@ -461,6 +499,11 @@ static int s_show(struct seq_file *m, void *p)
73574 {
73575 struct kallsym_iter *iter = m->private;
73576
73577 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73578 + if (current_uid())
73579 + return 0;
73580 +#endif
73581 +
73582 /* Some debugging symbols have no name. Ignore them. */
73583 if (!iter->name[0])
73584 return 0;
73585 @@ -501,7 +544,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
73586 struct kallsym_iter *iter;
73587 int ret;
73588
73589 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
73590 + iter = kzalloc(sizeof(*iter), GFP_KERNEL);
73591 if (!iter)
73592 return -ENOMEM;
73593 reset_iter(iter, 0);
73594 diff --git a/kernel/kexec.c b/kernel/kexec.c
73595 index f336e21..9c1c20b 100644
73596 --- a/kernel/kexec.c
73597 +++ b/kernel/kexec.c
73598 @@ -1028,7 +1028,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
73599 unsigned long flags)
73600 {
73601 struct compat_kexec_segment in;
73602 - struct kexec_segment out, __user *ksegments;
73603 + struct kexec_segment out;
73604 + struct kexec_segment __user *ksegments;
73605 unsigned long i, result;
73606
73607 /* Don't allow clients that don't understand the native
73608 diff --git a/kernel/kgdb.c b/kernel/kgdb.c
73609 index 53dae4b..9ba3743 100644
73610 --- a/kernel/kgdb.c
73611 +++ b/kernel/kgdb.c
73612 @@ -86,7 +86,7 @@ static int kgdb_io_module_registered;
73613 /* Guard for recursive entry */
73614 static int exception_level;
73615
73616 -static struct kgdb_io *kgdb_io_ops;
73617 +static const struct kgdb_io *kgdb_io_ops;
73618 static DEFINE_SPINLOCK(kgdb_registration_lock);
73619
73620 /* kgdb console driver is loaded */
73621 @@ -123,7 +123,7 @@ atomic_t kgdb_active = ATOMIC_INIT(-1);
73622 */
73623 static atomic_t passive_cpu_wait[NR_CPUS];
73624 static atomic_t cpu_in_kgdb[NR_CPUS];
73625 -atomic_t kgdb_setting_breakpoint;
73626 +atomic_unchecked_t kgdb_setting_breakpoint;
73627
73628 struct task_struct *kgdb_usethread;
73629 struct task_struct *kgdb_contthread;
73630 @@ -140,7 +140,7 @@ static unsigned long gdb_regs[(NUMREGBYTES +
73631 sizeof(unsigned long)];
73632
73633 /* to keep track of the CPU which is doing the single stepping*/
73634 -atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
73635 +atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
73636
73637 /*
73638 * If you are debugging a problem where roundup (the collection of
73639 @@ -815,7 +815,7 @@ static int kgdb_io_ready(int print_wait)
73640 return 0;
73641 if (kgdb_connected)
73642 return 1;
73643 - if (atomic_read(&kgdb_setting_breakpoint))
73644 + if (atomic_read_unchecked(&kgdb_setting_breakpoint))
73645 return 1;
73646 if (print_wait)
73647 printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
73648 @@ -1426,8 +1426,8 @@ acquirelock:
73649 * instance of the exception handler wanted to come into the
73650 * debugger on a different CPU via a single step
73651 */
73652 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
73653 - atomic_read(&kgdb_cpu_doing_single_step) != cpu) {
73654 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
73655 + atomic_read_unchecked(&kgdb_cpu_doing_single_step) != cpu) {
73656
73657 atomic_set(&kgdb_active, -1);
73658 touch_softlockup_watchdog();
73659 @@ -1634,7 +1634,7 @@ static void kgdb_initial_breakpoint(void)
73660 *
73661 * Register it with the KGDB core.
73662 */
73663 -int kgdb_register_io_module(struct kgdb_io *new_kgdb_io_ops)
73664 +int kgdb_register_io_module(const struct kgdb_io *new_kgdb_io_ops)
73665 {
73666 int err;
73667
73668 @@ -1679,7 +1679,7 @@ EXPORT_SYMBOL_GPL(kgdb_register_io_module);
73669 *
73670 * Unregister it with the KGDB core.
73671 */
73672 -void kgdb_unregister_io_module(struct kgdb_io *old_kgdb_io_ops)
73673 +void kgdb_unregister_io_module(const struct kgdb_io *old_kgdb_io_ops)
73674 {
73675 BUG_ON(kgdb_connected);
73676
73677 @@ -1712,11 +1712,11 @@ EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);
73678 */
73679 void kgdb_breakpoint(void)
73680 {
73681 - atomic_set(&kgdb_setting_breakpoint, 1);
73682 + atomic_set_unchecked(&kgdb_setting_breakpoint, 1);
73683 wmb(); /* Sync point before breakpoint */
73684 arch_kgdb_breakpoint();
73685 wmb(); /* Sync point after breakpoint */
73686 - atomic_set(&kgdb_setting_breakpoint, 0);
73687 + atomic_set_unchecked(&kgdb_setting_breakpoint, 0);
73688 }
73689 EXPORT_SYMBOL_GPL(kgdb_breakpoint);
73690
73691 diff --git a/kernel/kmod.c b/kernel/kmod.c
73692 index a061472..40884b6 100644
73693 --- a/kernel/kmod.c
73694 +++ b/kernel/kmod.c
73695 @@ -68,13 +68,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";
73696 * If module auto-loading support is disabled then this function
73697 * becomes a no-operation.
73698 */
73699 -int __request_module(bool wait, const char *fmt, ...)
73700 +static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
73701 {
73702 - va_list args;
73703 char module_name[MODULE_NAME_LEN];
73704 unsigned int max_modprobes;
73705 int ret;
73706 - char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
73707 + char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
73708 static char *envp[] = { "HOME=/",
73709 "TERM=linux",
73710 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
73711 @@ -87,12 +86,24 @@ int __request_module(bool wait, const char *fmt, ...)
73712 if (ret)
73713 return ret;
73714
73715 - va_start(args, fmt);
73716 - ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
73717 - va_end(args);
73718 + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
73719 if (ret >= MODULE_NAME_LEN)
73720 return -ENAMETOOLONG;
73721
73722 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
73723 + if (!current_uid()) {
73724 + /* hack to workaround consolekit/udisks stupidity */
73725 + read_lock(&tasklist_lock);
73726 + if (!strcmp(current->comm, "mount") &&
73727 + current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
73728 + read_unlock(&tasklist_lock);
73729 + printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
73730 + return -EPERM;
73731 + }
73732 + read_unlock(&tasklist_lock);
73733 + }
73734 +#endif
73735 +
73736 /* If modprobe needs a service that is in a module, we get a recursive
73737 * loop. Limit the number of running kmod threads to max_threads/2 or
73738 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
73739 @@ -126,6 +137,48 @@ int __request_module(bool wait, const char *fmt, ...)
73740 atomic_dec(&kmod_concurrent);
73741 return ret;
73742 }
73743 +
73744 +int ___request_module(bool wait, char *module_param, const char *fmt, ...)
73745 +{
73746 + va_list args;
73747 + int ret;
73748 +
73749 + va_start(args, fmt);
73750 + ret = ____request_module(wait, module_param, fmt, args);
73751 + va_end(args);
73752 +
73753 + return ret;
73754 +}
73755 +
73756 +int __request_module(bool wait, const char *fmt, ...)
73757 +{
73758 + va_list args;
73759 + int ret;
73760 +
73761 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
73762 + if (current_uid()) {
73763 + char module_param[MODULE_NAME_LEN];
73764 +
73765 + memset(module_param, 0, sizeof(module_param));
73766 +
73767 + snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
73768 +
73769 + va_start(args, fmt);
73770 + ret = ____request_module(wait, module_param, fmt, args);
73771 + va_end(args);
73772 +
73773 + return ret;
73774 + }
73775 +#endif
73776 +
73777 + va_start(args, fmt);
73778 + ret = ____request_module(wait, NULL, fmt, args);
73779 + va_end(args);
73780 +
73781 + return ret;
73782 +}
73783 +
73784 +
73785 EXPORT_SYMBOL(__request_module);
73786 #endif /* CONFIG_MODULES */
73787
73788 @@ -231,7 +284,7 @@ static int wait_for_helper(void *data)
73789 *
73790 * Thus the __user pointer cast is valid here.
73791 */
73792 - sys_wait4(pid, (int __user *)&ret, 0, NULL);
73793 + sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
73794
73795 /*
73796 * If ret is 0, either ____call_usermodehelper failed and the
73797 diff --git a/kernel/kprobes.c b/kernel/kprobes.c
73798 index 176d825..77fa8ea 100644
73799 --- a/kernel/kprobes.c
73800 +++ b/kernel/kprobes.c
73801 @@ -183,7 +183,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(void)
73802 * kernel image and loaded module images reside. This is required
73803 * so x86_64 can correctly handle the %rip-relative fixups.
73804 */
73805 - kip->insns = module_alloc(PAGE_SIZE);
73806 + kip->insns = module_alloc_exec(PAGE_SIZE);
73807 if (!kip->insns) {
73808 kfree(kip);
73809 return NULL;
73810 @@ -220,7 +220,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
73811 */
73812 if (!list_is_singular(&kprobe_insn_pages)) {
73813 list_del(&kip->list);
73814 - module_free(NULL, kip->insns);
73815 + module_free_exec(NULL, kip->insns);
73816 kfree(kip);
73817 }
73818 return 1;
73819 @@ -1189,7 +1189,7 @@ static int __init init_kprobes(void)
73820 {
73821 int i, err = 0;
73822 unsigned long offset = 0, size = 0;
73823 - char *modname, namebuf[128];
73824 + char *modname, namebuf[KSYM_NAME_LEN];
73825 const char *symbol_name;
73826 void *addr;
73827 struct kprobe_blackpoint *kb;
73828 @@ -1304,7 +1304,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
73829 const char *sym = NULL;
73830 unsigned int i = *(loff_t *) v;
73831 unsigned long offset = 0;
73832 - char *modname, namebuf[128];
73833 + char *modname, namebuf[KSYM_NAME_LEN];
73834
73835 head = &kprobe_table[i];
73836 preempt_disable();
73837 diff --git a/kernel/lockdep.c b/kernel/lockdep.c
73838 index d86fe89..d12fc66 100644
73839 --- a/kernel/lockdep.c
73840 +++ b/kernel/lockdep.c
73841 @@ -421,20 +421,20 @@ static struct stack_trace lockdep_init_trace = {
73842 /*
73843 * Various lockdep statistics:
73844 */
73845 -atomic_t chain_lookup_hits;
73846 -atomic_t chain_lookup_misses;
73847 -atomic_t hardirqs_on_events;
73848 -atomic_t hardirqs_off_events;
73849 -atomic_t redundant_hardirqs_on;
73850 -atomic_t redundant_hardirqs_off;
73851 -atomic_t softirqs_on_events;
73852 -atomic_t softirqs_off_events;
73853 -atomic_t redundant_softirqs_on;
73854 -atomic_t redundant_softirqs_off;
73855 -atomic_t nr_unused_locks;
73856 -atomic_t nr_cyclic_checks;
73857 -atomic_t nr_find_usage_forwards_checks;
73858 -atomic_t nr_find_usage_backwards_checks;
73859 +atomic_unchecked_t chain_lookup_hits;
73860 +atomic_unchecked_t chain_lookup_misses;
73861 +atomic_unchecked_t hardirqs_on_events;
73862 +atomic_unchecked_t hardirqs_off_events;
73863 +atomic_unchecked_t redundant_hardirqs_on;
73864 +atomic_unchecked_t redundant_hardirqs_off;
73865 +atomic_unchecked_t softirqs_on_events;
73866 +atomic_unchecked_t softirqs_off_events;
73867 +atomic_unchecked_t redundant_softirqs_on;
73868 +atomic_unchecked_t redundant_softirqs_off;
73869 +atomic_unchecked_t nr_unused_locks;
73870 +atomic_unchecked_t nr_cyclic_checks;
73871 +atomic_unchecked_t nr_find_usage_forwards_checks;
73872 +atomic_unchecked_t nr_find_usage_backwards_checks;
73873 #endif
73874
73875 /*
73876 @@ -577,6 +577,10 @@ static int static_obj(void *obj)
73877 int i;
73878 #endif
73879
73880 +#ifdef CONFIG_PAX_KERNEXEC
73881 + start = ktla_ktva(start);
73882 +#endif
73883 +
73884 /*
73885 * static variable?
73886 */
73887 @@ -592,8 +596,7 @@ static int static_obj(void *obj)
73888 */
73889 for_each_possible_cpu(i) {
73890 start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
73891 - end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
73892 - + per_cpu_offset(i);
73893 + end = start + PERCPU_ENOUGH_ROOM;
73894
73895 if ((addr >= start) && (addr < end))
73896 return 1;
73897 @@ -710,6 +713,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
73898 if (!static_obj(lock->key)) {
73899 debug_locks_off();
73900 printk("INFO: trying to register non-static key.\n");
73901 + printk("lock:%pS key:%pS.\n", lock, lock->key);
73902 printk("the code is fine but needs lockdep annotation.\n");
73903 printk("turning off the locking correctness validator.\n");
73904 dump_stack();
73905 @@ -2751,7 +2755,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
73906 if (!class)
73907 return 0;
73908 }
73909 - debug_atomic_inc((atomic_t *)&class->ops);
73910 + debug_atomic_inc((atomic_unchecked_t *)&class->ops);
73911 if (very_verbose(class)) {
73912 printk("\nacquire class [%p] %s", class->key, class->name);
73913 if (class->name_version > 1)
73914 diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
73915 index a2ee95a..092f0f2 100644
73916 --- a/kernel/lockdep_internals.h
73917 +++ b/kernel/lockdep_internals.h
73918 @@ -113,26 +113,26 @@ lockdep_count_backward_deps(struct lock_class *class)
73919 /*
73920 * Various lockdep statistics:
73921 */
73922 -extern atomic_t chain_lookup_hits;
73923 -extern atomic_t chain_lookup_misses;
73924 -extern atomic_t hardirqs_on_events;
73925 -extern atomic_t hardirqs_off_events;
73926 -extern atomic_t redundant_hardirqs_on;
73927 -extern atomic_t redundant_hardirqs_off;
73928 -extern atomic_t softirqs_on_events;
73929 -extern atomic_t softirqs_off_events;
73930 -extern atomic_t redundant_softirqs_on;
73931 -extern atomic_t redundant_softirqs_off;
73932 -extern atomic_t nr_unused_locks;
73933 -extern atomic_t nr_cyclic_checks;
73934 -extern atomic_t nr_cyclic_check_recursions;
73935 -extern atomic_t nr_find_usage_forwards_checks;
73936 -extern atomic_t nr_find_usage_forwards_recursions;
73937 -extern atomic_t nr_find_usage_backwards_checks;
73938 -extern atomic_t nr_find_usage_backwards_recursions;
73939 -# define debug_atomic_inc(ptr) atomic_inc(ptr)
73940 -# define debug_atomic_dec(ptr) atomic_dec(ptr)
73941 -# define debug_atomic_read(ptr) atomic_read(ptr)
73942 +extern atomic_unchecked_t chain_lookup_hits;
73943 +extern atomic_unchecked_t chain_lookup_misses;
73944 +extern atomic_unchecked_t hardirqs_on_events;
73945 +extern atomic_unchecked_t hardirqs_off_events;
73946 +extern atomic_unchecked_t redundant_hardirqs_on;
73947 +extern atomic_unchecked_t redundant_hardirqs_off;
73948 +extern atomic_unchecked_t softirqs_on_events;
73949 +extern atomic_unchecked_t softirqs_off_events;
73950 +extern atomic_unchecked_t redundant_softirqs_on;
73951 +extern atomic_unchecked_t redundant_softirqs_off;
73952 +extern atomic_unchecked_t nr_unused_locks;
73953 +extern atomic_unchecked_t nr_cyclic_checks;
73954 +extern atomic_unchecked_t nr_cyclic_check_recursions;
73955 +extern atomic_unchecked_t nr_find_usage_forwards_checks;
73956 +extern atomic_unchecked_t nr_find_usage_forwards_recursions;
73957 +extern atomic_unchecked_t nr_find_usage_backwards_checks;
73958 +extern atomic_unchecked_t nr_find_usage_backwards_recursions;
73959 +# define debug_atomic_inc(ptr) atomic_inc_unchecked(ptr)
73960 +# define debug_atomic_dec(ptr) atomic_dec_unchecked(ptr)
73961 +# define debug_atomic_read(ptr) atomic_read_unchecked(ptr)
73962 #else
73963 # define debug_atomic_inc(ptr) do { } while (0)
73964 # define debug_atomic_dec(ptr) do { } while (0)
73965 diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
73966 index d4aba4f..02a353f 100644
73967 --- a/kernel/lockdep_proc.c
73968 +++ b/kernel/lockdep_proc.c
73969 @@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
73970
73971 static void print_name(struct seq_file *m, struct lock_class *class)
73972 {
73973 - char str[128];
73974 + char str[KSYM_NAME_LEN];
73975 const char *name = class->name;
73976
73977 if (!name) {
73978 diff --git a/kernel/module.c b/kernel/module.c
73979 index 4b270e6..2226274 100644
73980 --- a/kernel/module.c
73981 +++ b/kernel/module.c
73982 @@ -55,6 +55,7 @@
73983 #include <linux/async.h>
73984 #include <linux/percpu.h>
73985 #include <linux/kmemleak.h>
73986 +#include <linux/grsecurity.h>
73987
73988 #define CREATE_TRACE_POINTS
73989 #include <trace/events/module.h>
73990 @@ -89,7 +90,8 @@ static DECLARE_WAIT_QUEUE_HEAD(module_wq);
73991 static BLOCKING_NOTIFIER_HEAD(module_notify_list);
73992
73993 /* Bounds of module allocation, for speeding __module_address */
73994 -static unsigned long module_addr_min = -1UL, module_addr_max = 0;
73995 +static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
73996 +static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
73997
73998 int register_module_notifier(struct notifier_block * nb)
73999 {
74000 @@ -245,7 +247,7 @@ bool each_symbol(bool (*fn)(const struct symsearch *arr, struct module *owner,
74001 return true;
74002
74003 list_for_each_entry_rcu(mod, &modules, list) {
74004 - struct symsearch arr[] = {
74005 + struct symsearch modarr[] = {
74006 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
74007 NOT_GPL_ONLY, false },
74008 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
74009 @@ -267,7 +269,7 @@ bool each_symbol(bool (*fn)(const struct symsearch *arr, struct module *owner,
74010 #endif
74011 };
74012
74013 - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
74014 + if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
74015 return true;
74016 }
74017 return false;
74018 @@ -442,7 +444,7 @@ static void *percpu_modalloc(unsigned long size, unsigned long align,
74019 void *ptr;
74020 int cpu;
74021
74022 - if (align > PAGE_SIZE) {
74023 + if (align-1 >= PAGE_SIZE) {
74024 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
74025 name, align, PAGE_SIZE);
74026 align = PAGE_SIZE;
74027 @@ -1158,7 +1160,7 @@ static const struct kernel_symbol *resolve_symbol(Elf_Shdr *sechdrs,
74028 * /sys/module/foo/sections stuff
74029 * J. Corbet <corbet@lwn.net>
74030 */
74031 -#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS)
74032 +#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
74033
74034 static inline bool sect_empty(const Elf_Shdr *sect)
74035 {
74036 @@ -1545,7 +1547,8 @@ static void free_module(struct module *mod)
74037 destroy_params(mod->kp, mod->num_kp);
74038
74039 /* This may be NULL, but that's OK */
74040 - module_free(mod, mod->module_init);
74041 + module_free(mod, mod->module_init_rw);
74042 + module_free_exec(mod, mod->module_init_rx);
74043 kfree(mod->args);
74044 if (mod->percpu)
74045 percpu_modfree(mod->percpu);
74046 @@ -1554,10 +1557,12 @@ static void free_module(struct module *mod)
74047 percpu_modfree(mod->refptr);
74048 #endif
74049 /* Free lock-classes: */
74050 - lockdep_free_key_range(mod->module_core, mod->core_size);
74051 + lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
74052 + lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
74053
74054 /* Finally, free the core (containing the module structure) */
74055 - module_free(mod, mod->module_core);
74056 + module_free_exec(mod, mod->module_core_rx);
74057 + module_free(mod, mod->module_core_rw);
74058
74059 #ifdef CONFIG_MPU
74060 update_protections(current->mm);
74061 @@ -1628,8 +1633,32 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
74062 unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
74063 int ret = 0;
74064 const struct kernel_symbol *ksym;
74065 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
74066 + int is_fs_load = 0;
74067 + int register_filesystem_found = 0;
74068 + char *p;
74069 +
74070 + p = strstr(mod->args, "grsec_modharden_fs");
74071 +
74072 + if (p) {
74073 + char *endptr = p + strlen("grsec_modharden_fs");
74074 + /* copy \0 as well */
74075 + memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
74076 + is_fs_load = 1;
74077 + }
74078 +#endif
74079 +
74080
74081 for (i = 1; i < n; i++) {
74082 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
74083 + const char *name = strtab + sym[i].st_name;
74084 +
74085 + /* it's a real shame this will never get ripped and copied
74086 + upstream! ;(
74087 + */
74088 + if (is_fs_load && !strcmp(name, "register_filesystem"))
74089 + register_filesystem_found = 1;
74090 +#endif
74091 switch (sym[i].st_shndx) {
74092 case SHN_COMMON:
74093 /* We compiled with -fno-common. These are not
74094 @@ -1651,7 +1680,9 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
74095 strtab + sym[i].st_name, mod);
74096 /* Ok if resolved. */
74097 if (ksym) {
74098 + pax_open_kernel();
74099 sym[i].st_value = ksym->value;
74100 + pax_close_kernel();
74101 break;
74102 }
74103
74104 @@ -1670,11 +1701,20 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
74105 secbase = (unsigned long)mod->percpu;
74106 else
74107 secbase = sechdrs[sym[i].st_shndx].sh_addr;
74108 + pax_open_kernel();
74109 sym[i].st_value += secbase;
74110 + pax_close_kernel();
74111 break;
74112 }
74113 }
74114
74115 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
74116 + if (is_fs_load && !register_filesystem_found) {
74117 + printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
74118 + ret = -EPERM;
74119 + }
74120 +#endif
74121 +
74122 return ret;
74123 }
74124
74125 @@ -1731,11 +1771,12 @@ static void layout_sections(struct module *mod,
74126 || s->sh_entsize != ~0UL
74127 || strstarts(secstrings + s->sh_name, ".init"))
74128 continue;
74129 - s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
74130 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
74131 + s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
74132 + else
74133 + s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
74134 DEBUGP("\t%s\n", secstrings + s->sh_name);
74135 }
74136 - if (m == 0)
74137 - mod->core_text_size = mod->core_size;
74138 }
74139
74140 DEBUGP("Init section allocation order:\n");
74141 @@ -1748,12 +1789,13 @@ static void layout_sections(struct module *mod,
74142 || s->sh_entsize != ~0UL
74143 || !strstarts(secstrings + s->sh_name, ".init"))
74144 continue;
74145 - s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
74146 - | INIT_OFFSET_MASK);
74147 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
74148 + s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
74149 + else
74150 + s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
74151 + s->sh_entsize |= INIT_OFFSET_MASK;
74152 DEBUGP("\t%s\n", secstrings + s->sh_name);
74153 }
74154 - if (m == 0)
74155 - mod->init_text_size = mod->init_size;
74156 }
74157 }
74158
74159 @@ -1857,9 +1899,8 @@ static int is_exported(const char *name, unsigned long value,
74160
74161 /* As per nm */
74162 static char elf_type(const Elf_Sym *sym,
74163 - Elf_Shdr *sechdrs,
74164 - const char *secstrings,
74165 - struct module *mod)
74166 + const Elf_Shdr *sechdrs,
74167 + const char *secstrings)
74168 {
74169 if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
74170 if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
74171 @@ -1934,7 +1975,7 @@ static unsigned long layout_symtab(struct module *mod,
74172
74173 /* Put symbol section at end of init part of module. */
74174 symsect->sh_flags |= SHF_ALLOC;
74175 - symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
74176 + symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
74177 symindex) | INIT_OFFSET_MASK;
74178 DEBUGP("\t%s\n", secstrings + symsect->sh_name);
74179
74180 @@ -1951,19 +1992,19 @@ static unsigned long layout_symtab(struct module *mod,
74181 }
74182
74183 /* Append room for core symbols at end of core part. */
74184 - symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
74185 - mod->core_size = symoffs + ndst * sizeof(Elf_Sym);
74186 + symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
74187 + mod->core_size_rx = symoffs + ndst * sizeof(Elf_Sym);
74188
74189 /* Put string table section at end of init part of module. */
74190 strsect->sh_flags |= SHF_ALLOC;
74191 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
74192 + strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
74193 strindex) | INIT_OFFSET_MASK;
74194 DEBUGP("\t%s\n", secstrings + strsect->sh_name);
74195
74196 /* Append room for core symbols' strings at end of core part. */
74197 - *pstroffs = mod->core_size;
74198 + *pstroffs = mod->core_size_rx;
74199 __set_bit(0, strmap);
74200 - mod->core_size += bitmap_weight(strmap, strsect->sh_size);
74201 + mod->core_size_rx += bitmap_weight(strmap, strsect->sh_size);
74202
74203 return symoffs;
74204 }
74205 @@ -1987,12 +2028,14 @@ static void add_kallsyms(struct module *mod,
74206 mod->num_symtab = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
74207 mod->strtab = (void *)sechdrs[strindex].sh_addr;
74208
74209 + pax_open_kernel();
74210 +
74211 /* Set types up while we still have access to sections. */
74212 for (i = 0; i < mod->num_symtab; i++)
74213 mod->symtab[i].st_info
74214 - = elf_type(&mod->symtab[i], sechdrs, secstrings, mod);
74215 + = elf_type(&mod->symtab[i], sechdrs, secstrings);
74216
74217 - mod->core_symtab = dst = mod->module_core + symoffs;
74218 + mod->core_symtab = dst = mod->module_core_rx + symoffs;
74219 src = mod->symtab;
74220 *dst = *src;
74221 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
74222 @@ -2004,10 +2047,12 @@ static void add_kallsyms(struct module *mod,
74223 }
74224 mod->core_num_syms = ndst;
74225
74226 - mod->core_strtab = s = mod->module_core + stroffs;
74227 + mod->core_strtab = s = mod->module_core_rx + stroffs;
74228 for (*s = 0, i = 1; i < sechdrs[strindex].sh_size; ++i)
74229 if (test_bit(i, strmap))
74230 *++s = mod->strtab[i];
74231 +
74232 + pax_close_kernel();
74233 }
74234 #else
74235 static inline unsigned long layout_symtab(struct module *mod,
74236 @@ -2044,16 +2089,30 @@ static void dynamic_debug_setup(struct _ddebug *debug, unsigned int num)
74237 #endif
74238 }
74239
74240 -static void *module_alloc_update_bounds(unsigned long size)
74241 +static void *module_alloc_update_bounds_rw(unsigned long size)
74242 {
74243 void *ret = module_alloc(size);
74244
74245 if (ret) {
74246 /* Update module bounds. */
74247 - if ((unsigned long)ret < module_addr_min)
74248 - module_addr_min = (unsigned long)ret;
74249 - if ((unsigned long)ret + size > module_addr_max)
74250 - module_addr_max = (unsigned long)ret + size;
74251 + if ((unsigned long)ret < module_addr_min_rw)
74252 + module_addr_min_rw = (unsigned long)ret;
74253 + if ((unsigned long)ret + size > module_addr_max_rw)
74254 + module_addr_max_rw = (unsigned long)ret + size;
74255 + }
74256 + return ret;
74257 +}
74258 +
74259 +static void *module_alloc_update_bounds_rx(unsigned long size)
74260 +{
74261 + void *ret = module_alloc_exec(size);
74262 +
74263 + if (ret) {
74264 + /* Update module bounds. */
74265 + if ((unsigned long)ret < module_addr_min_rx)
74266 + module_addr_min_rx = (unsigned long)ret;
74267 + if ((unsigned long)ret + size > module_addr_max_rx)
74268 + module_addr_max_rx = (unsigned long)ret + size;
74269 }
74270 return ret;
74271 }
74272 @@ -2065,8 +2124,8 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
74273 unsigned int i;
74274
74275 /* only scan the sections containing data */
74276 - kmemleak_scan_area(mod->module_core, (unsigned long)mod -
74277 - (unsigned long)mod->module_core,
74278 + kmemleak_scan_area(mod->module_core_rw, (unsigned long)mod -
74279 + (unsigned long)mod->module_core_rw,
74280 sizeof(struct module), GFP_KERNEL);
74281
74282 for (i = 1; i < hdr->e_shnum; i++) {
74283 @@ -2076,8 +2135,8 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
74284 && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0)
74285 continue;
74286
74287 - kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr -
74288 - (unsigned long)mod->module_core,
74289 + kmemleak_scan_area(mod->module_core_rw, sechdrs[i].sh_addr -
74290 + (unsigned long)mod->module_core_rw,
74291 sechdrs[i].sh_size, GFP_KERNEL);
74292 }
74293 }
74294 @@ -2097,7 +2156,7 @@ static noinline struct module *load_module(void __user *umod,
74295 Elf_Ehdr *hdr;
74296 Elf_Shdr *sechdrs;
74297 char *secstrings, *args, *modmagic, *strtab = NULL;
74298 - char *staging;
74299 + char *staging, *license;
74300 unsigned int i;
74301 unsigned int symindex = 0;
74302 unsigned int strindex = 0;
74303 @@ -2195,6 +2254,14 @@ static noinline struct module *load_module(void __user *umod,
74304 goto free_hdr;
74305 }
74306
74307 + license = get_modinfo(sechdrs, infoindex, "license");
74308 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
74309 + if (!license || !license_is_gpl_compatible(license)) {
74310 + err = -ENOEXEC;
74311 + goto free_hdr;
74312 + }
74313 +#endif
74314 +
74315 modmagic = get_modinfo(sechdrs, infoindex, "vermagic");
74316 /* This is allowed: modprobe --force will invalidate it. */
74317 if (!modmagic) {
74318 @@ -2263,7 +2330,7 @@ static noinline struct module *load_module(void __user *umod,
74319 secstrings, &stroffs, strmap);
74320
74321 /* Do the allocs. */
74322 - ptr = module_alloc_update_bounds(mod->core_size);
74323 + ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
74324 /*
74325 * The pointer to this block is stored in the module structure
74326 * which is inside the block. Just mark it as not being a
74327 @@ -2274,23 +2341,47 @@ static noinline struct module *load_module(void __user *umod,
74328 err = -ENOMEM;
74329 goto free_percpu;
74330 }
74331 - memset(ptr, 0, mod->core_size);
74332 - mod->module_core = ptr;
74333 + memset(ptr, 0, mod->core_size_rw);
74334 + mod->module_core_rw = ptr;
74335
74336 - ptr = module_alloc_update_bounds(mod->init_size);
74337 + ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
74338 /*
74339 * The pointer to this block is stored in the module structure
74340 * which is inside the block. This block doesn't need to be
74341 * scanned as it contains data and code that will be freed
74342 * after the module is initialized.
74343 */
74344 - kmemleak_ignore(ptr);
74345 - if (!ptr && mod->init_size) {
74346 + kmemleak_not_leak(ptr);
74347 + if (!ptr && mod->init_size_rw) {
74348 err = -ENOMEM;
74349 - goto free_core;
74350 + goto free_core_rw;
74351 }
74352 - memset(ptr, 0, mod->init_size);
74353 - mod->module_init = ptr;
74354 + memset(ptr, 0, mod->init_size_rw);
74355 + mod->module_init_rw = ptr;
74356 +
74357 + ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
74358 + kmemleak_not_leak(ptr);
74359 + if (!ptr) {
74360 + err = -ENOMEM;
74361 + goto free_init_rw;
74362 + }
74363 +
74364 + pax_open_kernel();
74365 + memset(ptr, 0, mod->core_size_rx);
74366 + pax_close_kernel();
74367 + mod->module_core_rx = ptr;
74368 +
74369 + ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
74370 + kmemleak_not_leak(ptr);
74371 + if (!ptr && mod->init_size_rx) {
74372 + err = -ENOMEM;
74373 + goto free_core_rx;
74374 + }
74375 +
74376 + pax_open_kernel();
74377 + memset(ptr, 0, mod->init_size_rx);
74378 + pax_close_kernel();
74379 + mod->module_init_rx = ptr;
74380
74381 /* Transfer each section which specifies SHF_ALLOC */
74382 DEBUGP("final section addresses:\n");
74383 @@ -2300,17 +2391,45 @@ static noinline struct module *load_module(void __user *umod,
74384 if (!(sechdrs[i].sh_flags & SHF_ALLOC))
74385 continue;
74386
74387 - if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK)
74388 - dest = mod->module_init
74389 - + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
74390 - else
74391 - dest = mod->module_core + sechdrs[i].sh_entsize;
74392 + if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK) {
74393 + if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
74394 + dest = mod->module_init_rw
74395 + + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
74396 + else
74397 + dest = mod->module_init_rx
74398 + + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
74399 + } else {
74400 + if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
74401 + dest = mod->module_core_rw + sechdrs[i].sh_entsize;
74402 + else
74403 + dest = mod->module_core_rx + sechdrs[i].sh_entsize;
74404 + }
74405
74406 - if (sechdrs[i].sh_type != SHT_NOBITS)
74407 - memcpy(dest, (void *)sechdrs[i].sh_addr,
74408 - sechdrs[i].sh_size);
74409 + if (sechdrs[i].sh_type != SHT_NOBITS) {
74410 +
74411 +#ifdef CONFIG_PAX_KERNEXEC
74412 +#ifdef CONFIG_X86_64
74413 + if ((sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_EXECINSTR))
74414 + set_memory_x((unsigned long)dest, (sechdrs[i].sh_size + PAGE_SIZE) >> PAGE_SHIFT);
74415 +#endif
74416 + if (!(sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_ALLOC)) {
74417 + pax_open_kernel();
74418 + memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
74419 + pax_close_kernel();
74420 + } else
74421 +#endif
74422 +
74423 + memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
74424 + }
74425 /* Update sh_addr to point to copy in image. */
74426 - sechdrs[i].sh_addr = (unsigned long)dest;
74427 +
74428 +#ifdef CONFIG_PAX_KERNEXEC
74429 + if (sechdrs[i].sh_flags & SHF_EXECINSTR)
74430 + sechdrs[i].sh_addr = ktva_ktla((unsigned long)dest);
74431 + else
74432 +#endif
74433 +
74434 + sechdrs[i].sh_addr = (unsigned long)dest;
74435 DEBUGP("\t0x%lx %s\n", sechdrs[i].sh_addr, secstrings + sechdrs[i].sh_name);
74436 }
74437 /* Module has been moved. */
74438 @@ -2322,7 +2441,7 @@ static noinline struct module *load_module(void __user *umod,
74439 mod->name);
74440 if (!mod->refptr) {
74441 err = -ENOMEM;
74442 - goto free_init;
74443 + goto free_init_rx;
74444 }
74445 #endif
74446 /* Now we've moved module, initialize linked lists, etc. */
74447 @@ -2334,7 +2453,7 @@ static noinline struct module *load_module(void __user *umod,
74448 goto free_unload;
74449
74450 /* Set up license info based on the info section */
74451 - set_license(mod, get_modinfo(sechdrs, infoindex, "license"));
74452 + set_license(mod, license);
74453
74454 /*
74455 * ndiswrapper is under GPL by itself, but loads proprietary modules.
74456 @@ -2351,6 +2470,31 @@ static noinline struct module *load_module(void __user *umod,
74457 /* Set up MODINFO_ATTR fields */
74458 setup_modinfo(mod, sechdrs, infoindex);
74459
74460 + mod->args = args;
74461 +
74462 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
74463 + {
74464 + char *p, *p2;
74465 +
74466 + if (strstr(mod->args, "grsec_modharden_netdev")) {
74467 + printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
74468 + err = -EPERM;
74469 + goto cleanup;
74470 + } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
74471 + p += strlen("grsec_modharden_normal");
74472 + p2 = strstr(p, "_");
74473 + if (p2) {
74474 + *p2 = '\0';
74475 + printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
74476 + *p2 = '_';
74477 + }
74478 + err = -EPERM;
74479 + goto cleanup;
74480 + }
74481 + }
74482 +#endif
74483 +
74484 +
74485 /* Fix up syms, so that st_value is a pointer to location. */
74486 err = simplify_symbols(sechdrs, symindex, strtab, versindex, pcpuindex,
74487 mod);
74488 @@ -2431,8 +2575,8 @@ static noinline struct module *load_module(void __user *umod,
74489
74490 /* Now do relocations. */
74491 for (i = 1; i < hdr->e_shnum; i++) {
74492 - const char *strtab = (char *)sechdrs[strindex].sh_addr;
74493 unsigned int info = sechdrs[i].sh_info;
74494 + strtab = (char *)sechdrs[strindex].sh_addr;
74495
74496 /* Not a valid relocation section? */
74497 if (info >= hdr->e_shnum)
74498 @@ -2493,16 +2637,15 @@ static noinline struct module *load_module(void __user *umod,
74499 * Do it before processing of module parameters, so the module
74500 * can provide parameter accessor functions of its own.
74501 */
74502 - if (mod->module_init)
74503 - flush_icache_range((unsigned long)mod->module_init,
74504 - (unsigned long)mod->module_init
74505 - + mod->init_size);
74506 - flush_icache_range((unsigned long)mod->module_core,
74507 - (unsigned long)mod->module_core + mod->core_size);
74508 + if (mod->module_init_rx)
74509 + flush_icache_range((unsigned long)mod->module_init_rx,
74510 + (unsigned long)mod->module_init_rx
74511 + + mod->init_size_rx);
74512 + flush_icache_range((unsigned long)mod->module_core_rx,
74513 + (unsigned long)mod->module_core_rx + mod->core_size_rx);
74514
74515 set_fs(old_fs);
74516
74517 - mod->args = args;
74518 if (section_addr(hdr, sechdrs, secstrings, "__obsparm"))
74519 printk(KERN_WARNING "%s: Ignoring obsolete parameters\n",
74520 mod->name);
74521 @@ -2546,12 +2689,16 @@ static noinline struct module *load_module(void __user *umod,
74522 free_unload:
74523 module_unload_free(mod);
74524 #if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
74525 + free_init_rx:
74526 percpu_modfree(mod->refptr);
74527 - free_init:
74528 #endif
74529 - module_free(mod, mod->module_init);
74530 - free_core:
74531 - module_free(mod, mod->module_core);
74532 + module_free_exec(mod, mod->module_init_rx);
74533 + free_core_rx:
74534 + module_free_exec(mod, mod->module_core_rx);
74535 + free_init_rw:
74536 + module_free(mod, mod->module_init_rw);
74537 + free_core_rw:
74538 + module_free(mod, mod->module_core_rw);
74539 /* mod will be freed with core. Don't access it beyond this line! */
74540 free_percpu:
74541 if (percpu)
74542 @@ -2653,10 +2800,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
74543 mod->symtab = mod->core_symtab;
74544 mod->strtab = mod->core_strtab;
74545 #endif
74546 - module_free(mod, mod->module_init);
74547 - mod->module_init = NULL;
74548 - mod->init_size = 0;
74549 - mod->init_text_size = 0;
74550 + module_free(mod, mod->module_init_rw);
74551 + module_free_exec(mod, mod->module_init_rx);
74552 + mod->module_init_rw = NULL;
74553 + mod->module_init_rx = NULL;
74554 + mod->init_size_rw = 0;
74555 + mod->init_size_rx = 0;
74556 mutex_unlock(&module_mutex);
74557
74558 return 0;
74559 @@ -2687,10 +2836,16 @@ static const char *get_ksymbol(struct module *mod,
74560 unsigned long nextval;
74561
74562 /* At worse, next value is at end of module */
74563 - if (within_module_init(addr, mod))
74564 - nextval = (unsigned long)mod->module_init+mod->init_text_size;
74565 + if (within_module_init_rx(addr, mod))
74566 + nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
74567 + else if (within_module_init_rw(addr, mod))
74568 + nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
74569 + else if (within_module_core_rx(addr, mod))
74570 + nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
74571 + else if (within_module_core_rw(addr, mod))
74572 + nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
74573 else
74574 - nextval = (unsigned long)mod->module_core+mod->core_text_size;
74575 + return NULL;
74576
74577 /* Scan for closest preceeding symbol, and next symbol. (ELF
74578 starts real symbols at 1). */
74579 @@ -2936,7 +3091,7 @@ static int m_show(struct seq_file *m, void *p)
74580 char buf[8];
74581
74582 seq_printf(m, "%s %u",
74583 - mod->name, mod->init_size + mod->core_size);
74584 + mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
74585 print_unload_info(m, mod);
74586
74587 /* Informative for users. */
74588 @@ -2945,7 +3100,7 @@ static int m_show(struct seq_file *m, void *p)
74589 mod->state == MODULE_STATE_COMING ? "Loading":
74590 "Live");
74591 /* Used by oprofile and other similar tools. */
74592 - seq_printf(m, " 0x%p", mod->module_core);
74593 + seq_printf(m, " 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
74594
74595 /* Taints info */
74596 if (mod->taints)
74597 @@ -2981,7 +3136,17 @@ static const struct file_operations proc_modules_operations = {
74598
74599 static int __init proc_modules_init(void)
74600 {
74601 +#ifndef CONFIG_GRKERNSEC_HIDESYM
74602 +#ifdef CONFIG_GRKERNSEC_PROC_USER
74603 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
74604 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
74605 + proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
74606 +#else
74607 proc_create("modules", 0, NULL, &proc_modules_operations);
74608 +#endif
74609 +#else
74610 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
74611 +#endif
74612 return 0;
74613 }
74614 module_init(proc_modules_init);
74615 @@ -3040,12 +3205,12 @@ struct module *__module_address(unsigned long addr)
74616 {
74617 struct module *mod;
74618
74619 - if (addr < module_addr_min || addr > module_addr_max)
74620 + if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
74621 + (addr < module_addr_min_rw || addr > module_addr_max_rw))
74622 return NULL;
74623
74624 list_for_each_entry_rcu(mod, &modules, list)
74625 - if (within_module_core(addr, mod)
74626 - || within_module_init(addr, mod))
74627 + if (within_module_init(addr, mod) || within_module_core(addr, mod))
74628 return mod;
74629 return NULL;
74630 }
74631 @@ -3079,11 +3244,20 @@ bool is_module_text_address(unsigned long addr)
74632 */
74633 struct module *__module_text_address(unsigned long addr)
74634 {
74635 - struct module *mod = __module_address(addr);
74636 + struct module *mod;
74637 +
74638 +#ifdef CONFIG_X86_32
74639 + addr = ktla_ktva(addr);
74640 +#endif
74641 +
74642 + if (addr < module_addr_min_rx || addr > module_addr_max_rx)
74643 + return NULL;
74644 +
74645 + mod = __module_address(addr);
74646 +
74647 if (mod) {
74648 /* Make sure it's within the text section. */
74649 - if (!within(addr, mod->module_init, mod->init_text_size)
74650 - && !within(addr, mod->module_core, mod->core_text_size))
74651 + if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
74652 mod = NULL;
74653 }
74654 return mod;
74655 diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
74656 index ec815a9..fe46e99 100644
74657 --- a/kernel/mutex-debug.c
74658 +++ b/kernel/mutex-debug.c
74659 @@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
74660 }
74661
74662 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
74663 - struct thread_info *ti)
74664 + struct task_struct *task)
74665 {
74666 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
74667
74668 /* Mark the current thread as blocked on the lock: */
74669 - ti->task->blocked_on = waiter;
74670 + task->blocked_on = waiter;
74671 }
74672
74673 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
74674 - struct thread_info *ti)
74675 + struct task_struct *task)
74676 {
74677 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
74678 - DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
74679 - DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
74680 - ti->task->blocked_on = NULL;
74681 + DEBUG_LOCKS_WARN_ON(waiter->task != task);
74682 + DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
74683 + task->blocked_on = NULL;
74684
74685 list_del_init(&waiter->list);
74686 waiter->task = NULL;
74687 @@ -75,7 +75,7 @@ void debug_mutex_unlock(struct mutex *lock)
74688 return;
74689
74690 DEBUG_LOCKS_WARN_ON(lock->magic != lock);
74691 - DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
74692 + DEBUG_LOCKS_WARN_ON(lock->owner != current);
74693 DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
74694 mutex_clear_owner(lock);
74695 }
74696 diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
74697 index 6b2d735..372d3c4 100644
74698 --- a/kernel/mutex-debug.h
74699 +++ b/kernel/mutex-debug.h
74700 @@ -20,16 +20,16 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
74701 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
74702 extern void debug_mutex_add_waiter(struct mutex *lock,
74703 struct mutex_waiter *waiter,
74704 - struct thread_info *ti);
74705 + struct task_struct *task);
74706 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
74707 - struct thread_info *ti);
74708 + struct task_struct *task);
74709 extern void debug_mutex_unlock(struct mutex *lock);
74710 extern void debug_mutex_init(struct mutex *lock, const char *name,
74711 struct lock_class_key *key);
74712
74713 static inline void mutex_set_owner(struct mutex *lock)
74714 {
74715 - lock->owner = current_thread_info();
74716 + lock->owner = current;
74717 }
74718
74719 static inline void mutex_clear_owner(struct mutex *lock)
74720 diff --git a/kernel/mutex.c b/kernel/mutex.c
74721 index f85644c..5ee9f77 100644
74722 --- a/kernel/mutex.c
74723 +++ b/kernel/mutex.c
74724 @@ -169,7 +169,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
74725 */
74726
74727 for (;;) {
74728 - struct thread_info *owner;
74729 + struct task_struct *owner;
74730
74731 /*
74732 * If we own the BKL, then don't spin. The owner of
74733 @@ -214,7 +214,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
74734 spin_lock_mutex(&lock->wait_lock, flags);
74735
74736 debug_mutex_lock_common(lock, &waiter);
74737 - debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
74738 + debug_mutex_add_waiter(lock, &waiter, task);
74739
74740 /* add waiting tasks to the end of the waitqueue (FIFO): */
74741 list_add_tail(&waiter.list, &lock->wait_list);
74742 @@ -243,8 +243,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
74743 * TASK_UNINTERRUPTIBLE case.)
74744 */
74745 if (unlikely(signal_pending_state(state, task))) {
74746 - mutex_remove_waiter(lock, &waiter,
74747 - task_thread_info(task));
74748 + mutex_remove_waiter(lock, &waiter, task);
74749 mutex_release(&lock->dep_map, 1, ip);
74750 spin_unlock_mutex(&lock->wait_lock, flags);
74751
74752 @@ -265,7 +264,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
74753 done:
74754 lock_acquired(&lock->dep_map, ip);
74755 /* got the lock - rejoice! */
74756 - mutex_remove_waiter(lock, &waiter, current_thread_info());
74757 + mutex_remove_waiter(lock, &waiter, task);
74758 mutex_set_owner(lock);
74759
74760 /* set it to 0 if there are no waiters left: */
74761 diff --git a/kernel/mutex.h b/kernel/mutex.h
74762 index 67578ca..4115fbf 100644
74763 --- a/kernel/mutex.h
74764 +++ b/kernel/mutex.h
74765 @@ -19,7 +19,7 @@
74766 #ifdef CONFIG_SMP
74767 static inline void mutex_set_owner(struct mutex *lock)
74768 {
74769 - lock->owner = current_thread_info();
74770 + lock->owner = current;
74771 }
74772
74773 static inline void mutex_clear_owner(struct mutex *lock)
74774 diff --git a/kernel/panic.c b/kernel/panic.c
74775 index 96b45d0..ff70a46 100644
74776 --- a/kernel/panic.c
74777 +++ b/kernel/panic.c
74778 @@ -71,7 +71,11 @@ NORET_TYPE void panic(const char * fmt, ...)
74779 va_end(args);
74780 printk(KERN_EMERG "Kernel panic - not syncing: %s\n",buf);
74781 #ifdef CONFIG_DEBUG_BUGVERBOSE
74782 - dump_stack();
74783 + /*
74784 + * Avoid nested stack-dumping if a panic occurs during oops processing
74785 + */
74786 + if (!oops_in_progress)
74787 + dump_stack();
74788 #endif
74789
74790 /*
74791 @@ -352,7 +356,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller, struc
74792 const char *board;
74793
74794 printk(KERN_WARNING "------------[ cut here ]------------\n");
74795 - printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
74796 + printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
74797 board = dmi_get_system_info(DMI_PRODUCT_NAME);
74798 if (board)
74799 printk(KERN_WARNING "Hardware name: %s\n", board);
74800 @@ -392,7 +396,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
74801 */
74802 void __stack_chk_fail(void)
74803 {
74804 - panic("stack-protector: Kernel stack is corrupted in: %p\n",
74805 + dump_stack();
74806 + panic("stack-protector: Kernel stack is corrupted in: %pA\n",
74807 __builtin_return_address(0));
74808 }
74809 EXPORT_SYMBOL(__stack_chk_fail);
74810 diff --git a/kernel/params.c b/kernel/params.c
74811 index d656c27..21e452c 100644
74812 --- a/kernel/params.c
74813 +++ b/kernel/params.c
74814 @@ -725,7 +725,7 @@ static ssize_t module_attr_store(struct kobject *kobj,
74815 return ret;
74816 }
74817
74818 -static struct sysfs_ops module_sysfs_ops = {
74819 +static const struct sysfs_ops module_sysfs_ops = {
74820 .show = module_attr_show,
74821 .store = module_attr_store,
74822 };
74823 @@ -739,7 +739,7 @@ static int uevent_filter(struct kset *kset, struct kobject *kobj)
74824 return 0;
74825 }
74826
74827 -static struct kset_uevent_ops module_uevent_ops = {
74828 +static const struct kset_uevent_ops module_uevent_ops = {
74829 .filter = uevent_filter,
74830 };
74831
74832 diff --git a/kernel/perf_event.c b/kernel/perf_event.c
74833 index 37ebc14..9c121d9 100644
74834 --- a/kernel/perf_event.c
74835 +++ b/kernel/perf_event.c
74836 @@ -77,7 +77,7 @@ int sysctl_perf_event_mlock __read_mostly = 516; /* 'free' kb per user */
74837 */
74838 int sysctl_perf_event_sample_rate __read_mostly = 100000;
74839
74840 -static atomic64_t perf_event_id;
74841 +static atomic64_unchecked_t perf_event_id;
74842
74843 /*
74844 * Lock for (sysadmin-configurable) event reservations:
74845 @@ -1094,9 +1094,9 @@ static void __perf_event_sync_stat(struct perf_event *event,
74846 * In order to keep per-task stats reliable we need to flip the event
74847 * values when we flip the contexts.
74848 */
74849 - value = atomic64_read(&next_event->count);
74850 - value = atomic64_xchg(&event->count, value);
74851 - atomic64_set(&next_event->count, value);
74852 + value = atomic64_read_unchecked(&next_event->count);
74853 + value = atomic64_xchg_unchecked(&event->count, value);
74854 + atomic64_set_unchecked(&next_event->count, value);
74855
74856 swap(event->total_time_enabled, next_event->total_time_enabled);
74857 swap(event->total_time_running, next_event->total_time_running);
74858 @@ -1552,7 +1552,7 @@ static u64 perf_event_read(struct perf_event *event)
74859 update_event_times(event);
74860 }
74861
74862 - return atomic64_read(&event->count);
74863 + return atomic64_read_unchecked(&event->count);
74864 }
74865
74866 /*
74867 @@ -1790,11 +1790,11 @@ static int perf_event_read_group(struct perf_event *event,
74868 values[n++] = 1 + leader->nr_siblings;
74869 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
74870 values[n++] = leader->total_time_enabled +
74871 - atomic64_read(&leader->child_total_time_enabled);
74872 + atomic64_read_unchecked(&leader->child_total_time_enabled);
74873 }
74874 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
74875 values[n++] = leader->total_time_running +
74876 - atomic64_read(&leader->child_total_time_running);
74877 + atomic64_read_unchecked(&leader->child_total_time_running);
74878 }
74879
74880 size = n * sizeof(u64);
74881 @@ -1829,11 +1829,11 @@ static int perf_event_read_one(struct perf_event *event,
74882 values[n++] = perf_event_read_value(event);
74883 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
74884 values[n++] = event->total_time_enabled +
74885 - atomic64_read(&event->child_total_time_enabled);
74886 + atomic64_read_unchecked(&event->child_total_time_enabled);
74887 }
74888 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
74889 values[n++] = event->total_time_running +
74890 - atomic64_read(&event->child_total_time_running);
74891 + atomic64_read_unchecked(&event->child_total_time_running);
74892 }
74893 if (read_format & PERF_FORMAT_ID)
74894 values[n++] = primary_event_id(event);
74895 @@ -1903,7 +1903,7 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
74896 static void perf_event_reset(struct perf_event *event)
74897 {
74898 (void)perf_event_read(event);
74899 - atomic64_set(&event->count, 0);
74900 + atomic64_set_unchecked(&event->count, 0);
74901 perf_event_update_userpage(event);
74902 }
74903
74904 @@ -2079,15 +2079,15 @@ void perf_event_update_userpage(struct perf_event *event)
74905 ++userpg->lock;
74906 barrier();
74907 userpg->index = perf_event_index(event);
74908 - userpg->offset = atomic64_read(&event->count);
74909 + userpg->offset = atomic64_read_unchecked(&event->count);
74910 if (event->state == PERF_EVENT_STATE_ACTIVE)
74911 - userpg->offset -= atomic64_read(&event->hw.prev_count);
74912 + userpg->offset -= atomic64_read_unchecked(&event->hw.prev_count);
74913
74914 userpg->time_enabled = event->total_time_enabled +
74915 - atomic64_read(&event->child_total_time_enabled);
74916 + atomic64_read_unchecked(&event->child_total_time_enabled);
74917
74918 userpg->time_running = event->total_time_running +
74919 - atomic64_read(&event->child_total_time_running);
74920 + atomic64_read_unchecked(&event->child_total_time_running);
74921
74922 barrier();
74923 ++userpg->lock;
74924 @@ -2903,14 +2903,14 @@ static void perf_output_read_one(struct perf_output_handle *handle,
74925 u64 values[4];
74926 int n = 0;
74927
74928 - values[n++] = atomic64_read(&event->count);
74929 + values[n++] = atomic64_read_unchecked(&event->count);
74930 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
74931 values[n++] = event->total_time_enabled +
74932 - atomic64_read(&event->child_total_time_enabled);
74933 + atomic64_read_unchecked(&event->child_total_time_enabled);
74934 }
74935 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
74936 values[n++] = event->total_time_running +
74937 - atomic64_read(&event->child_total_time_running);
74938 + atomic64_read_unchecked(&event->child_total_time_running);
74939 }
74940 if (read_format & PERF_FORMAT_ID)
74941 values[n++] = primary_event_id(event);
74942 @@ -2940,7 +2940,7 @@ static void perf_output_read_group(struct perf_output_handle *handle,
74943 if (leader != event)
74944 leader->pmu->read(leader);
74945
74946 - values[n++] = atomic64_read(&leader->count);
74947 + values[n++] = atomic64_read_unchecked(&leader->count);
74948 if (read_format & PERF_FORMAT_ID)
74949 values[n++] = primary_event_id(leader);
74950
74951 @@ -2952,7 +2952,7 @@ static void perf_output_read_group(struct perf_output_handle *handle,
74952 if (sub != event)
74953 sub->pmu->read(sub);
74954
74955 - values[n++] = atomic64_read(&sub->count);
74956 + values[n++] = atomic64_read_unchecked(&sub->count);
74957 if (read_format & PERF_FORMAT_ID)
74958 values[n++] = primary_event_id(sub);
74959
74960 @@ -3525,12 +3525,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
74961 * need to add enough zero bytes after the string to handle
74962 * the 64bit alignment we do later.
74963 */
74964 - buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
74965 + buf = kzalloc(PATH_MAX, GFP_KERNEL);
74966 if (!buf) {
74967 name = strncpy(tmp, "//enomem", sizeof(tmp));
74968 goto got_name;
74969 }
74970 - name = d_path(&file->f_path, buf, PATH_MAX);
74971 + name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
74972 if (IS_ERR(name)) {
74973 name = strncpy(tmp, "//toolong", sizeof(tmp));
74974 goto got_name;
74975 @@ -3783,7 +3783,7 @@ static void perf_swevent_add(struct perf_event *event, u64 nr,
74976 {
74977 struct hw_perf_event *hwc = &event->hw;
74978
74979 - atomic64_add(nr, &event->count);
74980 + atomic64_add_unchecked(nr, &event->count);
74981
74982 if (!hwc->sample_period)
74983 return;
74984 @@ -4040,9 +4040,9 @@ static void cpu_clock_perf_event_update(struct perf_event *event)
74985 u64 now;
74986
74987 now = cpu_clock(cpu);
74988 - prev = atomic64_read(&event->hw.prev_count);
74989 - atomic64_set(&event->hw.prev_count, now);
74990 - atomic64_add(now - prev, &event->count);
74991 + prev = atomic64_read_unchecked(&event->hw.prev_count);
74992 + atomic64_set_unchecked(&event->hw.prev_count, now);
74993 + atomic64_add_unchecked(now - prev, &event->count);
74994 }
74995
74996 static int cpu_clock_perf_event_enable(struct perf_event *event)
74997 @@ -4050,7 +4050,7 @@ static int cpu_clock_perf_event_enable(struct perf_event *event)
74998 struct hw_perf_event *hwc = &event->hw;
74999 int cpu = raw_smp_processor_id();
75000
75001 - atomic64_set(&hwc->prev_count, cpu_clock(cpu));
75002 + atomic64_set_unchecked(&hwc->prev_count, cpu_clock(cpu));
75003 perf_swevent_start_hrtimer(event);
75004
75005 return 0;
75006 @@ -4082,9 +4082,9 @@ static void task_clock_perf_event_update(struct perf_event *event, u64 now)
75007 u64 prev;
75008 s64 delta;
75009
75010 - prev = atomic64_xchg(&event->hw.prev_count, now);
75011 + prev = atomic64_xchg_unchecked(&event->hw.prev_count, now);
75012 delta = now - prev;
75013 - atomic64_add(delta, &event->count);
75014 + atomic64_add_unchecked(delta, &event->count);
75015 }
75016
75017 static int task_clock_perf_event_enable(struct perf_event *event)
75018 @@ -4094,7 +4094,7 @@ static int task_clock_perf_event_enable(struct perf_event *event)
75019
75020 now = event->ctx->time;
75021
75022 - atomic64_set(&hwc->prev_count, now);
75023 + atomic64_set_unchecked(&hwc->prev_count, now);
75024
75025 perf_swevent_start_hrtimer(event);
75026
75027 @@ -4289,7 +4289,7 @@ perf_event_alloc(struct perf_event_attr *attr,
75028 event->parent = parent_event;
75029
75030 event->ns = get_pid_ns(current->nsproxy->pid_ns);
75031 - event->id = atomic64_inc_return(&perf_event_id);
75032 + event->id = atomic64_inc_return_unchecked(&perf_event_id);
75033
75034 event->state = PERF_EVENT_STATE_INACTIVE;
75035
75036 @@ -4720,15 +4720,15 @@ static void sync_child_event(struct perf_event *child_event,
75037 if (child_event->attr.inherit_stat)
75038 perf_event_read_event(child_event, child);
75039
75040 - child_val = atomic64_read(&child_event->count);
75041 + child_val = atomic64_read_unchecked(&child_event->count);
75042
75043 /*
75044 * Add back the child's count to the parent's count:
75045 */
75046 - atomic64_add(child_val, &parent_event->count);
75047 - atomic64_add(child_event->total_time_enabled,
75048 + atomic64_add_unchecked(child_val, &parent_event->count);
75049 + atomic64_add_unchecked(child_event->total_time_enabled,
75050 &parent_event->child_total_time_enabled);
75051 - atomic64_add(child_event->total_time_running,
75052 + atomic64_add_unchecked(child_event->total_time_running,
75053 &parent_event->child_total_time_running);
75054
75055 /*
75056 diff --git a/kernel/pid.c b/kernel/pid.c
75057 index fce7198..4f23a7e 100644
75058 --- a/kernel/pid.c
75059 +++ b/kernel/pid.c
75060 @@ -33,6 +33,7 @@
75061 #include <linux/rculist.h>
75062 #include <linux/bootmem.h>
75063 #include <linux/hash.h>
75064 +#include <linux/security.h>
75065 #include <linux/pid_namespace.h>
75066 #include <linux/init_task.h>
75067 #include <linux/syscalls.h>
75068 @@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
75069
75070 int pid_max = PID_MAX_DEFAULT;
75071
75072 -#define RESERVED_PIDS 300
75073 +#define RESERVED_PIDS 500
75074
75075 int pid_max_min = RESERVED_PIDS + 1;
75076 int pid_max_max = PID_MAX_LIMIT;
75077 @@ -383,7 +384,14 @@ EXPORT_SYMBOL(pid_task);
75078 */
75079 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
75080 {
75081 - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
75082 + struct task_struct *task;
75083 +
75084 + task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
75085 +
75086 + if (gr_pid_is_chrooted(task))
75087 + return NULL;
75088 +
75089 + return task;
75090 }
75091
75092 struct task_struct *find_task_by_vpid(pid_t vnr)
75093 @@ -391,6 +399,11 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
75094 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
75095 }
75096
75097 +struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
75098 +{
75099 + return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
75100 +}
75101 +
75102 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
75103 {
75104 struct pid *pid;
75105 diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
75106 index 5c9dc22..d271117 100644
75107 --- a/kernel/posix-cpu-timers.c
75108 +++ b/kernel/posix-cpu-timers.c
75109 @@ -6,6 +6,7 @@
75110 #include <linux/posix-timers.h>
75111 #include <linux/errno.h>
75112 #include <linux/math64.h>
75113 +#include <linux/security.h>
75114 #include <asm/uaccess.h>
75115 #include <linux/kernel_stat.h>
75116 #include <trace/events/timer.h>
75117 @@ -1697,7 +1698,7 @@ static long thread_cpu_nsleep_restart(struct restart_block *restart_block)
75118
75119 static __init int init_posix_cpu_timers(void)
75120 {
75121 - struct k_clock process = {
75122 + static struct k_clock process = {
75123 .clock_getres = process_cpu_clock_getres,
75124 .clock_get = process_cpu_clock_get,
75125 .clock_set = do_posix_clock_nosettime,
75126 @@ -1705,7 +1706,7 @@ static __init int init_posix_cpu_timers(void)
75127 .nsleep = process_cpu_nsleep,
75128 .nsleep_restart = process_cpu_nsleep_restart,
75129 };
75130 - struct k_clock thread = {
75131 + static struct k_clock thread = {
75132 .clock_getres = thread_cpu_clock_getres,
75133 .clock_get = thread_cpu_clock_get,
75134 .clock_set = do_posix_clock_nosettime,
75135 diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
75136 index 5e76d22..cf1baeb 100644
75137 --- a/kernel/posix-timers.c
75138 +++ b/kernel/posix-timers.c
75139 @@ -42,6 +42,7 @@
75140 #include <linux/compiler.h>
75141 #include <linux/idr.h>
75142 #include <linux/posix-timers.h>
75143 +#include <linux/grsecurity.h>
75144 #include <linux/syscalls.h>
75145 #include <linux/wait.h>
75146 #include <linux/workqueue.h>
75147 @@ -131,7 +132,7 @@ static DEFINE_SPINLOCK(idr_lock);
75148 * which we beg off on and pass to do_sys_settimeofday().
75149 */
75150
75151 -static struct k_clock posix_clocks[MAX_CLOCKS];
75152 +static struct k_clock *posix_clocks[MAX_CLOCKS];
75153
75154 /*
75155 * These ones are defined below.
75156 @@ -157,8 +158,8 @@ static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)
75157 */
75158 #define CLOCK_DISPATCH(clock, call, arglist) \
75159 ((clock) < 0 ? posix_cpu_##call arglist : \
75160 - (posix_clocks[clock].call != NULL \
75161 - ? (*posix_clocks[clock].call) arglist : common_##call arglist))
75162 + (posix_clocks[clock]->call != NULL \
75163 + ? (*posix_clocks[clock]->call) arglist : common_##call arglist))
75164
75165 /*
75166 * Default clock hook functions when the struct k_clock passed
75167 @@ -172,7 +173,7 @@ static inline int common_clock_getres(const clockid_t which_clock,
75168 struct timespec *tp)
75169 {
75170 tp->tv_sec = 0;
75171 - tp->tv_nsec = posix_clocks[which_clock].res;
75172 + tp->tv_nsec = posix_clocks[which_clock]->res;
75173 return 0;
75174 }
75175
75176 @@ -217,9 +218,11 @@ static inline int invalid_clockid(const clockid_t which_clock)
75177 return 0;
75178 if ((unsigned) which_clock >= MAX_CLOCKS)
75179 return 1;
75180 - if (posix_clocks[which_clock].clock_getres != NULL)
75181 + if (posix_clocks[which_clock] == NULL)
75182 return 0;
75183 - if (posix_clocks[which_clock].res != 0)
75184 + if (posix_clocks[which_clock]->clock_getres != NULL)
75185 + return 0;
75186 + if (posix_clocks[which_clock]->res != 0)
75187 return 0;
75188 return 1;
75189 }
75190 @@ -266,29 +269,29 @@ int posix_get_coarse_res(const clockid_t which_clock, struct timespec *tp)
75191 */
75192 static __init int init_posix_timers(void)
75193 {
75194 - struct k_clock clock_realtime = {
75195 + static struct k_clock clock_realtime = {
75196 .clock_getres = hrtimer_get_res,
75197 };
75198 - struct k_clock clock_monotonic = {
75199 + static struct k_clock clock_monotonic = {
75200 .clock_getres = hrtimer_get_res,
75201 .clock_get = posix_ktime_get_ts,
75202 .clock_set = do_posix_clock_nosettime,
75203 };
75204 - struct k_clock clock_monotonic_raw = {
75205 + static struct k_clock clock_monotonic_raw = {
75206 .clock_getres = hrtimer_get_res,
75207 .clock_get = posix_get_monotonic_raw,
75208 .clock_set = do_posix_clock_nosettime,
75209 .timer_create = no_timer_create,
75210 .nsleep = no_nsleep,
75211 };
75212 - struct k_clock clock_realtime_coarse = {
75213 + static struct k_clock clock_realtime_coarse = {
75214 .clock_getres = posix_get_coarse_res,
75215 .clock_get = posix_get_realtime_coarse,
75216 .clock_set = do_posix_clock_nosettime,
75217 .timer_create = no_timer_create,
75218 .nsleep = no_nsleep,
75219 };
75220 - struct k_clock clock_monotonic_coarse = {
75221 + static struct k_clock clock_monotonic_coarse = {
75222 .clock_getres = posix_get_coarse_res,
75223 .clock_get = posix_get_monotonic_coarse,
75224 .clock_set = do_posix_clock_nosettime,
75225 @@ -296,6 +299,8 @@ static __init int init_posix_timers(void)
75226 .nsleep = no_nsleep,
75227 };
75228
75229 + pax_track_stack();
75230 +
75231 register_posix_clock(CLOCK_REALTIME, &clock_realtime);
75232 register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
75233 register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
75234 @@ -484,7 +489,7 @@ void register_posix_clock(const clockid_t clock_id, struct k_clock *new_clock)
75235 return;
75236 }
75237
75238 - posix_clocks[clock_id] = *new_clock;
75239 + posix_clocks[clock_id] = new_clock;
75240 }
75241 EXPORT_SYMBOL_GPL(register_posix_clock);
75242
75243 @@ -948,6 +953,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
75244 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
75245 return -EFAULT;
75246
75247 + /* only the CLOCK_REALTIME clock can be set, all other clocks
75248 + have their clock_set fptr set to a nosettime dummy function
75249 + CLOCK_REALTIME has a NULL clock_set fptr which causes it to
75250 + call common_clock_set, which calls do_sys_settimeofday, which
75251 + we hook
75252 + */
75253 +
75254 return CLOCK_DISPATCH(which_clock, clock_set, (which_clock, &new_tp));
75255 }
75256
75257 diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
75258 index 04a9e90..bc355aa 100644
75259 --- a/kernel/power/hibernate.c
75260 +++ b/kernel/power/hibernate.c
75261 @@ -48,14 +48,14 @@ enum {
75262
75263 static int hibernation_mode = HIBERNATION_SHUTDOWN;
75264
75265 -static struct platform_hibernation_ops *hibernation_ops;
75266 +static const struct platform_hibernation_ops *hibernation_ops;
75267
75268 /**
75269 * hibernation_set_ops - set the global hibernate operations
75270 * @ops: the hibernation operations to use in subsequent hibernation transitions
75271 */
75272
75273 -void hibernation_set_ops(struct platform_hibernation_ops *ops)
75274 +void hibernation_set_ops(const struct platform_hibernation_ops *ops)
75275 {
75276 if (ops && !(ops->begin && ops->end && ops->pre_snapshot
75277 && ops->prepare && ops->finish && ops->enter && ops->pre_restore
75278 diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
75279 index e8b3370..484c2e4 100644
75280 --- a/kernel/power/poweroff.c
75281 +++ b/kernel/power/poweroff.c
75282 @@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
75283 .enable_mask = SYSRQ_ENABLE_BOOT,
75284 };
75285
75286 -static int pm_sysrq_init(void)
75287 +static int __init pm_sysrq_init(void)
75288 {
75289 register_sysrq_key('o', &sysrq_poweroff_op);
75290 return 0;
75291 diff --git a/kernel/power/process.c b/kernel/power/process.c
75292 index e7cd671..56d5f459 100644
75293 --- a/kernel/power/process.c
75294 +++ b/kernel/power/process.c
75295 @@ -37,12 +37,15 @@ static int try_to_freeze_tasks(bool sig_only)
75296 struct timeval start, end;
75297 u64 elapsed_csecs64;
75298 unsigned int elapsed_csecs;
75299 + bool timedout = false;
75300
75301 do_gettimeofday(&start);
75302
75303 end_time = jiffies + TIMEOUT;
75304 do {
75305 todo = 0;
75306 + if (time_after(jiffies, end_time))
75307 + timedout = true;
75308 read_lock(&tasklist_lock);
75309 do_each_thread(g, p) {
75310 if (frozen(p) || !freezeable(p))
75311 @@ -57,15 +60,17 @@ static int try_to_freeze_tasks(bool sig_only)
75312 * It is "frozen enough". If the task does wake
75313 * up, it will immediately call try_to_freeze.
75314 */
75315 - if (!task_is_stopped_or_traced(p) &&
75316 - !freezer_should_skip(p))
75317 + if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
75318 todo++;
75319 + if (timedout) {
75320 + printk(KERN_ERR "Task refusing to freeze:\n");
75321 + sched_show_task(p);
75322 + }
75323 + }
75324 } while_each_thread(g, p);
75325 read_unlock(&tasklist_lock);
75326 yield(); /* Yield is okay here */
75327 - if (time_after(jiffies, end_time))
75328 - break;
75329 - } while (todo);
75330 + } while (todo && !timedout);
75331
75332 do_gettimeofday(&end);
75333 elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
75334 diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
75335 index 40dd021..fb30ceb 100644
75336 --- a/kernel/power/suspend.c
75337 +++ b/kernel/power/suspend.c
75338 @@ -23,13 +23,13 @@ const char *const pm_states[PM_SUSPEND_MAX] = {
75339 [PM_SUSPEND_MEM] = "mem",
75340 };
75341
75342 -static struct platform_suspend_ops *suspend_ops;
75343 +static const struct platform_suspend_ops *suspend_ops;
75344
75345 /**
75346 * suspend_set_ops - Set the global suspend method table.
75347 * @ops: Pointer to ops structure.
75348 */
75349 -void suspend_set_ops(struct platform_suspend_ops *ops)
75350 +void suspend_set_ops(const struct platform_suspend_ops *ops)
75351 {
75352 mutex_lock(&pm_mutex);
75353 suspend_ops = ops;
75354 diff --git a/kernel/printk.c b/kernel/printk.c
75355 index 4cade47..4d17900 100644
75356 --- a/kernel/printk.c
75357 +++ b/kernel/printk.c
75358 @@ -33,6 +33,7 @@
75359 #include <linux/bootmem.h>
75360 #include <linux/syscalls.h>
75361 #include <linux/kexec.h>
75362 +#include <linux/syslog.h>
75363
75364 #include <asm/uaccess.h>
75365
75366 @@ -256,38 +257,30 @@ static inline void boot_delay_msec(void)
75367 }
75368 #endif
75369
75370 -/*
75371 - * Commands to do_syslog:
75372 - *
75373 - * 0 -- Close the log. Currently a NOP.
75374 - * 1 -- Open the log. Currently a NOP.
75375 - * 2 -- Read from the log.
75376 - * 3 -- Read all messages remaining in the ring buffer.
75377 - * 4 -- Read and clear all messages remaining in the ring buffer
75378 - * 5 -- Clear ring buffer.
75379 - * 6 -- Disable printk's to console
75380 - * 7 -- Enable printk's to console
75381 - * 8 -- Set level of messages printed to console
75382 - * 9 -- Return number of unread characters in the log buffer
75383 - * 10 -- Return size of the log buffer
75384 - */
75385 -int do_syslog(int type, char __user *buf, int len)
75386 +int do_syslog(int type, char __user *buf, int len, bool from_file)
75387 {
75388 unsigned i, j, limit, count;
75389 int do_clear = 0;
75390 char c;
75391 int error = 0;
75392
75393 - error = security_syslog(type);
75394 +#ifdef CONFIG_GRKERNSEC_DMESG
75395 + if (grsec_enable_dmesg &&
75396 + (!from_file || (from_file && type == SYSLOG_ACTION_OPEN)) &&
75397 + !capable(CAP_SYS_ADMIN))
75398 + return -EPERM;
75399 +#endif
75400 +
75401 + error = security_syslog(type, from_file);
75402 if (error)
75403 return error;
75404
75405 switch (type) {
75406 - case 0: /* Close log */
75407 + case SYSLOG_ACTION_CLOSE: /* Close log */
75408 break;
75409 - case 1: /* Open log */
75410 + case SYSLOG_ACTION_OPEN: /* Open log */
75411 break;
75412 - case 2: /* Read from log */
75413 + case SYSLOG_ACTION_READ: /* Read from log */
75414 error = -EINVAL;
75415 if (!buf || len < 0)
75416 goto out;
75417 @@ -318,10 +311,12 @@ int do_syslog(int type, char __user *buf, int len)
75418 if (!error)
75419 error = i;
75420 break;
75421 - case 4: /* Read/clear last kernel messages */
75422 + /* Read/clear last kernel messages */
75423 + case SYSLOG_ACTION_READ_CLEAR:
75424 do_clear = 1;
75425 /* FALL THRU */
75426 - case 3: /* Read last kernel messages */
75427 + /* Read last kernel messages */
75428 + case SYSLOG_ACTION_READ_ALL:
75429 error = -EINVAL;
75430 if (!buf || len < 0)
75431 goto out;
75432 @@ -374,21 +369,25 @@ int do_syslog(int type, char __user *buf, int len)
75433 }
75434 }
75435 break;
75436 - case 5: /* Clear ring buffer */
75437 + /* Clear ring buffer */
75438 + case SYSLOG_ACTION_CLEAR:
75439 logged_chars = 0;
75440 break;
75441 - case 6: /* Disable logging to console */
75442 + /* Disable logging to console */
75443 + case SYSLOG_ACTION_CONSOLE_OFF:
75444 if (saved_console_loglevel == -1)
75445 saved_console_loglevel = console_loglevel;
75446 console_loglevel = minimum_console_loglevel;
75447 break;
75448 - case 7: /* Enable logging to console */
75449 + /* Enable logging to console */
75450 + case SYSLOG_ACTION_CONSOLE_ON:
75451 if (saved_console_loglevel != -1) {
75452 console_loglevel = saved_console_loglevel;
75453 saved_console_loglevel = -1;
75454 }
75455 break;
75456 - case 8: /* Set level of messages printed to console */
75457 + /* Set level of messages printed to console */
75458 + case SYSLOG_ACTION_CONSOLE_LEVEL:
75459 error = -EINVAL;
75460 if (len < 1 || len > 8)
75461 goto out;
75462 @@ -399,10 +398,12 @@ int do_syslog(int type, char __user *buf, int len)
75463 saved_console_loglevel = -1;
75464 error = 0;
75465 break;
75466 - case 9: /* Number of chars in the log buffer */
75467 + /* Number of chars in the log buffer */
75468 + case SYSLOG_ACTION_SIZE_UNREAD:
75469 error = log_end - log_start;
75470 break;
75471 - case 10: /* Size of the log buffer */
75472 + /* Size of the log buffer */
75473 + case SYSLOG_ACTION_SIZE_BUFFER:
75474 error = log_buf_len;
75475 break;
75476 default:
75477 @@ -415,7 +416,7 @@ out:
75478
75479 SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
75480 {
75481 - return do_syslog(type, buf, len);
75482 + return do_syslog(type, buf, len, SYSLOG_FROM_CALL);
75483 }
75484
75485 /*
75486 diff --git a/kernel/profile.c b/kernel/profile.c
75487 index dfadc5b..7f59404 100644
75488 --- a/kernel/profile.c
75489 +++ b/kernel/profile.c
75490 @@ -39,7 +39,7 @@ struct profile_hit {
75491 /* Oprofile timer tick hook */
75492 static int (*timer_hook)(struct pt_regs *) __read_mostly;
75493
75494 -static atomic_t *prof_buffer;
75495 +static atomic_unchecked_t *prof_buffer;
75496 static unsigned long prof_len, prof_shift;
75497
75498 int prof_on __read_mostly;
75499 @@ -283,7 +283,7 @@ static void profile_flip_buffers(void)
75500 hits[i].pc = 0;
75501 continue;
75502 }
75503 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
75504 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
75505 hits[i].hits = hits[i].pc = 0;
75506 }
75507 }
75508 @@ -346,9 +346,9 @@ void profile_hits(int type, void *__pc, unsigned int nr_hits)
75509 * Add the current hit(s) and flush the write-queue out
75510 * to the global buffer:
75511 */
75512 - atomic_add(nr_hits, &prof_buffer[pc]);
75513 + atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
75514 for (i = 0; i < NR_PROFILE_HIT; ++i) {
75515 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
75516 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
75517 hits[i].pc = hits[i].hits = 0;
75518 }
75519 out:
75520 @@ -426,7 +426,7 @@ void profile_hits(int type, void *__pc, unsigned int nr_hits)
75521 if (prof_on != type || !prof_buffer)
75522 return;
75523 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
75524 - atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
75525 + atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
75526 }
75527 #endif /* !CONFIG_SMP */
75528 EXPORT_SYMBOL_GPL(profile_hits);
75529 @@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
75530 return -EFAULT;
75531 buf++; p++; count--; read++;
75532 }
75533 - pnt = (char *)prof_buffer + p - sizeof(atomic_t);
75534 + pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
75535 if (copy_to_user(buf, (void *)pnt, count))
75536 return -EFAULT;
75537 read += count;
75538 @@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
75539 }
75540 #endif
75541 profile_discard_flip_buffers();
75542 - memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
75543 + memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
75544 return count;
75545 }
75546
75547 diff --git a/kernel/ptrace.c b/kernel/ptrace.c
75548 index 05625f6..733bf70 100644
75549 --- a/kernel/ptrace.c
75550 +++ b/kernel/ptrace.c
75551 @@ -117,7 +117,8 @@ int ptrace_check_attach(struct task_struct *child, int kill)
75552 return ret;
75553 }
75554
75555 -int __ptrace_may_access(struct task_struct *task, unsigned int mode)
75556 +static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
75557 + unsigned int log)
75558 {
75559 const struct cred *cred = current_cred(), *tcred;
75560
75561 @@ -141,7 +142,9 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
75562 cred->gid != tcred->egid ||
75563 cred->gid != tcred->sgid ||
75564 cred->gid != tcred->gid) &&
75565 - !capable(CAP_SYS_PTRACE)) {
75566 + ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
75567 + (log && !capable(CAP_SYS_PTRACE)))
75568 + ) {
75569 rcu_read_unlock();
75570 return -EPERM;
75571 }
75572 @@ -149,7 +152,9 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
75573 smp_rmb();
75574 if (task->mm)
75575 dumpable = get_dumpable(task->mm);
75576 - if (!dumpable && !capable(CAP_SYS_PTRACE))
75577 + if (!dumpable &&
75578 + ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
75579 + (log && !capable(CAP_SYS_PTRACE))))
75580 return -EPERM;
75581
75582 return security_ptrace_access_check(task, mode);
75583 @@ -159,7 +164,16 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
75584 {
75585 int err;
75586 task_lock(task);
75587 - err = __ptrace_may_access(task, mode);
75588 + err = __ptrace_may_access(task, mode, 0);
75589 + task_unlock(task);
75590 + return !err;
75591 +}
75592 +
75593 +bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
75594 +{
75595 + int err;
75596 + task_lock(task);
75597 + err = __ptrace_may_access(task, mode, 1);
75598 task_unlock(task);
75599 return !err;
75600 }
75601 @@ -186,7 +200,7 @@ int ptrace_attach(struct task_struct *task)
75602 goto out;
75603
75604 task_lock(task);
75605 - retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
75606 + retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
75607 task_unlock(task);
75608 if (retval)
75609 goto unlock_creds;
75610 @@ -199,7 +213,7 @@ int ptrace_attach(struct task_struct *task)
75611 goto unlock_tasklist;
75612
75613 task->ptrace = PT_PTRACED;
75614 - if (capable(CAP_SYS_PTRACE))
75615 + if (capable_nolog(CAP_SYS_PTRACE))
75616 task->ptrace |= PT_PTRACE_CAP;
75617
75618 __ptrace_link(task, current);
75619 @@ -351,6 +365,8 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
75620 {
75621 int copied = 0;
75622
75623 + pax_track_stack();
75624 +
75625 while (len > 0) {
75626 char buf[128];
75627 int this_len, retval;
75628 @@ -376,6 +392,8 @@ int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long ds
75629 {
75630 int copied = 0;
75631
75632 + pax_track_stack();
75633 +
75634 while (len > 0) {
75635 char buf[128];
75636 int this_len, retval;
75637 @@ -517,6 +535,8 @@ int ptrace_request(struct task_struct *child, long request,
75638 int ret = -EIO;
75639 siginfo_t siginfo;
75640
75641 + pax_track_stack();
75642 +
75643 switch (request) {
75644 case PTRACE_PEEKTEXT:
75645 case PTRACE_PEEKDATA:
75646 @@ -532,18 +552,18 @@ int ptrace_request(struct task_struct *child, long request,
75647 ret = ptrace_setoptions(child, data);
75648 break;
75649 case PTRACE_GETEVENTMSG:
75650 - ret = put_user(child->ptrace_message, (unsigned long __user *) data);
75651 + ret = put_user(child->ptrace_message, (__force unsigned long __user *) data);
75652 break;
75653
75654 case PTRACE_GETSIGINFO:
75655 ret = ptrace_getsiginfo(child, &siginfo);
75656 if (!ret)
75657 - ret = copy_siginfo_to_user((siginfo_t __user *) data,
75658 + ret = copy_siginfo_to_user((__force siginfo_t __user *) data,
75659 &siginfo);
75660 break;
75661
75662 case PTRACE_SETSIGINFO:
75663 - if (copy_from_user(&siginfo, (siginfo_t __user *) data,
75664 + if (copy_from_user(&siginfo, (__force siginfo_t __user *) data,
75665 sizeof siginfo))
75666 ret = -EFAULT;
75667 else
75668 @@ -621,14 +641,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, long, addr, long, data)
75669 goto out;
75670 }
75671
75672 + if (gr_handle_ptrace(child, request)) {
75673 + ret = -EPERM;
75674 + goto out_put_task_struct;
75675 + }
75676 +
75677 if (request == PTRACE_ATTACH) {
75678 ret = ptrace_attach(child);
75679 /*
75680 * Some architectures need to do book-keeping after
75681 * a ptrace attach.
75682 */
75683 - if (!ret)
75684 + if (!ret) {
75685 arch_ptrace_attach(child);
75686 + gr_audit_ptrace(child);
75687 + }
75688 goto out_put_task_struct;
75689 }
75690
75691 @@ -653,7 +680,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data)
75692 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
75693 if (copied != sizeof(tmp))
75694 return -EIO;
75695 - return put_user(tmp, (unsigned long __user *)data);
75696 + return put_user(tmp, (__force unsigned long __user *)data);
75697 }
75698
75699 int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
75700 @@ -675,6 +702,8 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
75701 siginfo_t siginfo;
75702 int ret;
75703
75704 + pax_track_stack();
75705 +
75706 switch (request) {
75707 case PTRACE_PEEKTEXT:
75708 case PTRACE_PEEKDATA:
75709 @@ -740,14 +769,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
75710 goto out;
75711 }
75712
75713 + if (gr_handle_ptrace(child, request)) {
75714 + ret = -EPERM;
75715 + goto out_put_task_struct;
75716 + }
75717 +
75718 if (request == PTRACE_ATTACH) {
75719 ret = ptrace_attach(child);
75720 /*
75721 * Some architectures need to do book-keeping after
75722 * a ptrace attach.
75723 */
75724 - if (!ret)
75725 + if (!ret) {
75726 arch_ptrace_attach(child);
75727 + gr_audit_ptrace(child);
75728 + }
75729 goto out_put_task_struct;
75730 }
75731
75732 diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
75733 index 697c0a0..2402696 100644
75734 --- a/kernel/rcutorture.c
75735 +++ b/kernel/rcutorture.c
75736 @@ -118,12 +118,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
75737 { 0 };
75738 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
75739 { 0 };
75740 -static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
75741 -static atomic_t n_rcu_torture_alloc;
75742 -static atomic_t n_rcu_torture_alloc_fail;
75743 -static atomic_t n_rcu_torture_free;
75744 -static atomic_t n_rcu_torture_mberror;
75745 -static atomic_t n_rcu_torture_error;
75746 +static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
75747 +static atomic_unchecked_t n_rcu_torture_alloc;
75748 +static atomic_unchecked_t n_rcu_torture_alloc_fail;
75749 +static atomic_unchecked_t n_rcu_torture_free;
75750 +static atomic_unchecked_t n_rcu_torture_mberror;
75751 +static atomic_unchecked_t n_rcu_torture_error;
75752 static long n_rcu_torture_timers;
75753 static struct list_head rcu_torture_removed;
75754 static cpumask_var_t shuffle_tmp_mask;
75755 @@ -187,11 +187,11 @@ rcu_torture_alloc(void)
75756
75757 spin_lock_bh(&rcu_torture_lock);
75758 if (list_empty(&rcu_torture_freelist)) {
75759 - atomic_inc(&n_rcu_torture_alloc_fail);
75760 + atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
75761 spin_unlock_bh(&rcu_torture_lock);
75762 return NULL;
75763 }
75764 - atomic_inc(&n_rcu_torture_alloc);
75765 + atomic_inc_unchecked(&n_rcu_torture_alloc);
75766 p = rcu_torture_freelist.next;
75767 list_del_init(p);
75768 spin_unlock_bh(&rcu_torture_lock);
75769 @@ -204,7 +204,7 @@ rcu_torture_alloc(void)
75770 static void
75771 rcu_torture_free(struct rcu_torture *p)
75772 {
75773 - atomic_inc(&n_rcu_torture_free);
75774 + atomic_inc_unchecked(&n_rcu_torture_free);
75775 spin_lock_bh(&rcu_torture_lock);
75776 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
75777 spin_unlock_bh(&rcu_torture_lock);
75778 @@ -319,7 +319,7 @@ rcu_torture_cb(struct rcu_head *p)
75779 i = rp->rtort_pipe_count;
75780 if (i > RCU_TORTURE_PIPE_LEN)
75781 i = RCU_TORTURE_PIPE_LEN;
75782 - atomic_inc(&rcu_torture_wcount[i]);
75783 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
75784 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
75785 rp->rtort_mbtest = 0;
75786 rcu_torture_free(rp);
75787 @@ -359,7 +359,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
75788 i = rp->rtort_pipe_count;
75789 if (i > RCU_TORTURE_PIPE_LEN)
75790 i = RCU_TORTURE_PIPE_LEN;
75791 - atomic_inc(&rcu_torture_wcount[i]);
75792 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
75793 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
75794 rp->rtort_mbtest = 0;
75795 list_del(&rp->rtort_free);
75796 @@ -653,7 +653,7 @@ rcu_torture_writer(void *arg)
75797 i = old_rp->rtort_pipe_count;
75798 if (i > RCU_TORTURE_PIPE_LEN)
75799 i = RCU_TORTURE_PIPE_LEN;
75800 - atomic_inc(&rcu_torture_wcount[i]);
75801 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
75802 old_rp->rtort_pipe_count++;
75803 cur_ops->deferred_free(old_rp);
75804 }
75805 @@ -718,7 +718,7 @@ static void rcu_torture_timer(unsigned long unused)
75806 return;
75807 }
75808 if (p->rtort_mbtest == 0)
75809 - atomic_inc(&n_rcu_torture_mberror);
75810 + atomic_inc_unchecked(&n_rcu_torture_mberror);
75811 spin_lock(&rand_lock);
75812 cur_ops->read_delay(&rand);
75813 n_rcu_torture_timers++;
75814 @@ -776,7 +776,7 @@ rcu_torture_reader(void *arg)
75815 continue;
75816 }
75817 if (p->rtort_mbtest == 0)
75818 - atomic_inc(&n_rcu_torture_mberror);
75819 + atomic_inc_unchecked(&n_rcu_torture_mberror);
75820 cur_ops->read_delay(&rand);
75821 preempt_disable();
75822 pipe_count = p->rtort_pipe_count;
75823 @@ -834,17 +834,17 @@ rcu_torture_printk(char *page)
75824 rcu_torture_current,
75825 rcu_torture_current_version,
75826 list_empty(&rcu_torture_freelist),
75827 - atomic_read(&n_rcu_torture_alloc),
75828 - atomic_read(&n_rcu_torture_alloc_fail),
75829 - atomic_read(&n_rcu_torture_free),
75830 - atomic_read(&n_rcu_torture_mberror),
75831 + atomic_read_unchecked(&n_rcu_torture_alloc),
75832 + atomic_read_unchecked(&n_rcu_torture_alloc_fail),
75833 + atomic_read_unchecked(&n_rcu_torture_free),
75834 + atomic_read_unchecked(&n_rcu_torture_mberror),
75835 n_rcu_torture_timers);
75836 - if (atomic_read(&n_rcu_torture_mberror) != 0)
75837 + if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0)
75838 cnt += sprintf(&page[cnt], " !!!");
75839 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
75840 if (i > 1) {
75841 cnt += sprintf(&page[cnt], "!!! ");
75842 - atomic_inc(&n_rcu_torture_error);
75843 + atomic_inc_unchecked(&n_rcu_torture_error);
75844 WARN_ON_ONCE(1);
75845 }
75846 cnt += sprintf(&page[cnt], "Reader Pipe: ");
75847 @@ -858,7 +858,7 @@ rcu_torture_printk(char *page)
75848 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
75849 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
75850 cnt += sprintf(&page[cnt], " %d",
75851 - atomic_read(&rcu_torture_wcount[i]));
75852 + atomic_read_unchecked(&rcu_torture_wcount[i]));
75853 }
75854 cnt += sprintf(&page[cnt], "\n");
75855 if (cur_ops->stats)
75856 @@ -1084,7 +1084,7 @@ rcu_torture_cleanup(void)
75857
75858 if (cur_ops->cleanup)
75859 cur_ops->cleanup();
75860 - if (atomic_read(&n_rcu_torture_error))
75861 + if (atomic_read_unchecked(&n_rcu_torture_error))
75862 rcu_torture_print_module_parms("End of test: FAILURE");
75863 else
75864 rcu_torture_print_module_parms("End of test: SUCCESS");
75865 @@ -1138,13 +1138,13 @@ rcu_torture_init(void)
75866
75867 rcu_torture_current = NULL;
75868 rcu_torture_current_version = 0;
75869 - atomic_set(&n_rcu_torture_alloc, 0);
75870 - atomic_set(&n_rcu_torture_alloc_fail, 0);
75871 - atomic_set(&n_rcu_torture_free, 0);
75872 - atomic_set(&n_rcu_torture_mberror, 0);
75873 - atomic_set(&n_rcu_torture_error, 0);
75874 + atomic_set_unchecked(&n_rcu_torture_alloc, 0);
75875 + atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
75876 + atomic_set_unchecked(&n_rcu_torture_free, 0);
75877 + atomic_set_unchecked(&n_rcu_torture_mberror, 0);
75878 + atomic_set_unchecked(&n_rcu_torture_error, 0);
75879 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
75880 - atomic_set(&rcu_torture_wcount[i], 0);
75881 + atomic_set_unchecked(&rcu_torture_wcount[i], 0);
75882 for_each_possible_cpu(cpu) {
75883 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
75884 per_cpu(rcu_torture_count, cpu)[i] = 0;
75885 diff --git a/kernel/rcutree.c b/kernel/rcutree.c
75886 index 683c4f3..97f54c6 100644
75887 --- a/kernel/rcutree.c
75888 +++ b/kernel/rcutree.c
75889 @@ -1303,7 +1303,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
75890 /*
75891 * Do softirq processing for the current CPU.
75892 */
75893 -static void rcu_process_callbacks(struct softirq_action *unused)
75894 +static void rcu_process_callbacks(void)
75895 {
75896 /*
75897 * Memory references from any prior RCU read-side critical sections
75898 diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
75899 index c03edf7..ac1b341 100644
75900 --- a/kernel/rcutree_plugin.h
75901 +++ b/kernel/rcutree_plugin.h
75902 @@ -145,7 +145,7 @@ static void rcu_preempt_note_context_switch(int cpu)
75903 */
75904 void __rcu_read_lock(void)
75905 {
75906 - ACCESS_ONCE(current->rcu_read_lock_nesting)++;
75907 + ACCESS_ONCE_RW(current->rcu_read_lock_nesting)++;
75908 barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */
75909 }
75910 EXPORT_SYMBOL_GPL(__rcu_read_lock);
75911 @@ -251,7 +251,7 @@ void __rcu_read_unlock(void)
75912 struct task_struct *t = current;
75913
75914 barrier(); /* needed if we ever invoke rcu_read_unlock in rcutree.c */
75915 - if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 &&
75916 + if (--ACCESS_ONCE_RW(t->rcu_read_lock_nesting) == 0 &&
75917 unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
75918 rcu_read_unlock_special(t);
75919 }
75920 diff --git a/kernel/relay.c b/kernel/relay.c
75921 index bf343f5..908e9ee 100644
75922 --- a/kernel/relay.c
75923 +++ b/kernel/relay.c
75924 @@ -1228,7 +1228,7 @@ static int subbuf_splice_actor(struct file *in,
75925 unsigned int flags,
75926 int *nonpad_ret)
75927 {
75928 - unsigned int pidx, poff, total_len, subbuf_pages, nr_pages, ret;
75929 + unsigned int pidx, poff, total_len, subbuf_pages, nr_pages;
75930 struct rchan_buf *rbuf = in->private_data;
75931 unsigned int subbuf_size = rbuf->chan->subbuf_size;
75932 uint64_t pos = (uint64_t) *ppos;
75933 @@ -1247,6 +1247,9 @@ static int subbuf_splice_actor(struct file *in,
75934 .ops = &relay_pipe_buf_ops,
75935 .spd_release = relay_page_release,
75936 };
75937 + ssize_t ret;
75938 +
75939 + pax_track_stack();
75940
75941 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
75942 return 0;
75943 diff --git a/kernel/resource.c b/kernel/resource.c
75944 index fb11a58..4e61ae1 100644
75945 --- a/kernel/resource.c
75946 +++ b/kernel/resource.c
75947 @@ -132,8 +132,18 @@ static const struct file_operations proc_iomem_operations = {
75948
75949 static int __init ioresources_init(void)
75950 {
75951 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
75952 +#ifdef CONFIG_GRKERNSEC_PROC_USER
75953 + proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
75954 + proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
75955 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
75956 + proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
75957 + proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
75958 +#endif
75959 +#else
75960 proc_create("ioports", 0, NULL, &proc_ioports_operations);
75961 proc_create("iomem", 0, NULL, &proc_iomem_operations);
75962 +#endif
75963 return 0;
75964 }
75965 __initcall(ioresources_init);
75966 diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
75967 index a56f629..1fc4989 100644
75968 --- a/kernel/rtmutex-tester.c
75969 +++ b/kernel/rtmutex-tester.c
75970 @@ -21,7 +21,7 @@
75971 #define MAX_RT_TEST_MUTEXES 8
75972
75973 static spinlock_t rttest_lock;
75974 -static atomic_t rttest_event;
75975 +static atomic_unchecked_t rttest_event;
75976
75977 struct test_thread_data {
75978 int opcode;
75979 @@ -64,7 +64,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
75980
75981 case RTTEST_LOCKCONT:
75982 td->mutexes[td->opdata] = 1;
75983 - td->event = atomic_add_return(1, &rttest_event);
75984 + td->event = atomic_add_return_unchecked(1, &rttest_event);
75985 return 0;
75986
75987 case RTTEST_RESET:
75988 @@ -82,7 +82,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
75989 return 0;
75990
75991 case RTTEST_RESETEVENT:
75992 - atomic_set(&rttest_event, 0);
75993 + atomic_set_unchecked(&rttest_event, 0);
75994 return 0;
75995
75996 default:
75997 @@ -99,9 +99,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
75998 return ret;
75999
76000 td->mutexes[id] = 1;
76001 - td->event = atomic_add_return(1, &rttest_event);
76002 + td->event = atomic_add_return_unchecked(1, &rttest_event);
76003 rt_mutex_lock(&mutexes[id]);
76004 - td->event = atomic_add_return(1, &rttest_event);
76005 + td->event = atomic_add_return_unchecked(1, &rttest_event);
76006 td->mutexes[id] = 4;
76007 return 0;
76008
76009 @@ -112,9 +112,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
76010 return ret;
76011
76012 td->mutexes[id] = 1;
76013 - td->event = atomic_add_return(1, &rttest_event);
76014 + td->event = atomic_add_return_unchecked(1, &rttest_event);
76015 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
76016 - td->event = atomic_add_return(1, &rttest_event);
76017 + td->event = atomic_add_return_unchecked(1, &rttest_event);
76018 td->mutexes[id] = ret ? 0 : 4;
76019 return ret ? -EINTR : 0;
76020
76021 @@ -123,9 +123,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
76022 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
76023 return ret;
76024
76025 - td->event = atomic_add_return(1, &rttest_event);
76026 + td->event = atomic_add_return_unchecked(1, &rttest_event);
76027 rt_mutex_unlock(&mutexes[id]);
76028 - td->event = atomic_add_return(1, &rttest_event);
76029 + td->event = atomic_add_return_unchecked(1, &rttest_event);
76030 td->mutexes[id] = 0;
76031 return 0;
76032
76033 @@ -187,7 +187,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
76034 break;
76035
76036 td->mutexes[dat] = 2;
76037 - td->event = atomic_add_return(1, &rttest_event);
76038 + td->event = atomic_add_return_unchecked(1, &rttest_event);
76039 break;
76040
76041 case RTTEST_LOCKBKL:
76042 @@ -208,7 +208,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
76043 return;
76044
76045 td->mutexes[dat] = 3;
76046 - td->event = atomic_add_return(1, &rttest_event);
76047 + td->event = atomic_add_return_unchecked(1, &rttest_event);
76048 break;
76049
76050 case RTTEST_LOCKNOWAIT:
76051 @@ -220,7 +220,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
76052 return;
76053
76054 td->mutexes[dat] = 1;
76055 - td->event = atomic_add_return(1, &rttest_event);
76056 + td->event = atomic_add_return_unchecked(1, &rttest_event);
76057 return;
76058
76059 case RTTEST_LOCKBKL:
76060 diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
76061 index 29bd4ba..8c5de90 100644
76062 --- a/kernel/rtmutex.c
76063 +++ b/kernel/rtmutex.c
76064 @@ -511,7 +511,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
76065 */
76066 spin_lock_irqsave(&pendowner->pi_lock, flags);
76067
76068 - WARN_ON(!pendowner->pi_blocked_on);
76069 + BUG_ON(!pendowner->pi_blocked_on);
76070 WARN_ON(pendowner->pi_blocked_on != waiter);
76071 WARN_ON(pendowner->pi_blocked_on->lock != lock);
76072
76073 diff --git a/kernel/sched.c b/kernel/sched.c
76074 index 0591df8..e3af3a4 100644
76075 --- a/kernel/sched.c
76076 +++ b/kernel/sched.c
76077 @@ -5043,7 +5043,7 @@ out:
76078 * In CONFIG_NO_HZ case, the idle load balance owner will do the
76079 * rebalancing for all the cpus for whom scheduler ticks are stopped.
76080 */
76081 -static void run_rebalance_domains(struct softirq_action *h)
76082 +static void run_rebalance_domains(void)
76083 {
76084 int this_cpu = smp_processor_id();
76085 struct rq *this_rq = cpu_rq(this_cpu);
76086 @@ -5690,6 +5690,19 @@ pick_next_task(struct rq *rq)
76087 }
76088 }
76089
76090 +#ifdef CONFIG_GRKERNSEC_SETXID
76091 +extern void gr_delayed_cred_worker(void);
76092 +static inline void gr_cred_schedule(void)
76093 +{
76094 + if (unlikely(current->delayed_cred))
76095 + gr_delayed_cred_worker();
76096 +}
76097 +#else
76098 +static inline void gr_cred_schedule(void)
76099 +{
76100 +}
76101 +#endif
76102 +
76103 /*
76104 * schedule() is the main scheduler function.
76105 */
76106 @@ -5700,6 +5713,8 @@ asmlinkage void __sched schedule(void)
76107 struct rq *rq;
76108 int cpu;
76109
76110 + pax_track_stack();
76111 +
76112 need_resched:
76113 preempt_disable();
76114 cpu = smp_processor_id();
76115 @@ -5713,6 +5728,8 @@ need_resched_nonpreemptible:
76116
76117 schedule_debug(prev);
76118
76119 + gr_cred_schedule();
76120 +
76121 if (sched_feat(HRTICK))
76122 hrtick_clear(rq);
76123
76124 @@ -5770,7 +5787,7 @@ EXPORT_SYMBOL(schedule);
76125 * Look out! "owner" is an entirely speculative pointer
76126 * access and not reliable.
76127 */
76128 -int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
76129 +int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
76130 {
76131 unsigned int cpu;
76132 struct rq *rq;
76133 @@ -5784,10 +5801,10 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
76134 * DEBUG_PAGEALLOC could have unmapped it if
76135 * the mutex owner just released it and exited.
76136 */
76137 - if (probe_kernel_address(&owner->cpu, cpu))
76138 + if (probe_kernel_address(&task_thread_info(owner)->cpu, cpu))
76139 return 0;
76140 #else
76141 - cpu = owner->cpu;
76142 + cpu = task_thread_info(owner)->cpu;
76143 #endif
76144
76145 /*
76146 @@ -5816,7 +5833,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
76147 /*
76148 * Is that owner really running on that cpu?
76149 */
76150 - if (task_thread_info(rq->curr) != owner || need_resched())
76151 + if (rq->curr != owner || need_resched())
76152 return 0;
76153
76154 cpu_relax();
76155 @@ -6359,6 +6376,8 @@ int can_nice(const struct task_struct *p, const int nice)
76156 /* convert nice value [19,-20] to rlimit style value [1,40] */
76157 int nice_rlim = 20 - nice;
76158
76159 + gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
76160 +
76161 return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
76162 capable(CAP_SYS_NICE));
76163 }
76164 @@ -6392,7 +6411,8 @@ SYSCALL_DEFINE1(nice, int, increment)
76165 if (nice > 19)
76166 nice = 19;
76167
76168 - if (increment < 0 && !can_nice(current, nice))
76169 + if (increment < 0 && (!can_nice(current, nice) ||
76170 + gr_handle_chroot_nice()))
76171 return -EPERM;
76172
76173 retval = security_task_setnice(current, nice);
76174 @@ -8774,7 +8794,7 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
76175 long power;
76176 int weight;
76177
76178 - WARN_ON(!sd || !sd->groups);
76179 + BUG_ON(!sd || !sd->groups);
76180
76181 if (cpu != group_first_cpu(sd->groups))
76182 return;
76183 diff --git a/kernel/signal.c b/kernel/signal.c
76184 index 2494827..cda80a0 100644
76185 --- a/kernel/signal.c
76186 +++ b/kernel/signal.c
76187 @@ -41,12 +41,12 @@
76188
76189 static struct kmem_cache *sigqueue_cachep;
76190
76191 -static void __user *sig_handler(struct task_struct *t, int sig)
76192 +static __sighandler_t sig_handler(struct task_struct *t, int sig)
76193 {
76194 return t->sighand->action[sig - 1].sa.sa_handler;
76195 }
76196
76197 -static int sig_handler_ignored(void __user *handler, int sig)
76198 +static int sig_handler_ignored(__sighandler_t handler, int sig)
76199 {
76200 /* Is it explicitly or implicitly ignored? */
76201 return handler == SIG_IGN ||
76202 @@ -56,7 +56,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
76203 static int sig_task_ignored(struct task_struct *t, int sig,
76204 int from_ancestor_ns)
76205 {
76206 - void __user *handler;
76207 + __sighandler_t handler;
76208
76209 handler = sig_handler(t, sig);
76210
76211 @@ -207,6 +207,9 @@ static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
76212 */
76213 user = get_uid(__task_cred(t)->user);
76214 atomic_inc(&user->sigpending);
76215 +
76216 + if (!override_rlimit)
76217 + gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
76218 if (override_rlimit ||
76219 atomic_read(&user->sigpending) <=
76220 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
76221 @@ -327,7 +330,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
76222
76223 int unhandled_signal(struct task_struct *tsk, int sig)
76224 {
76225 - void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
76226 + __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
76227 if (is_global_init(tsk))
76228 return 1;
76229 if (handler != SIG_IGN && handler != SIG_DFL)
76230 @@ -627,6 +630,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
76231 }
76232 }
76233
76234 + /* allow glibc communication via tgkill to other threads in our
76235 + thread group */
76236 + if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
76237 + sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
76238 + && gr_handle_signal(t, sig))
76239 + return -EPERM;
76240 +
76241 return security_task_kill(t, info, sig, 0);
76242 }
76243
76244 @@ -968,7 +978,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
76245 return send_signal(sig, info, p, 1);
76246 }
76247
76248 -static int
76249 +int
76250 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
76251 {
76252 return send_signal(sig, info, t, 0);
76253 @@ -1005,6 +1015,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
76254 unsigned long int flags;
76255 int ret, blocked, ignored;
76256 struct k_sigaction *action;
76257 + int is_unhandled = 0;
76258
76259 spin_lock_irqsave(&t->sighand->siglock, flags);
76260 action = &t->sighand->action[sig-1];
76261 @@ -1019,9 +1030,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
76262 }
76263 if (action->sa.sa_handler == SIG_DFL)
76264 t->signal->flags &= ~SIGNAL_UNKILLABLE;
76265 + if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
76266 + is_unhandled = 1;
76267 ret = specific_send_sig_info(sig, info, t);
76268 spin_unlock_irqrestore(&t->sighand->siglock, flags);
76269
76270 + /* only deal with unhandled signals, java etc trigger SIGSEGV during
76271 + normal operation */
76272 + if (is_unhandled) {
76273 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
76274 + gr_handle_crash(t, sig);
76275 + }
76276 +
76277 return ret;
76278 }
76279
76280 @@ -1081,8 +1101,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
76281 {
76282 int ret = check_kill_permission(sig, info, p);
76283
76284 - if (!ret && sig)
76285 + if (!ret && sig) {
76286 ret = do_send_sig_info(sig, info, p, true);
76287 + if (!ret)
76288 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
76289 + }
76290
76291 return ret;
76292 }
76293 @@ -1644,6 +1667,8 @@ void ptrace_notify(int exit_code)
76294 {
76295 siginfo_t info;
76296
76297 + pax_track_stack();
76298 +
76299 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
76300
76301 memset(&info, 0, sizeof info);
76302 @@ -2275,7 +2300,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
76303 int error = -ESRCH;
76304
76305 rcu_read_lock();
76306 - p = find_task_by_vpid(pid);
76307 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
76308 + /* allow glibc communication via tgkill to other threads in our
76309 + thread group */
76310 + if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
76311 + sig == (SIGRTMIN+1) && tgid == info->si_pid)
76312 + p = find_task_by_vpid_unrestricted(pid);
76313 + else
76314 +#endif
76315 + p = find_task_by_vpid(pid);
76316 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
76317 error = check_kill_permission(sig, info, p);
76318 /*
76319 diff --git a/kernel/smp.c b/kernel/smp.c
76320 index aa9cff3..631a0de 100644
76321 --- a/kernel/smp.c
76322 +++ b/kernel/smp.c
76323 @@ -522,22 +522,22 @@ int smp_call_function(void (*func)(void *), void *info, int wait)
76324 }
76325 EXPORT_SYMBOL(smp_call_function);
76326
76327 -void ipi_call_lock(void)
76328 +void ipi_call_lock(void) __acquires(call_function.lock)
76329 {
76330 spin_lock(&call_function.lock);
76331 }
76332
76333 -void ipi_call_unlock(void)
76334 +void ipi_call_unlock(void) __releases(call_function.lock)
76335 {
76336 spin_unlock(&call_function.lock);
76337 }
76338
76339 -void ipi_call_lock_irq(void)
76340 +void ipi_call_lock_irq(void) __acquires(call_function.lock)
76341 {
76342 spin_lock_irq(&call_function.lock);
76343 }
76344
76345 -void ipi_call_unlock_irq(void)
76346 +void ipi_call_unlock_irq(void) __releases(call_function.lock)
76347 {
76348 spin_unlock_irq(&call_function.lock);
76349 }
76350 diff --git a/kernel/softirq.c b/kernel/softirq.c
76351 index 04a0252..580c512 100644
76352 --- a/kernel/softirq.c
76353 +++ b/kernel/softirq.c
76354 @@ -56,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
76355
76356 static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
76357
76358 -char *softirq_to_name[NR_SOFTIRQS] = {
76359 +const char * const softirq_to_name[NR_SOFTIRQS] = {
76360 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
76361 "TASKLET", "SCHED", "HRTIMER", "RCU"
76362 };
76363 @@ -206,7 +206,7 @@ EXPORT_SYMBOL(local_bh_enable_ip);
76364
76365 asmlinkage void __do_softirq(void)
76366 {
76367 - struct softirq_action *h;
76368 + const struct softirq_action *h;
76369 __u32 pending;
76370 int max_restart = MAX_SOFTIRQ_RESTART;
76371 int cpu;
76372 @@ -233,7 +233,7 @@ restart:
76373 kstat_incr_softirqs_this_cpu(h - softirq_vec);
76374
76375 trace_softirq_entry(h, softirq_vec);
76376 - h->action(h);
76377 + h->action();
76378 trace_softirq_exit(h, softirq_vec);
76379 if (unlikely(prev_count != preempt_count())) {
76380 printk(KERN_ERR "huh, entered softirq %td %s %p"
76381 @@ -363,9 +363,11 @@ void raise_softirq(unsigned int nr)
76382 local_irq_restore(flags);
76383 }
76384
76385 -void open_softirq(int nr, void (*action)(struct softirq_action *))
76386 +void open_softirq(int nr, void (*action)(void))
76387 {
76388 - softirq_vec[nr].action = action;
76389 + pax_open_kernel();
76390 + *(void **)&softirq_vec[nr].action = action;
76391 + pax_close_kernel();
76392 }
76393
76394 /*
76395 @@ -419,7 +421,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
76396
76397 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
76398
76399 -static void tasklet_action(struct softirq_action *a)
76400 +static void tasklet_action(void)
76401 {
76402 struct tasklet_struct *list;
76403
76404 @@ -454,7 +456,7 @@ static void tasklet_action(struct softirq_action *a)
76405 }
76406 }
76407
76408 -static void tasklet_hi_action(struct softirq_action *a)
76409 +static void tasklet_hi_action(void)
76410 {
76411 struct tasklet_struct *list;
76412
76413 diff --git a/kernel/sys.c b/kernel/sys.c
76414 index e9512b1..f07185f 100644
76415 --- a/kernel/sys.c
76416 +++ b/kernel/sys.c
76417 @@ -133,6 +133,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
76418 error = -EACCES;
76419 goto out;
76420 }
76421 +
76422 + if (gr_handle_chroot_setpriority(p, niceval)) {
76423 + error = -EACCES;
76424 + goto out;
76425 + }
76426 +
76427 no_nice = security_task_setnice(p, niceval);
76428 if (no_nice) {
76429 error = no_nice;
76430 @@ -190,10 +196,10 @@ SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
76431 !(user = find_user(who)))
76432 goto out_unlock; /* No processes for this user */
76433
76434 - do_each_thread(g, p)
76435 + do_each_thread(g, p) {
76436 if (__task_cred(p)->uid == who)
76437 error = set_one_prio(p, niceval, error);
76438 - while_each_thread(g, p);
76439 + } while_each_thread(g, p);
76440 if (who != cred->uid)
76441 free_uid(user); /* For find_user() */
76442 break;
76443 @@ -253,13 +259,13 @@ SYSCALL_DEFINE2(getpriority, int, which, int, who)
76444 !(user = find_user(who)))
76445 goto out_unlock; /* No processes for this user */
76446
76447 - do_each_thread(g, p)
76448 + do_each_thread(g, p) {
76449 if (__task_cred(p)->uid == who) {
76450 niceval = 20 - task_nice(p);
76451 if (niceval > retval)
76452 retval = niceval;
76453 }
76454 - while_each_thread(g, p);
76455 + } while_each_thread(g, p);
76456 if (who != cred->uid)
76457 free_uid(user); /* for find_user() */
76458 break;
76459 @@ -509,6 +515,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
76460 goto error;
76461 }
76462
76463 + if (gr_check_group_change(new->gid, new->egid, -1))
76464 + goto error;
76465 +
76466 if (rgid != (gid_t) -1 ||
76467 (egid != (gid_t) -1 && egid != old->gid))
76468 new->sgid = new->egid;
76469 @@ -542,6 +551,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
76470 goto error;
76471
76472 retval = -EPERM;
76473 +
76474 + if (gr_check_group_change(gid, gid, gid))
76475 + goto error;
76476 +
76477 if (capable(CAP_SETGID))
76478 new->gid = new->egid = new->sgid = new->fsgid = gid;
76479 else if (gid == old->gid || gid == old->sgid)
76480 @@ -559,7 +572,7 @@ error:
76481 /*
76482 * change the user struct in a credentials set to match the new UID
76483 */
76484 -static int set_user(struct cred *new)
76485 +int set_user(struct cred *new)
76486 {
76487 struct user_struct *new_user;
76488
76489 @@ -567,12 +580,19 @@ static int set_user(struct cred *new)
76490 if (!new_user)
76491 return -EAGAIN;
76492
76493 + /*
76494 + * We don't fail in case of NPROC limit excess here because too many
76495 + * poorly written programs don't check set*uid() return code, assuming
76496 + * it never fails if called by root. We may still enforce NPROC limit
76497 + * for programs doing set*uid()+execve() by harmlessly deferring the
76498 + * failure to the execve() stage.
76499 + */
76500 if (atomic_read(&new_user->processes) >=
76501 current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
76502 - new_user != INIT_USER) {
76503 - free_uid(new_user);
76504 - return -EAGAIN;
76505 - }
76506 + new_user != INIT_USER)
76507 + current->flags |= PF_NPROC_EXCEEDED;
76508 + else
76509 + current->flags &= ~PF_NPROC_EXCEEDED;
76510
76511 free_uid(new->user);
76512 new->user = new_user;
76513 @@ -627,6 +647,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
76514 goto error;
76515 }
76516
76517 + if (gr_check_user_change(new->uid, new->euid, -1))
76518 + goto error;
76519 +
76520 if (new->uid != old->uid) {
76521 retval = set_user(new);
76522 if (retval < 0)
76523 @@ -675,6 +698,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
76524 goto error;
76525
76526 retval = -EPERM;
76527 +
76528 + if (gr_check_crash_uid(uid))
76529 + goto error;
76530 + if (gr_check_user_change(uid, uid, uid))
76531 + goto error;
76532 +
76533 if (capable(CAP_SETUID)) {
76534 new->suid = new->uid = uid;
76535 if (uid != old->uid) {
76536 @@ -732,6 +761,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
76537 goto error;
76538 }
76539
76540 + if (gr_check_user_change(ruid, euid, -1))
76541 + goto error;
76542 +
76543 if (ruid != (uid_t) -1) {
76544 new->uid = ruid;
76545 if (ruid != old->uid) {
76546 @@ -800,6 +832,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
76547 goto error;
76548 }
76549
76550 + if (gr_check_group_change(rgid, egid, -1))
76551 + goto error;
76552 +
76553 if (rgid != (gid_t) -1)
76554 new->gid = rgid;
76555 if (egid != (gid_t) -1)
76556 @@ -849,6 +884,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
76557 if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS) < 0)
76558 goto error;
76559
76560 + if (gr_check_user_change(-1, -1, uid))
76561 + goto error;
76562 +
76563 if (uid == old->uid || uid == old->euid ||
76564 uid == old->suid || uid == old->fsuid ||
76565 capable(CAP_SETUID)) {
76566 @@ -889,6 +927,9 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
76567 if (gid == old->gid || gid == old->egid ||
76568 gid == old->sgid || gid == old->fsgid ||
76569 capable(CAP_SETGID)) {
76570 + if (gr_check_group_change(-1, -1, gid))
76571 + goto error;
76572 +
76573 if (gid != old_fsgid) {
76574 new->fsgid = gid;
76575 goto change_okay;
76576 @@ -1454,7 +1495,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
76577 error = get_dumpable(me->mm);
76578 break;
76579 case PR_SET_DUMPABLE:
76580 - if (arg2 < 0 || arg2 > 1) {
76581 + if (arg2 > 1) {
76582 error = -EINVAL;
76583 break;
76584 }
76585 diff --git a/kernel/sysctl.c b/kernel/sysctl.c
76586 index b8bd058..ab6a76be 100644
76587 --- a/kernel/sysctl.c
76588 +++ b/kernel/sysctl.c
76589 @@ -63,6 +63,13 @@
76590 static int deprecated_sysctl_warning(struct __sysctl_args *args);
76591
76592 #if defined(CONFIG_SYSCTL)
76593 +#include <linux/grsecurity.h>
76594 +#include <linux/grinternal.h>
76595 +
76596 +extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
76597 +extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
76598 + const int op);
76599 +extern int gr_handle_chroot_sysctl(const int op);
76600
76601 /* External variables not in a header file. */
76602 extern int C_A_D;
76603 @@ -168,6 +175,7 @@ static int proc_do_cad_pid(struct ctl_table *table, int write,
76604 static int proc_taint(struct ctl_table *table, int write,
76605 void __user *buffer, size_t *lenp, loff_t *ppos);
76606 #endif
76607 +extern ctl_table grsecurity_table[];
76608
76609 static struct ctl_table root_table[];
76610 static struct ctl_table_root sysctl_table_root;
76611 @@ -200,6 +208,21 @@ extern struct ctl_table epoll_table[];
76612 int sysctl_legacy_va_layout;
76613 #endif
76614
76615 +#ifdef CONFIG_PAX_SOFTMODE
76616 +static ctl_table pax_table[] = {
76617 + {
76618 + .ctl_name = CTL_UNNUMBERED,
76619 + .procname = "softmode",
76620 + .data = &pax_softmode,
76621 + .maxlen = sizeof(unsigned int),
76622 + .mode = 0600,
76623 + .proc_handler = &proc_dointvec,
76624 + },
76625 +
76626 + { .ctl_name = 0 }
76627 +};
76628 +#endif
76629 +
76630 extern int prove_locking;
76631 extern int lock_stat;
76632
76633 @@ -251,6 +274,24 @@ static int max_wakeup_granularity_ns = NSEC_PER_SEC; /* 1 second */
76634 #endif
76635
76636 static struct ctl_table kern_table[] = {
76637 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
76638 + {
76639 + .ctl_name = CTL_UNNUMBERED,
76640 + .procname = "grsecurity",
76641 + .mode = 0500,
76642 + .child = grsecurity_table,
76643 + },
76644 +#endif
76645 +
76646 +#ifdef CONFIG_PAX_SOFTMODE
76647 + {
76648 + .ctl_name = CTL_UNNUMBERED,
76649 + .procname = "pax",
76650 + .mode = 0500,
76651 + .child = pax_table,
76652 + },
76653 +#endif
76654 +
76655 {
76656 .ctl_name = CTL_UNNUMBERED,
76657 .procname = "sched_child_runs_first",
76658 @@ -567,8 +608,8 @@ static struct ctl_table kern_table[] = {
76659 .data = &modprobe_path,
76660 .maxlen = KMOD_PATH_LEN,
76661 .mode = 0644,
76662 - .proc_handler = &proc_dostring,
76663 - .strategy = &sysctl_string,
76664 + .proc_handler = &proc_dostring_modpriv,
76665 + .strategy = &sysctl_string_modpriv,
76666 },
76667 {
76668 .ctl_name = CTL_UNNUMBERED,
76669 @@ -1247,6 +1288,13 @@ static struct ctl_table vm_table[] = {
76670 .mode = 0644,
76671 .proc_handler = &proc_dointvec
76672 },
76673 + {
76674 + .procname = "heap_stack_gap",
76675 + .data = &sysctl_heap_stack_gap,
76676 + .maxlen = sizeof(sysctl_heap_stack_gap),
76677 + .mode = 0644,
76678 + .proc_handler = proc_doulongvec_minmax,
76679 + },
76680 #else
76681 {
76682 .ctl_name = CTL_UNNUMBERED,
76683 @@ -1803,6 +1851,8 @@ static int do_sysctl_strategy(struct ctl_table_root *root,
76684 return 0;
76685 }
76686
76687 +static int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op);
76688 +
76689 static int parse_table(int __user *name, int nlen,
76690 void __user *oldval, size_t __user *oldlenp,
76691 void __user *newval, size_t newlen,
76692 @@ -1821,7 +1871,7 @@ repeat:
76693 if (n == table->ctl_name) {
76694 int error;
76695 if (table->child) {
76696 - if (sysctl_perm(root, table, MAY_EXEC))
76697 + if (sysctl_perm_nochk(root, table, MAY_EXEC))
76698 return -EPERM;
76699 name++;
76700 nlen--;
76701 @@ -1906,6 +1956,33 @@ int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
76702 int error;
76703 int mode;
76704
76705 + if (table->parent != NULL && table->parent->procname != NULL &&
76706 + table->procname != NULL &&
76707 + gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
76708 + return -EACCES;
76709 + if (gr_handle_chroot_sysctl(op))
76710 + return -EACCES;
76711 + error = gr_handle_sysctl(table, op);
76712 + if (error)
76713 + return error;
76714 +
76715 + error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
76716 + if (error)
76717 + return error;
76718 +
76719 + if (root->permissions)
76720 + mode = root->permissions(root, current->nsproxy, table);
76721 + else
76722 + mode = table->mode;
76723 +
76724 + return test_perm(mode, op);
76725 +}
76726 +
76727 +int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op)
76728 +{
76729 + int error;
76730 + int mode;
76731 +
76732 error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
76733 if (error)
76734 return error;
76735 @@ -2335,6 +2412,16 @@ int proc_dostring(struct ctl_table *table, int write,
76736 buffer, lenp, ppos);
76737 }
76738
76739 +int proc_dostring_modpriv(struct ctl_table *table, int write,
76740 + void __user *buffer, size_t *lenp, loff_t *ppos)
76741 +{
76742 + if (write && !capable(CAP_SYS_MODULE))
76743 + return -EPERM;
76744 +
76745 + return _proc_do_string(table->data, table->maxlen, write,
76746 + buffer, lenp, ppos);
76747 +}
76748 +
76749
76750 static int do_proc_dointvec_conv(int *negp, unsigned long *lvalp,
76751 int *valp,
76752 @@ -2609,7 +2696,7 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
76753 vleft = table->maxlen / sizeof(unsigned long);
76754 left = *lenp;
76755
76756 - for (; left && vleft--; i++, min++, max++, first=0) {
76757 + for (; left && vleft--; i++, first=0) {
76758 if (write) {
76759 while (left) {
76760 char c;
76761 @@ -2910,6 +2997,12 @@ int proc_dostring(struct ctl_table *table, int write,
76762 return -ENOSYS;
76763 }
76764
76765 +int proc_dostring_modpriv(struct ctl_table *table, int write,
76766 + void __user *buffer, size_t *lenp, loff_t *ppos)
76767 +{
76768 + return -ENOSYS;
76769 +}
76770 +
76771 int proc_dointvec(struct ctl_table *table, int write,
76772 void __user *buffer, size_t *lenp, loff_t *ppos)
76773 {
76774 @@ -3038,6 +3131,16 @@ int sysctl_string(struct ctl_table *table,
76775 return 1;
76776 }
76777
76778 +int sysctl_string_modpriv(struct ctl_table *table,
76779 + void __user *oldval, size_t __user *oldlenp,
76780 + void __user *newval, size_t newlen)
76781 +{
76782 + if (newval && newlen && !capable(CAP_SYS_MODULE))
76783 + return -EPERM;
76784 +
76785 + return sysctl_string(table, oldval, oldlenp, newval, newlen);
76786 +}
76787 +
76788 /*
76789 * This function makes sure that all of the integers in the vector
76790 * are between the minimum and maximum values given in the arrays
76791 @@ -3182,6 +3285,13 @@ int sysctl_string(struct ctl_table *table,
76792 return -ENOSYS;
76793 }
76794
76795 +int sysctl_string_modpriv(struct ctl_table *table,
76796 + void __user *oldval, size_t __user *oldlenp,
76797 + void __user *newval, size_t newlen)
76798 +{
76799 + return -ENOSYS;
76800 +}
76801 +
76802 int sysctl_intvec(struct ctl_table *table,
76803 void __user *oldval, size_t __user *oldlenp,
76804 void __user *newval, size_t newlen)
76805 @@ -3246,6 +3356,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
76806 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
76807 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
76808 EXPORT_SYMBOL(proc_dostring);
76809 +EXPORT_SYMBOL(proc_dostring_modpriv);
76810 EXPORT_SYMBOL(proc_doulongvec_minmax);
76811 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
76812 EXPORT_SYMBOL(register_sysctl_table);
76813 @@ -3254,5 +3365,6 @@ EXPORT_SYMBOL(sysctl_intvec);
76814 EXPORT_SYMBOL(sysctl_jiffies);
76815 EXPORT_SYMBOL(sysctl_ms_jiffies);
76816 EXPORT_SYMBOL(sysctl_string);
76817 +EXPORT_SYMBOL(sysctl_string_modpriv);
76818 EXPORT_SYMBOL(sysctl_data);
76819 EXPORT_SYMBOL(unregister_sysctl_table);
76820 diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
76821 index 469193c..ea3ecb2 100644
76822 --- a/kernel/sysctl_check.c
76823 +++ b/kernel/sysctl_check.c
76824 @@ -1489,10 +1489,12 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
76825 } else {
76826 if ((table->strategy == sysctl_data) ||
76827 (table->strategy == sysctl_string) ||
76828 + (table->strategy == sysctl_string_modpriv) ||
76829 (table->strategy == sysctl_intvec) ||
76830 (table->strategy == sysctl_jiffies) ||
76831 (table->strategy == sysctl_ms_jiffies) ||
76832 (table->proc_handler == proc_dostring) ||
76833 + (table->proc_handler == proc_dostring_modpriv) ||
76834 (table->proc_handler == proc_dointvec) ||
76835 (table->proc_handler == proc_dointvec_minmax) ||
76836 (table->proc_handler == proc_dointvec_jiffies) ||
76837 diff --git a/kernel/taskstats.c b/kernel/taskstats.c
76838 index a4ef542..798bcd7 100644
76839 --- a/kernel/taskstats.c
76840 +++ b/kernel/taskstats.c
76841 @@ -26,9 +26,12 @@
76842 #include <linux/cgroup.h>
76843 #include <linux/fs.h>
76844 #include <linux/file.h>
76845 +#include <linux/grsecurity.h>
76846 #include <net/genetlink.h>
76847 #include <asm/atomic.h>
76848
76849 +extern int gr_is_taskstats_denied(int pid);
76850 +
76851 /*
76852 * Maximum length of a cpumask that can be specified in
76853 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
76854 @@ -442,6 +445,9 @@ static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
76855 size_t size;
76856 cpumask_var_t mask;
76857
76858 + if (gr_is_taskstats_denied(current->pid))
76859 + return -EACCES;
76860 +
76861 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
76862 return -ENOMEM;
76863
76864 diff --git a/kernel/time.c b/kernel/time.c
76865 index 33df60e..ca768bd 100644
76866 --- a/kernel/time.c
76867 +++ b/kernel/time.c
76868 @@ -165,6 +165,11 @@ int do_sys_settimeofday(struct timespec *tv, struct timezone *tz)
76869 return error;
76870
76871 if (tz) {
76872 + /* we log in do_settimeofday called below, so don't log twice
76873 + */
76874 + if (!tv)
76875 + gr_log_timechange();
76876 +
76877 /* SMP safe, global irq locking makes it work. */
76878 sys_tz = *tz;
76879 update_vsyscall_tz();
76880 @@ -240,7 +245,7 @@ EXPORT_SYMBOL(current_fs_time);
76881 * Avoid unnecessary multiplications/divisions in the
76882 * two most common HZ cases:
76883 */
76884 -unsigned int inline jiffies_to_msecs(const unsigned long j)
76885 +inline unsigned int jiffies_to_msecs(const unsigned long j)
76886 {
76887 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
76888 return (MSEC_PER_SEC / HZ) * j;
76889 @@ -256,7 +261,7 @@ unsigned int inline jiffies_to_msecs(const unsigned long j)
76890 }
76891 EXPORT_SYMBOL(jiffies_to_msecs);
76892
76893 -unsigned int inline jiffies_to_usecs(const unsigned long j)
76894 +inline unsigned int jiffies_to_usecs(const unsigned long j)
76895 {
76896 #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
76897 return (USEC_PER_SEC / HZ) * j;
76898 diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
76899 index 57b953f..06f149f 100644
76900 --- a/kernel/time/tick-broadcast.c
76901 +++ b/kernel/time/tick-broadcast.c
76902 @@ -116,7 +116,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
76903 * then clear the broadcast bit.
76904 */
76905 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
76906 - int cpu = smp_processor_id();
76907 + cpu = smp_processor_id();
76908
76909 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
76910 tick_broadcast_clear_oneshot(cpu);
76911 diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
76912 index 4a71cff..ffb5548 100644
76913 --- a/kernel/time/timekeeping.c
76914 +++ b/kernel/time/timekeeping.c
76915 @@ -14,6 +14,7 @@
76916 #include <linux/init.h>
76917 #include <linux/mm.h>
76918 #include <linux/sched.h>
76919 +#include <linux/grsecurity.h>
76920 #include <linux/sysdev.h>
76921 #include <linux/clocksource.h>
76922 #include <linux/jiffies.h>
76923 @@ -180,7 +181,7 @@ void update_xtime_cache(u64 nsec)
76924 */
76925 struct timespec ts = xtime;
76926 timespec_add_ns(&ts, nsec);
76927 - ACCESS_ONCE(xtime_cache) = ts;
76928 + ACCESS_ONCE_RW(xtime_cache) = ts;
76929 }
76930
76931 /* must hold xtime_lock */
76932 @@ -337,6 +338,8 @@ int do_settimeofday(struct timespec *tv)
76933 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
76934 return -EINVAL;
76935
76936 + gr_log_timechange();
76937 +
76938 write_seqlock_irqsave(&xtime_lock, flags);
76939
76940 timekeeping_forward_now();
76941 diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
76942 index 54c0dda..e9095d9 100644
76943 --- a/kernel/time/timer_list.c
76944 +++ b/kernel/time/timer_list.c
76945 @@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
76946
76947 static void print_name_offset(struct seq_file *m, void *sym)
76948 {
76949 +#ifdef CONFIG_GRKERNSEC_HIDESYM
76950 + SEQ_printf(m, "<%p>", NULL);
76951 +#else
76952 char symname[KSYM_NAME_LEN];
76953
76954 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
76955 SEQ_printf(m, "<%p>", sym);
76956 else
76957 SEQ_printf(m, "%s", symname);
76958 +#endif
76959 }
76960
76961 static void
76962 @@ -112,7 +116,11 @@ next_one:
76963 static void
76964 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
76965 {
76966 +#ifdef CONFIG_GRKERNSEC_HIDESYM
76967 + SEQ_printf(m, " .base: %p\n", NULL);
76968 +#else
76969 SEQ_printf(m, " .base: %p\n", base);
76970 +#endif
76971 SEQ_printf(m, " .index: %d\n",
76972 base->index);
76973 SEQ_printf(m, " .resolution: %Lu nsecs\n",
76974 @@ -289,7 +297,11 @@ static int __init init_timer_list_procfs(void)
76975 {
76976 struct proc_dir_entry *pe;
76977
76978 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
76979 + pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
76980 +#else
76981 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
76982 +#endif
76983 if (!pe)
76984 return -ENOMEM;
76985 return 0;
76986 diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
76987 index ee5681f..634089b 100644
76988 --- a/kernel/time/timer_stats.c
76989 +++ b/kernel/time/timer_stats.c
76990 @@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
76991 static unsigned long nr_entries;
76992 static struct entry entries[MAX_ENTRIES];
76993
76994 -static atomic_t overflow_count;
76995 +static atomic_unchecked_t overflow_count;
76996
76997 /*
76998 * The entries are in a hash-table, for fast lookup:
76999 @@ -140,7 +140,7 @@ static void reset_entries(void)
77000 nr_entries = 0;
77001 memset(entries, 0, sizeof(entries));
77002 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
77003 - atomic_set(&overflow_count, 0);
77004 + atomic_set_unchecked(&overflow_count, 0);
77005 }
77006
77007 static struct entry *alloc_entry(void)
77008 @@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
77009 if (likely(entry))
77010 entry->count++;
77011 else
77012 - atomic_inc(&overflow_count);
77013 + atomic_inc_unchecked(&overflow_count);
77014
77015 out_unlock:
77016 spin_unlock_irqrestore(lock, flags);
77017 @@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
77018
77019 static void print_name_offset(struct seq_file *m, unsigned long addr)
77020 {
77021 +#ifdef CONFIG_GRKERNSEC_HIDESYM
77022 + seq_printf(m, "<%p>", NULL);
77023 +#else
77024 char symname[KSYM_NAME_LEN];
77025
77026 if (lookup_symbol_name(addr, symname) < 0)
77027 seq_printf(m, "<%p>", (void *)addr);
77028 else
77029 seq_printf(m, "%s", symname);
77030 +#endif
77031 }
77032
77033 static int tstats_show(struct seq_file *m, void *v)
77034 @@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
77035
77036 seq_puts(m, "Timer Stats Version: v0.2\n");
77037 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
77038 - if (atomic_read(&overflow_count))
77039 + if (atomic_read_unchecked(&overflow_count))
77040 seq_printf(m, "Overflow: %d entries\n",
77041 - atomic_read(&overflow_count));
77042 + atomic_read_unchecked(&overflow_count));
77043
77044 for (i = 0; i < nr_entries; i++) {
77045 entry = entries + i;
77046 @@ -415,7 +419,11 @@ static int __init init_tstats_procfs(void)
77047 {
77048 struct proc_dir_entry *pe;
77049
77050 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
77051 + pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
77052 +#else
77053 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
77054 +#endif
77055 if (!pe)
77056 return -ENOMEM;
77057 return 0;
77058 diff --git a/kernel/timer.c b/kernel/timer.c
77059 index cb3c1f1..8bf5526 100644
77060 --- a/kernel/timer.c
77061 +++ b/kernel/timer.c
77062 @@ -1213,7 +1213,7 @@ void update_process_times(int user_tick)
77063 /*
77064 * This function runs timers and the timer-tq in bottom half context.
77065 */
77066 -static void run_timer_softirq(struct softirq_action *h)
77067 +static void run_timer_softirq(void)
77068 {
77069 struct tvec_base *base = __get_cpu_var(tvec_bases);
77070
77071 diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
77072 index d9d6206..f19467e 100644
77073 --- a/kernel/trace/blktrace.c
77074 +++ b/kernel/trace/blktrace.c
77075 @@ -313,7 +313,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
77076 struct blk_trace *bt = filp->private_data;
77077 char buf[16];
77078
77079 - snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
77080 + snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
77081
77082 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
77083 }
77084 @@ -376,7 +376,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
77085 return 1;
77086
77087 bt = buf->chan->private_data;
77088 - atomic_inc(&bt->dropped);
77089 + atomic_inc_unchecked(&bt->dropped);
77090 return 0;
77091 }
77092
77093 @@ -477,7 +477,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
77094
77095 bt->dir = dir;
77096 bt->dev = dev;
77097 - atomic_set(&bt->dropped, 0);
77098 + atomic_set_unchecked(&bt->dropped, 0);
77099
77100 ret = -EIO;
77101 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
77102 diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
77103 index 4872937..c794d40 100644
77104 --- a/kernel/trace/ftrace.c
77105 +++ b/kernel/trace/ftrace.c
77106 @@ -1100,13 +1100,18 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
77107
77108 ip = rec->ip;
77109
77110 + ret = ftrace_arch_code_modify_prepare();
77111 + FTRACE_WARN_ON(ret);
77112 + if (ret)
77113 + return 0;
77114 +
77115 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
77116 + FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
77117 if (ret) {
77118 ftrace_bug(ret, ip);
77119 rec->flags |= FTRACE_FL_FAILED;
77120 - return 0;
77121 }
77122 - return 1;
77123 + return ret ? 0 : 1;
77124 }
77125
77126 /*
77127 diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
77128 index e749a05..19c6e94 100644
77129 --- a/kernel/trace/ring_buffer.c
77130 +++ b/kernel/trace/ring_buffer.c
77131 @@ -606,7 +606,7 @@ static struct list_head *rb_list_head(struct list_head *list)
77132 * the reader page). But if the next page is a header page,
77133 * its flags will be non zero.
77134 */
77135 -static int inline
77136 +static inline int
77137 rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
77138 struct buffer_page *page, struct list_head *list)
77139 {
77140 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
77141 index a2a2d1f..7f32b09 100644
77142 --- a/kernel/trace/trace.c
77143 +++ b/kernel/trace/trace.c
77144 @@ -3193,6 +3193,8 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
77145 size_t rem;
77146 unsigned int i;
77147
77148 + pax_track_stack();
77149 +
77150 /* copy the tracer to avoid using a global lock all around */
77151 mutex_lock(&trace_types_lock);
77152 if (unlikely(old_tracer != current_trace && current_trace)) {
77153 @@ -3659,6 +3661,8 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
77154 int entries, size, i;
77155 size_t ret;
77156
77157 + pax_track_stack();
77158 +
77159 if (*ppos & (PAGE_SIZE - 1)) {
77160 WARN_ONCE(1, "Ftrace: previous read must page-align\n");
77161 return -EINVAL;
77162 @@ -3816,10 +3820,9 @@ static const struct file_operations tracing_dyn_info_fops = {
77163 };
77164 #endif
77165
77166 -static struct dentry *d_tracer;
77167 -
77168 struct dentry *tracing_init_dentry(void)
77169 {
77170 + static struct dentry *d_tracer;
77171 static int once;
77172
77173 if (d_tracer)
77174 @@ -3839,10 +3842,9 @@ struct dentry *tracing_init_dentry(void)
77175 return d_tracer;
77176 }
77177
77178 -static struct dentry *d_percpu;
77179 -
77180 struct dentry *tracing_dentry_percpu(void)
77181 {
77182 + static struct dentry *d_percpu;
77183 static int once;
77184 struct dentry *d_tracer;
77185
77186 diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
77187 index d128f65..f37b4af 100644
77188 --- a/kernel/trace/trace_events.c
77189 +++ b/kernel/trace/trace_events.c
77190 @@ -951,13 +951,10 @@ static LIST_HEAD(ftrace_module_file_list);
77191 * Modules must own their file_operations to keep up with
77192 * reference counting.
77193 */
77194 +
77195 struct ftrace_module_file_ops {
77196 struct list_head list;
77197 struct module *mod;
77198 - struct file_operations id;
77199 - struct file_operations enable;
77200 - struct file_operations format;
77201 - struct file_operations filter;
77202 };
77203
77204 static void remove_subsystem_dir(const char *name)
77205 @@ -1004,17 +1001,12 @@ trace_create_file_ops(struct module *mod)
77206
77207 file_ops->mod = mod;
77208
77209 - file_ops->id = ftrace_event_id_fops;
77210 - file_ops->id.owner = mod;
77211 -
77212 - file_ops->enable = ftrace_enable_fops;
77213 - file_ops->enable.owner = mod;
77214 -
77215 - file_ops->filter = ftrace_event_filter_fops;
77216 - file_ops->filter.owner = mod;
77217 -
77218 - file_ops->format = ftrace_event_format_fops;
77219 - file_ops->format.owner = mod;
77220 + pax_open_kernel();
77221 + *(void **)&mod->trace_id.owner = mod;
77222 + *(void **)&mod->trace_enable.owner = mod;
77223 + *(void **)&mod->trace_filter.owner = mod;
77224 + *(void **)&mod->trace_format.owner = mod;
77225 + pax_close_kernel();
77226
77227 list_add(&file_ops->list, &ftrace_module_file_list);
77228
77229 @@ -1063,8 +1055,8 @@ static void trace_module_add_events(struct module *mod)
77230 call->mod = mod;
77231 list_add(&call->list, &ftrace_events);
77232 event_create_dir(call, d_events,
77233 - &file_ops->id, &file_ops->enable,
77234 - &file_ops->filter, &file_ops->format);
77235 + &mod->trace_id, &mod->trace_enable,
77236 + &mod->trace_filter, &mod->trace_format);
77237 }
77238 }
77239
77240 diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
77241 index 0acd834..b800b56 100644
77242 --- a/kernel/trace/trace_mmiotrace.c
77243 +++ b/kernel/trace/trace_mmiotrace.c
77244 @@ -23,7 +23,7 @@ struct header_iter {
77245 static struct trace_array *mmio_trace_array;
77246 static bool overrun_detected;
77247 static unsigned long prev_overruns;
77248 -static atomic_t dropped_count;
77249 +static atomic_unchecked_t dropped_count;
77250
77251 static void mmio_reset_data(struct trace_array *tr)
77252 {
77253 @@ -126,7 +126,7 @@ static void mmio_close(struct trace_iterator *iter)
77254
77255 static unsigned long count_overruns(struct trace_iterator *iter)
77256 {
77257 - unsigned long cnt = atomic_xchg(&dropped_count, 0);
77258 + unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
77259 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
77260
77261 if (over > prev_overruns)
77262 @@ -316,7 +316,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
77263 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
77264 sizeof(*entry), 0, pc);
77265 if (!event) {
77266 - atomic_inc(&dropped_count);
77267 + atomic_inc_unchecked(&dropped_count);
77268 return;
77269 }
77270 entry = ring_buffer_event_data(event);
77271 @@ -346,7 +346,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
77272 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
77273 sizeof(*entry), 0, pc);
77274 if (!event) {
77275 - atomic_inc(&dropped_count);
77276 + atomic_inc_unchecked(&dropped_count);
77277 return;
77278 }
77279 entry = ring_buffer_event_data(event);
77280 diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
77281 index b6c12c6..41fdc53 100644
77282 --- a/kernel/trace/trace_output.c
77283 +++ b/kernel/trace/trace_output.c
77284 @@ -237,7 +237,7 @@ int trace_seq_path(struct trace_seq *s, struct path *path)
77285 return 0;
77286 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
77287 if (!IS_ERR(p)) {
77288 - p = mangle_path(s->buffer + s->len, p, "\n");
77289 + p = mangle_path(s->buffer + s->len, p, "\n\\");
77290 if (p) {
77291 s->len = p - s->buffer;
77292 return 1;
77293 diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
77294 index 8504ac7..ecf0adb 100644
77295 --- a/kernel/trace/trace_stack.c
77296 +++ b/kernel/trace/trace_stack.c
77297 @@ -50,7 +50,7 @@ static inline void check_stack(void)
77298 return;
77299
77300 /* we do not handle interrupt stacks yet */
77301 - if (!object_is_on_stack(&this_size))
77302 + if (!object_starts_on_stack(&this_size))
77303 return;
77304
77305 local_irq_save(flags);
77306 diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
77307 index 40cafb0..d5ead43 100644
77308 --- a/kernel/trace/trace_workqueue.c
77309 +++ b/kernel/trace/trace_workqueue.c
77310 @@ -21,7 +21,7 @@ struct cpu_workqueue_stats {
77311 int cpu;
77312 pid_t pid;
77313 /* Can be inserted from interrupt or user context, need to be atomic */
77314 - atomic_t inserted;
77315 + atomic_unchecked_t inserted;
77316 /*
77317 * Don't need to be atomic, works are serialized in a single workqueue thread
77318 * on a single CPU.
77319 @@ -58,7 +58,7 @@ probe_workqueue_insertion(struct task_struct *wq_thread,
77320 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
77321 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
77322 if (node->pid == wq_thread->pid) {
77323 - atomic_inc(&node->inserted);
77324 + atomic_inc_unchecked(&node->inserted);
77325 goto found;
77326 }
77327 }
77328 @@ -205,7 +205,7 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
77329 tsk = get_pid_task(pid, PIDTYPE_PID);
77330 if (tsk) {
77331 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
77332 - atomic_read(&cws->inserted), cws->executed,
77333 + atomic_read_unchecked(&cws->inserted), cws->executed,
77334 tsk->comm);
77335 put_task_struct(tsk);
77336 }
77337 diff --git a/kernel/user.c b/kernel/user.c
77338 index 1b91701..8795237 100644
77339 --- a/kernel/user.c
77340 +++ b/kernel/user.c
77341 @@ -159,6 +159,7 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
77342 spin_lock_irq(&uidhash_lock);
77343 up = uid_hash_find(uid, hashent);
77344 if (up) {
77345 + put_user_ns(ns);
77346 key_put(new->uid_keyring);
77347 key_put(new->session_keyring);
77348 kmem_cache_free(uid_cachep, new);
77349 diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
77350 index 234ceb1..ad74049 100644
77351 --- a/lib/Kconfig.debug
77352 +++ b/lib/Kconfig.debug
77353 @@ -905,7 +905,7 @@ config LATENCYTOP
77354 select STACKTRACE
77355 select SCHEDSTATS
77356 select SCHED_DEBUG
77357 - depends on HAVE_LATENCYTOP_SUPPORT
77358 + depends on HAVE_LATENCYTOP_SUPPORT && !GRKERNSEC_HIDESYM
77359 help
77360 Enable this option if you want to use the LatencyTOP tool
77361 to find out which userspace is blocking on what kernel operations.
77362 diff --git a/lib/bitmap.c b/lib/bitmap.c
77363 index 7025658..8d14cab 100644
77364 --- a/lib/bitmap.c
77365 +++ b/lib/bitmap.c
77366 @@ -341,7 +341,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
77367 {
77368 int c, old_c, totaldigits, ndigits, nchunks, nbits;
77369 u32 chunk;
77370 - const char __user *ubuf = buf;
77371 + const char __user *ubuf = (const char __force_user *)buf;
77372
77373 bitmap_zero(maskp, nmaskbits);
77374
77375 @@ -426,7 +426,7 @@ int bitmap_parse_user(const char __user *ubuf,
77376 {
77377 if (!access_ok(VERIFY_READ, ubuf, ulen))
77378 return -EFAULT;
77379 - return __bitmap_parse((const char *)ubuf, ulen, 1, maskp, nmaskbits);
77380 + return __bitmap_parse((const char __force_kernel *)ubuf, ulen, 1, maskp, nmaskbits);
77381 }
77382 EXPORT_SYMBOL(bitmap_parse_user);
77383
77384 diff --git a/lib/bug.c b/lib/bug.c
77385 index 300e41a..2779eb0 100644
77386 --- a/lib/bug.c
77387 +++ b/lib/bug.c
77388 @@ -135,6 +135,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
77389 return BUG_TRAP_TYPE_NONE;
77390
77391 bug = find_bug(bugaddr);
77392 + if (!bug)
77393 + return BUG_TRAP_TYPE_NONE;
77394
77395 printk(KERN_EMERG "------------[ cut here ]------------\n");
77396
77397 diff --git a/lib/debugobjects.c b/lib/debugobjects.c
77398 index 2b413db..e21d207 100644
77399 --- a/lib/debugobjects.c
77400 +++ b/lib/debugobjects.c
77401 @@ -277,7 +277,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
77402 if (limit > 4)
77403 return;
77404
77405 - is_on_stack = object_is_on_stack(addr);
77406 + is_on_stack = object_starts_on_stack(addr);
77407 if (is_on_stack == onstack)
77408 return;
77409
77410 diff --git a/lib/devres.c b/lib/devres.c
77411 index 72c8909..7543868 100644
77412 --- a/lib/devres.c
77413 +++ b/lib/devres.c
77414 @@ -80,7 +80,7 @@ void devm_iounmap(struct device *dev, void __iomem *addr)
77415 {
77416 iounmap(addr);
77417 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
77418 - (void *)addr));
77419 + (void __force *)addr));
77420 }
77421 EXPORT_SYMBOL(devm_iounmap);
77422
77423 @@ -140,7 +140,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
77424 {
77425 ioport_unmap(addr);
77426 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
77427 - devm_ioport_map_match, (void *)addr));
77428 + devm_ioport_map_match, (void __force *)addr));
77429 }
77430 EXPORT_SYMBOL(devm_ioport_unmap);
77431
77432 diff --git a/lib/dma-debug.c b/lib/dma-debug.c
77433 index 084e879..0674448 100644
77434 --- a/lib/dma-debug.c
77435 +++ b/lib/dma-debug.c
77436 @@ -861,7 +861,7 @@ out:
77437
77438 static void check_for_stack(struct device *dev, void *addr)
77439 {
77440 - if (object_is_on_stack(addr))
77441 + if (object_starts_on_stack(addr))
77442 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
77443 "stack [addr=%p]\n", addr);
77444 }
77445 diff --git a/lib/idr.c b/lib/idr.c
77446 index eda7ba3..915dfae 100644
77447 --- a/lib/idr.c
77448 +++ b/lib/idr.c
77449 @@ -156,7 +156,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
77450 id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
77451
77452 /* if already at the top layer, we need to grow */
77453 - if (id >= 1 << (idp->layers * IDR_BITS)) {
77454 + if (id >= (1 << (idp->layers * IDR_BITS))) {
77455 *starting_id = id;
77456 return IDR_NEED_TO_GROW;
77457 }
77458 diff --git a/lib/inflate.c b/lib/inflate.c
77459 index d102559..4215f31 100644
77460 --- a/lib/inflate.c
77461 +++ b/lib/inflate.c
77462 @@ -266,7 +266,7 @@ static void free(void *where)
77463 malloc_ptr = free_mem_ptr;
77464 }
77465 #else
77466 -#define malloc(a) kmalloc(a, GFP_KERNEL)
77467 +#define malloc(a) kmalloc((a), GFP_KERNEL)
77468 #define free(a) kfree(a)
77469 #endif
77470
77471 diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
77472 index bd2bea9..6b3c95e 100644
77473 --- a/lib/is_single_threaded.c
77474 +++ b/lib/is_single_threaded.c
77475 @@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
77476 struct task_struct *p, *t;
77477 bool ret;
77478
77479 + if (!mm)
77480 + return true;
77481 +
77482 if (atomic_read(&task->signal->live) != 1)
77483 return false;
77484
77485 diff --git a/lib/kobject.c b/lib/kobject.c
77486 index b512b74..8115eb1 100644
77487 --- a/lib/kobject.c
77488 +++ b/lib/kobject.c
77489 @@ -700,7 +700,7 @@ static ssize_t kobj_attr_store(struct kobject *kobj, struct attribute *attr,
77490 return ret;
77491 }
77492
77493 -struct sysfs_ops kobj_sysfs_ops = {
77494 +const struct sysfs_ops kobj_sysfs_ops = {
77495 .show = kobj_attr_show,
77496 .store = kobj_attr_store,
77497 };
77498 @@ -789,7 +789,7 @@ static struct kobj_type kset_ktype = {
77499 * If the kset was not able to be created, NULL will be returned.
77500 */
77501 static struct kset *kset_create(const char *name,
77502 - struct kset_uevent_ops *uevent_ops,
77503 + const struct kset_uevent_ops *uevent_ops,
77504 struct kobject *parent_kobj)
77505 {
77506 struct kset *kset;
77507 @@ -832,7 +832,7 @@ static struct kset *kset_create(const char *name,
77508 * If the kset was not able to be created, NULL will be returned.
77509 */
77510 struct kset *kset_create_and_add(const char *name,
77511 - struct kset_uevent_ops *uevent_ops,
77512 + const struct kset_uevent_ops *uevent_ops,
77513 struct kobject *parent_kobj)
77514 {
77515 struct kset *kset;
77516 diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
77517 index 507b821..0bf8ed0 100644
77518 --- a/lib/kobject_uevent.c
77519 +++ b/lib/kobject_uevent.c
77520 @@ -95,7 +95,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
77521 const char *subsystem;
77522 struct kobject *top_kobj;
77523 struct kset *kset;
77524 - struct kset_uevent_ops *uevent_ops;
77525 + const struct kset_uevent_ops *uevent_ops;
77526 u64 seq;
77527 int i = 0;
77528 int retval = 0;
77529 diff --git a/lib/kref.c b/lib/kref.c
77530 index 9ecd6e8..12c94c1 100644
77531 --- a/lib/kref.c
77532 +++ b/lib/kref.c
77533 @@ -61,7 +61,7 @@ void kref_get(struct kref *kref)
77534 */
77535 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
77536 {
77537 - WARN_ON(release == NULL);
77538 + BUG_ON(release == NULL);
77539 WARN_ON(release == (void (*)(struct kref *))kfree);
77540
77541 if (atomic_dec_and_test(&kref->refcount)) {
77542 diff --git a/lib/parser.c b/lib/parser.c
77543 index b00d020..1b34325 100644
77544 --- a/lib/parser.c
77545 +++ b/lib/parser.c
77546 @@ -126,7 +126,7 @@ static int match_number(substring_t *s, int *result, int base)
77547 char *buf;
77548 int ret;
77549
77550 - buf = kmalloc(s->to - s->from + 1, GFP_KERNEL);
77551 + buf = kmalloc((s->to - s->from) + 1, GFP_KERNEL);
77552 if (!buf)
77553 return -ENOMEM;
77554 memcpy(buf, s->from, s->to - s->from);
77555 diff --git a/lib/radix-tree.c b/lib/radix-tree.c
77556 index 92cdd99..a8149d7 100644
77557 --- a/lib/radix-tree.c
77558 +++ b/lib/radix-tree.c
77559 @@ -81,7 +81,7 @@ struct radix_tree_preload {
77560 int nr;
77561 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
77562 };
77563 -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
77564 +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
77565
77566 static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
77567 {
77568 diff --git a/lib/random32.c b/lib/random32.c
77569 index 217d5c4..45aba8a 100644
77570 --- a/lib/random32.c
77571 +++ b/lib/random32.c
77572 @@ -61,7 +61,7 @@ static u32 __random32(struct rnd_state *state)
77573 */
77574 static inline u32 __seed(u32 x, u32 m)
77575 {
77576 - return (x < m) ? x + m : x;
77577 + return (x <= m) ? x + m + 1 : x;
77578 }
77579
77580 /**
77581 diff --git a/lib/vsprintf.c b/lib/vsprintf.c
77582 index 33bed5e..1477e46 100644
77583 --- a/lib/vsprintf.c
77584 +++ b/lib/vsprintf.c
77585 @@ -16,6 +16,9 @@
77586 * - scnprintf and vscnprintf
77587 */
77588
77589 +#ifdef CONFIG_GRKERNSEC_HIDESYM
77590 +#define __INCLUDED_BY_HIDESYM 1
77591 +#endif
77592 #include <stdarg.h>
77593 #include <linux/module.h>
77594 #include <linux/types.h>
77595 @@ -546,12 +549,12 @@ static char *number(char *buf, char *end, unsigned long long num,
77596 return buf;
77597 }
77598
77599 -static char *string(char *buf, char *end, char *s, struct printf_spec spec)
77600 +static char *string(char *buf, char *end, const char *s, struct printf_spec spec)
77601 {
77602 int len, i;
77603
77604 if ((unsigned long)s < PAGE_SIZE)
77605 - s = "<NULL>";
77606 + s = "(null)";
77607
77608 len = strnlen(s, spec.precision);
77609
77610 @@ -581,7 +584,7 @@ static char *symbol_string(char *buf, char *end, void *ptr,
77611 unsigned long value = (unsigned long) ptr;
77612 #ifdef CONFIG_KALLSYMS
77613 char sym[KSYM_SYMBOL_LEN];
77614 - if (ext != 'f' && ext != 's')
77615 + if (ext != 'f' && ext != 's' && ext != 'a')
77616 sprint_symbol(sym, value);
77617 else
77618 kallsyms_lookup(value, NULL, NULL, NULL, sym);
77619 @@ -801,6 +804,8 @@ static char *ip4_addr_string(char *buf, char *end, const u8 *addr,
77620 * - 'f' For simple symbolic function names without offset
77621 * - 'S' For symbolic direct pointers with offset
77622 * - 's' For symbolic direct pointers without offset
77623 + * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
77624 + * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
77625 * - 'R' For a struct resource pointer, it prints the range of
77626 * addresses (not the name nor the flags)
77627 * - 'M' For a 6-byte MAC address, it prints the address in the
77628 @@ -822,7 +827,7 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
77629 struct printf_spec spec)
77630 {
77631 if (!ptr)
77632 - return string(buf, end, "(null)", spec);
77633 + return string(buf, end, "(nil)", spec);
77634
77635 switch (*fmt) {
77636 case 'F':
77637 @@ -831,6 +836,14 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
77638 case 's':
77639 /* Fallthrough */
77640 case 'S':
77641 +#ifdef CONFIG_GRKERNSEC_HIDESYM
77642 + break;
77643 +#else
77644 + return symbol_string(buf, end, ptr, spec, *fmt);
77645 +#endif
77646 + case 'a':
77647 + /* Fallthrough */
77648 + case 'A':
77649 return symbol_string(buf, end, ptr, spec, *fmt);
77650 case 'R':
77651 return resource_string(buf, end, ptr, spec);
77652 @@ -1445,7 +1458,7 @@ do { \
77653 size_t len;
77654 if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE
77655 || (unsigned long)save_str < PAGE_SIZE)
77656 - save_str = "<NULL>";
77657 + save_str = "(null)";
77658 len = strlen(save_str);
77659 if (str + len + 1 < end)
77660 memcpy(str, save_str, len + 1);
77661 @@ -1555,11 +1568,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
77662 typeof(type) value; \
77663 if (sizeof(type) == 8) { \
77664 args = PTR_ALIGN(args, sizeof(u32)); \
77665 - *(u32 *)&value = *(u32 *)args; \
77666 - *((u32 *)&value + 1) = *(u32 *)(args + 4); \
77667 + *(u32 *)&value = *(const u32 *)args; \
77668 + *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
77669 } else { \
77670 args = PTR_ALIGN(args, sizeof(type)); \
77671 - value = *(typeof(type) *)args; \
77672 + value = *(const typeof(type) *)args; \
77673 } \
77674 args += sizeof(type); \
77675 value; \
77676 @@ -1622,7 +1635,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
77677 const char *str_arg = args;
77678 size_t len = strlen(str_arg);
77679 args += len + 1;
77680 - str = string(str, end, (char *)str_arg, spec);
77681 + str = string(str, end, str_arg, spec);
77682 break;
77683 }
77684
77685 diff --git a/localversion-grsec b/localversion-grsec
77686 new file mode 100644
77687 index 0000000..7cd6065
77688 --- /dev/null
77689 +++ b/localversion-grsec
77690 @@ -0,0 +1 @@
77691 +-grsec
77692 diff --git a/mm/Kconfig b/mm/Kconfig
77693 index 2c19c0b..f3c3f83 100644
77694 --- a/mm/Kconfig
77695 +++ b/mm/Kconfig
77696 @@ -228,7 +228,7 @@ config KSM
77697 config DEFAULT_MMAP_MIN_ADDR
77698 int "Low address space to protect from user allocation"
77699 depends on MMU
77700 - default 4096
77701 + default 65536
77702 help
77703 This is the portion of low virtual memory which should be protected
77704 from userspace allocation. Keeping a user from writing to low pages
77705 diff --git a/mm/backing-dev.c b/mm/backing-dev.c
77706 index 67a33a5..094dcf1 100644
77707 --- a/mm/backing-dev.c
77708 +++ b/mm/backing-dev.c
77709 @@ -272,7 +272,7 @@ static void bdi_task_init(struct backing_dev_info *bdi,
77710 list_add_tail_rcu(&wb->list, &bdi->wb_list);
77711 spin_unlock(&bdi->wb_lock);
77712
77713 - tsk->flags |= PF_FLUSHER | PF_SWAPWRITE;
77714 + tsk->flags |= PF_SWAPWRITE;
77715 set_freezable();
77716
77717 /*
77718 @@ -484,7 +484,7 @@ static void bdi_add_to_pending(struct rcu_head *head)
77719 * Add the default flusher task that gets created for any bdi
77720 * that has dirty data pending writeout
77721 */
77722 -void static bdi_add_default_flusher_task(struct backing_dev_info *bdi)
77723 +static void bdi_add_default_flusher_task(struct backing_dev_info *bdi)
77724 {
77725 if (!bdi_cap_writeback_dirty(bdi))
77726 return;
77727 diff --git a/mm/filemap.c b/mm/filemap.c
77728 index a1fe378..e26702f 100644
77729 --- a/mm/filemap.c
77730 +++ b/mm/filemap.c
77731 @@ -1631,7 +1631,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
77732 struct address_space *mapping = file->f_mapping;
77733
77734 if (!mapping->a_ops->readpage)
77735 - return -ENOEXEC;
77736 + return -ENODEV;
77737 file_accessed(file);
77738 vma->vm_ops = &generic_file_vm_ops;
77739 vma->vm_flags |= VM_CAN_NONLINEAR;
77740 @@ -2024,6 +2024,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
77741 *pos = i_size_read(inode);
77742
77743 if (limit != RLIM_INFINITY) {
77744 + gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
77745 if (*pos >= limit) {
77746 send_sig(SIGXFSZ, current, 0);
77747 return -EFBIG;
77748 diff --git a/mm/fremap.c b/mm/fremap.c
77749 index b6ec85a..a24ac22 100644
77750 --- a/mm/fremap.c
77751 +++ b/mm/fremap.c
77752 @@ -153,6 +153,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
77753 retry:
77754 vma = find_vma(mm, start);
77755
77756 +#ifdef CONFIG_PAX_SEGMEXEC
77757 + if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
77758 + goto out;
77759 +#endif
77760 +
77761 /*
77762 * Make sure the vma is shared, that it supports prefaulting,
77763 * and that the remapped range is valid and fully within
77764 @@ -221,7 +226,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
77765 /*
77766 * drop PG_Mlocked flag for over-mapped range
77767 */
77768 - unsigned int saved_flags = vma->vm_flags;
77769 + unsigned long saved_flags = vma->vm_flags;
77770 munlock_vma_pages_range(vma, start, start + size);
77771 vma->vm_flags = saved_flags;
77772 }
77773 diff --git a/mm/highmem.c b/mm/highmem.c
77774 index 9c1e627..5ca9447 100644
77775 --- a/mm/highmem.c
77776 +++ b/mm/highmem.c
77777 @@ -116,9 +116,10 @@ static void flush_all_zero_pkmaps(void)
77778 * So no dangers, even with speculative execution.
77779 */
77780 page = pte_page(pkmap_page_table[i]);
77781 + pax_open_kernel();
77782 pte_clear(&init_mm, (unsigned long)page_address(page),
77783 &pkmap_page_table[i]);
77784 -
77785 + pax_close_kernel();
77786 set_page_address(page, NULL);
77787 need_flush = 1;
77788 }
77789 @@ -177,9 +178,11 @@ start:
77790 }
77791 }
77792 vaddr = PKMAP_ADDR(last_pkmap_nr);
77793 +
77794 + pax_open_kernel();
77795 set_pte_at(&init_mm, vaddr,
77796 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
77797 -
77798 + pax_close_kernel();
77799 pkmap_count[last_pkmap_nr] = 1;
77800 set_page_address(page, (void *)vaddr);
77801
77802 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
77803 index 5e1e508..ac70275 100644
77804 --- a/mm/hugetlb.c
77805 +++ b/mm/hugetlb.c
77806 @@ -869,6 +869,7 @@ free:
77807 list_del(&page->lru);
77808 enqueue_huge_page(h, page);
77809 }
77810 + spin_unlock(&hugetlb_lock);
77811
77812 /* Free unnecessary surplus pages to the buddy allocator */
77813 if (!list_empty(&surplus_list)) {
77814 @@ -1933,6 +1934,26 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
77815 return 1;
77816 }
77817
77818 +#ifdef CONFIG_PAX_SEGMEXEC
77819 +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
77820 +{
77821 + struct mm_struct *mm = vma->vm_mm;
77822 + struct vm_area_struct *vma_m;
77823 + unsigned long address_m;
77824 + pte_t *ptep_m;
77825 +
77826 + vma_m = pax_find_mirror_vma(vma);
77827 + if (!vma_m)
77828 + return;
77829 +
77830 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
77831 + address_m = address + SEGMEXEC_TASK_SIZE;
77832 + ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
77833 + get_page(page_m);
77834 + set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
77835 +}
77836 +#endif
77837 +
77838 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
77839 unsigned long address, pte_t *ptep, pte_t pte,
77840 struct page *pagecache_page)
77841 @@ -2004,6 +2025,11 @@ retry_avoidcopy:
77842 huge_ptep_clear_flush(vma, address, ptep);
77843 set_huge_pte_at(mm, address, ptep,
77844 make_huge_pte(vma, new_page, 1));
77845 +
77846 +#ifdef CONFIG_PAX_SEGMEXEC
77847 + pax_mirror_huge_pte(vma, address, new_page);
77848 +#endif
77849 +
77850 /* Make the old page be freed below */
77851 new_page = old_page;
77852 }
77853 @@ -2135,6 +2161,10 @@ retry:
77854 && (vma->vm_flags & VM_SHARED)));
77855 set_huge_pte_at(mm, address, ptep, new_pte);
77856
77857 +#ifdef CONFIG_PAX_SEGMEXEC
77858 + pax_mirror_huge_pte(vma, address, page);
77859 +#endif
77860 +
77861 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
77862 /* Optimization, do the COW without a second fault */
77863 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
77864 @@ -2163,6 +2193,28 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
77865 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
77866 struct hstate *h = hstate_vma(vma);
77867
77868 +#ifdef CONFIG_PAX_SEGMEXEC
77869 + struct vm_area_struct *vma_m;
77870 +
77871 + vma_m = pax_find_mirror_vma(vma);
77872 + if (vma_m) {
77873 + unsigned long address_m;
77874 +
77875 + if (vma->vm_start > vma_m->vm_start) {
77876 + address_m = address;
77877 + address -= SEGMEXEC_TASK_SIZE;
77878 + vma = vma_m;
77879 + h = hstate_vma(vma);
77880 + } else
77881 + address_m = address + SEGMEXEC_TASK_SIZE;
77882 +
77883 + if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
77884 + return VM_FAULT_OOM;
77885 + address_m &= HPAGE_MASK;
77886 + unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
77887 + }
77888 +#endif
77889 +
77890 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
77891 if (!ptep)
77892 return VM_FAULT_OOM;
77893 diff --git a/mm/internal.h b/mm/internal.h
77894 index f03e8e2..7354343 100644
77895 --- a/mm/internal.h
77896 +++ b/mm/internal.h
77897 @@ -49,6 +49,7 @@ extern void putback_lru_page(struct page *page);
77898 * in mm/page_alloc.c
77899 */
77900 extern void __free_pages_bootmem(struct page *page, unsigned int order);
77901 +extern void free_compound_page(struct page *page);
77902 extern void prep_compound_page(struct page *page, unsigned long order);
77903
77904
77905 diff --git a/mm/kmemleak.c b/mm/kmemleak.c
77906 index c346660..b47382f 100644
77907 --- a/mm/kmemleak.c
77908 +++ b/mm/kmemleak.c
77909 @@ -358,7 +358,7 @@ static void print_unreferenced(struct seq_file *seq,
77910
77911 for (i = 0; i < object->trace_len; i++) {
77912 void *ptr = (void *)object->trace[i];
77913 - seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
77914 + seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
77915 }
77916 }
77917
77918 diff --git a/mm/maccess.c b/mm/maccess.c
77919 index 9073695..1127f348 100644
77920 --- a/mm/maccess.c
77921 +++ b/mm/maccess.c
77922 @@ -14,7 +14,7 @@
77923 * Safely read from address @src to the buffer at @dst. If a kernel fault
77924 * happens, handle that and return -EFAULT.
77925 */
77926 -long probe_kernel_read(void *dst, void *src, size_t size)
77927 +long probe_kernel_read(void *dst, const void *src, size_t size)
77928 {
77929 long ret;
77930 mm_segment_t old_fs = get_fs();
77931 @@ -22,7 +22,7 @@ long probe_kernel_read(void *dst, void *src, size_t size)
77932 set_fs(KERNEL_DS);
77933 pagefault_disable();
77934 ret = __copy_from_user_inatomic(dst,
77935 - (__force const void __user *)src, size);
77936 + (const void __force_user *)src, size);
77937 pagefault_enable();
77938 set_fs(old_fs);
77939
77940 @@ -39,14 +39,14 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
77941 * Safely write to address @dst from the buffer at @src. If a kernel fault
77942 * happens, handle that and return -EFAULT.
77943 */
77944 -long notrace __weak probe_kernel_write(void *dst, void *src, size_t size)
77945 +long notrace __weak probe_kernel_write(void *dst, const void *src, size_t size)
77946 {
77947 long ret;
77948 mm_segment_t old_fs = get_fs();
77949
77950 set_fs(KERNEL_DS);
77951 pagefault_disable();
77952 - ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
77953 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
77954 pagefault_enable();
77955 set_fs(old_fs);
77956
77957 diff --git a/mm/madvise.c b/mm/madvise.c
77958 index 35b1479..499f7d4 100644
77959 --- a/mm/madvise.c
77960 +++ b/mm/madvise.c
77961 @@ -44,6 +44,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
77962 pgoff_t pgoff;
77963 unsigned long new_flags = vma->vm_flags;
77964
77965 +#ifdef CONFIG_PAX_SEGMEXEC
77966 + struct vm_area_struct *vma_m;
77967 +#endif
77968 +
77969 switch (behavior) {
77970 case MADV_NORMAL:
77971 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
77972 @@ -103,6 +107,13 @@ success:
77973 /*
77974 * vm_flags is protected by the mmap_sem held in write mode.
77975 */
77976 +
77977 +#ifdef CONFIG_PAX_SEGMEXEC
77978 + vma_m = pax_find_mirror_vma(vma);
77979 + if (vma_m)
77980 + vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
77981 +#endif
77982 +
77983 vma->vm_flags = new_flags;
77984
77985 out:
77986 @@ -161,6 +172,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
77987 struct vm_area_struct ** prev,
77988 unsigned long start, unsigned long end)
77989 {
77990 +
77991 +#ifdef CONFIG_PAX_SEGMEXEC
77992 + struct vm_area_struct *vma_m;
77993 +#endif
77994 +
77995 *prev = vma;
77996 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
77997 return -EINVAL;
77998 @@ -173,6 +189,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
77999 zap_page_range(vma, start, end - start, &details);
78000 } else
78001 zap_page_range(vma, start, end - start, NULL);
78002 +
78003 +#ifdef CONFIG_PAX_SEGMEXEC
78004 + vma_m = pax_find_mirror_vma(vma);
78005 + if (vma_m) {
78006 + if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
78007 + struct zap_details details = {
78008 + .nonlinear_vma = vma_m,
78009 + .last_index = ULONG_MAX,
78010 + };
78011 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
78012 + } else
78013 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
78014 + }
78015 +#endif
78016 +
78017 return 0;
78018 }
78019
78020 @@ -359,6 +390,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
78021 if (end < start)
78022 goto out;
78023
78024 +#ifdef CONFIG_PAX_SEGMEXEC
78025 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
78026 + if (end > SEGMEXEC_TASK_SIZE)
78027 + goto out;
78028 + } else
78029 +#endif
78030 +
78031 + if (end > TASK_SIZE)
78032 + goto out;
78033 +
78034 error = 0;
78035 if (end == start)
78036 goto out;
78037 diff --git a/mm/memory-failure.c b/mm/memory-failure.c
78038 index 8aeba53..b4a4198 100644
78039 --- a/mm/memory-failure.c
78040 +++ b/mm/memory-failure.c
78041 @@ -46,7 +46,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
78042
78043 int sysctl_memory_failure_recovery __read_mostly = 1;
78044
78045 -atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
78046 +atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
78047
78048 /*
78049 * Send all the processes who have the page mapped an ``action optional''
78050 @@ -64,7 +64,7 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
78051 si.si_signo = SIGBUS;
78052 si.si_errno = 0;
78053 si.si_code = BUS_MCEERR_AO;
78054 - si.si_addr = (void *)addr;
78055 + si.si_addr = (void __user *)addr;
78056 #ifdef __ARCH_SI_TRAPNO
78057 si.si_trapno = trapno;
78058 #endif
78059 @@ -745,7 +745,7 @@ int __memory_failure(unsigned long pfn, int trapno, int ref)
78060 return 0;
78061 }
78062
78063 - atomic_long_add(1, &mce_bad_pages);
78064 + atomic_long_add_unchecked(1, &mce_bad_pages);
78065
78066 /*
78067 * We need/can do nothing about count=0 pages.
78068 diff --git a/mm/memory.c b/mm/memory.c
78069 index 6c836d3..48f3264 100644
78070 --- a/mm/memory.c
78071 +++ b/mm/memory.c
78072 @@ -187,8 +187,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
78073 return;
78074
78075 pmd = pmd_offset(pud, start);
78076 +
78077 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
78078 pud_clear(pud);
78079 pmd_free_tlb(tlb, pmd, start);
78080 +#endif
78081 +
78082 }
78083
78084 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
78085 @@ -219,9 +223,12 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
78086 if (end - 1 > ceiling - 1)
78087 return;
78088
78089 +#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
78090 pud = pud_offset(pgd, start);
78091 pgd_clear(pgd);
78092 pud_free_tlb(tlb, pud, start);
78093 +#endif
78094 +
78095 }
78096
78097 /*
78098 @@ -1251,10 +1258,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
78099 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
78100 i = 0;
78101
78102 - do {
78103 + while (nr_pages) {
78104 struct vm_area_struct *vma;
78105
78106 - vma = find_extend_vma(mm, start);
78107 + vma = find_vma(mm, start);
78108 if (!vma && in_gate_area(tsk, start)) {
78109 unsigned long pg = start & PAGE_MASK;
78110 struct vm_area_struct *gate_vma = get_gate_vma(tsk);
78111 @@ -1306,7 +1313,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
78112 continue;
78113 }
78114
78115 - if (!vma ||
78116 + if (!vma || start < vma->vm_start ||
78117 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
78118 !(vm_flags & vma->vm_flags))
78119 return i ? : -EFAULT;
78120 @@ -1381,7 +1388,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
78121 start += PAGE_SIZE;
78122 nr_pages--;
78123 } while (nr_pages && start < vma->vm_end);
78124 - } while (nr_pages);
78125 + }
78126 return i;
78127 }
78128
78129 @@ -1526,6 +1533,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
78130 page_add_file_rmap(page);
78131 set_pte_at(mm, addr, pte, mk_pte(page, prot));
78132
78133 +#ifdef CONFIG_PAX_SEGMEXEC
78134 + pax_mirror_file_pte(vma, addr, page, ptl);
78135 +#endif
78136 +
78137 retval = 0;
78138 pte_unmap_unlock(pte, ptl);
78139 return retval;
78140 @@ -1560,10 +1571,22 @@ out:
78141 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
78142 struct page *page)
78143 {
78144 +
78145 +#ifdef CONFIG_PAX_SEGMEXEC
78146 + struct vm_area_struct *vma_m;
78147 +#endif
78148 +
78149 if (addr < vma->vm_start || addr >= vma->vm_end)
78150 return -EFAULT;
78151 if (!page_count(page))
78152 return -EINVAL;
78153 +
78154 +#ifdef CONFIG_PAX_SEGMEXEC
78155 + vma_m = pax_find_mirror_vma(vma);
78156 + if (vma_m)
78157 + vma_m->vm_flags |= VM_INSERTPAGE;
78158 +#endif
78159 +
78160 vma->vm_flags |= VM_INSERTPAGE;
78161 return insert_page(vma, addr, page, vma->vm_page_prot);
78162 }
78163 @@ -1649,6 +1672,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
78164 unsigned long pfn)
78165 {
78166 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
78167 + BUG_ON(vma->vm_mirror);
78168
78169 if (addr < vma->vm_start || addr >= vma->vm_end)
78170 return -EFAULT;
78171 @@ -1977,6 +2001,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
78172 copy_user_highpage(dst, src, va, vma);
78173 }
78174
78175 +#ifdef CONFIG_PAX_SEGMEXEC
78176 +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
78177 +{
78178 + struct mm_struct *mm = vma->vm_mm;
78179 + spinlock_t *ptl;
78180 + pte_t *pte, entry;
78181 +
78182 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
78183 + entry = *pte;
78184 + if (!pte_present(entry)) {
78185 + if (!pte_none(entry)) {
78186 + BUG_ON(pte_file(entry));
78187 + free_swap_and_cache(pte_to_swp_entry(entry));
78188 + pte_clear_not_present_full(mm, address, pte, 0);
78189 + }
78190 + } else {
78191 + struct page *page;
78192 +
78193 + flush_cache_page(vma, address, pte_pfn(entry));
78194 + entry = ptep_clear_flush(vma, address, pte);
78195 + BUG_ON(pte_dirty(entry));
78196 + page = vm_normal_page(vma, address, entry);
78197 + if (page) {
78198 + update_hiwater_rss(mm);
78199 + if (PageAnon(page))
78200 + dec_mm_counter(mm, anon_rss);
78201 + else
78202 + dec_mm_counter(mm, file_rss);
78203 + page_remove_rmap(page);
78204 + page_cache_release(page);
78205 + }
78206 + }
78207 + pte_unmap_unlock(pte, ptl);
78208 +}
78209 +
78210 +/* PaX: if vma is mirrored, synchronize the mirror's PTE
78211 + *
78212 + * the ptl of the lower mapped page is held on entry and is not released on exit
78213 + * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
78214 + */
78215 +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
78216 +{
78217 + struct mm_struct *mm = vma->vm_mm;
78218 + unsigned long address_m;
78219 + spinlock_t *ptl_m;
78220 + struct vm_area_struct *vma_m;
78221 + pmd_t *pmd_m;
78222 + pte_t *pte_m, entry_m;
78223 +
78224 + BUG_ON(!page_m || !PageAnon(page_m));
78225 +
78226 + vma_m = pax_find_mirror_vma(vma);
78227 + if (!vma_m)
78228 + return;
78229 +
78230 + BUG_ON(!PageLocked(page_m));
78231 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
78232 + address_m = address + SEGMEXEC_TASK_SIZE;
78233 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
78234 + pte_m = pte_offset_map_nested(pmd_m, address_m);
78235 + ptl_m = pte_lockptr(mm, pmd_m);
78236 + if (ptl != ptl_m) {
78237 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
78238 + if (!pte_none(*pte_m))
78239 + goto out;
78240 + }
78241 +
78242 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
78243 + page_cache_get(page_m);
78244 + page_add_anon_rmap(page_m, vma_m, address_m);
78245 + inc_mm_counter(mm, anon_rss);
78246 + set_pte_at(mm, address_m, pte_m, entry_m);
78247 + update_mmu_cache(vma_m, address_m, entry_m);
78248 +out:
78249 + if (ptl != ptl_m)
78250 + spin_unlock(ptl_m);
78251 + pte_unmap_nested(pte_m);
78252 + unlock_page(page_m);
78253 +}
78254 +
78255 +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
78256 +{
78257 + struct mm_struct *mm = vma->vm_mm;
78258 + unsigned long address_m;
78259 + spinlock_t *ptl_m;
78260 + struct vm_area_struct *vma_m;
78261 + pmd_t *pmd_m;
78262 + pte_t *pte_m, entry_m;
78263 +
78264 + BUG_ON(!page_m || PageAnon(page_m));
78265 +
78266 + vma_m = pax_find_mirror_vma(vma);
78267 + if (!vma_m)
78268 + return;
78269 +
78270 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
78271 + address_m = address + SEGMEXEC_TASK_SIZE;
78272 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
78273 + pte_m = pte_offset_map_nested(pmd_m, address_m);
78274 + ptl_m = pte_lockptr(mm, pmd_m);
78275 + if (ptl != ptl_m) {
78276 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
78277 + if (!pte_none(*pte_m))
78278 + goto out;
78279 + }
78280 +
78281 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
78282 + page_cache_get(page_m);
78283 + page_add_file_rmap(page_m);
78284 + inc_mm_counter(mm, file_rss);
78285 + set_pte_at(mm, address_m, pte_m, entry_m);
78286 + update_mmu_cache(vma_m, address_m, entry_m);
78287 +out:
78288 + if (ptl != ptl_m)
78289 + spin_unlock(ptl_m);
78290 + pte_unmap_nested(pte_m);
78291 +}
78292 +
78293 +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
78294 +{
78295 + struct mm_struct *mm = vma->vm_mm;
78296 + unsigned long address_m;
78297 + spinlock_t *ptl_m;
78298 + struct vm_area_struct *vma_m;
78299 + pmd_t *pmd_m;
78300 + pte_t *pte_m, entry_m;
78301 +
78302 + vma_m = pax_find_mirror_vma(vma);
78303 + if (!vma_m)
78304 + return;
78305 +
78306 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
78307 + address_m = address + SEGMEXEC_TASK_SIZE;
78308 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
78309 + pte_m = pte_offset_map_nested(pmd_m, address_m);
78310 + ptl_m = pte_lockptr(mm, pmd_m);
78311 + if (ptl != ptl_m) {
78312 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
78313 + if (!pte_none(*pte_m))
78314 + goto out;
78315 + }
78316 +
78317 + entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
78318 + set_pte_at(mm, address_m, pte_m, entry_m);
78319 +out:
78320 + if (ptl != ptl_m)
78321 + spin_unlock(ptl_m);
78322 + pte_unmap_nested(pte_m);
78323 +}
78324 +
78325 +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
78326 +{
78327 + struct page *page_m;
78328 + pte_t entry;
78329 +
78330 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
78331 + goto out;
78332 +
78333 + entry = *pte;
78334 + page_m = vm_normal_page(vma, address, entry);
78335 + if (!page_m)
78336 + pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
78337 + else if (PageAnon(page_m)) {
78338 + if (pax_find_mirror_vma(vma)) {
78339 + pte_unmap_unlock(pte, ptl);
78340 + lock_page(page_m);
78341 + pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
78342 + if (pte_same(entry, *pte))
78343 + pax_mirror_anon_pte(vma, address, page_m, ptl);
78344 + else
78345 + unlock_page(page_m);
78346 + }
78347 + } else
78348 + pax_mirror_file_pte(vma, address, page_m, ptl);
78349 +
78350 +out:
78351 + pte_unmap_unlock(pte, ptl);
78352 +}
78353 +#endif
78354 +
78355 /*
78356 * This routine handles present pages, when users try to write
78357 * to a shared page. It is done by copying the page to a new address
78358 @@ -2156,6 +2360,12 @@ gotten:
78359 */
78360 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
78361 if (likely(pte_same(*page_table, orig_pte))) {
78362 +
78363 +#ifdef CONFIG_PAX_SEGMEXEC
78364 + if (pax_find_mirror_vma(vma))
78365 + BUG_ON(!trylock_page(new_page));
78366 +#endif
78367 +
78368 if (old_page) {
78369 if (!PageAnon(old_page)) {
78370 dec_mm_counter(mm, file_rss);
78371 @@ -2207,6 +2417,10 @@ gotten:
78372 page_remove_rmap(old_page);
78373 }
78374
78375 +#ifdef CONFIG_PAX_SEGMEXEC
78376 + pax_mirror_anon_pte(vma, address, new_page, ptl);
78377 +#endif
78378 +
78379 /* Free the old page.. */
78380 new_page = old_page;
78381 ret |= VM_FAULT_WRITE;
78382 @@ -2606,6 +2820,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
78383 swap_free(entry);
78384 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
78385 try_to_free_swap(page);
78386 +
78387 +#ifdef CONFIG_PAX_SEGMEXEC
78388 + if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
78389 +#endif
78390 +
78391 unlock_page(page);
78392
78393 if (flags & FAULT_FLAG_WRITE) {
78394 @@ -2617,6 +2836,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
78395
78396 /* No need to invalidate - it was non-present before */
78397 update_mmu_cache(vma, address, pte);
78398 +
78399 +#ifdef CONFIG_PAX_SEGMEXEC
78400 + pax_mirror_anon_pte(vma, address, page, ptl);
78401 +#endif
78402 +
78403 unlock:
78404 pte_unmap_unlock(page_table, ptl);
78405 out:
78406 @@ -2632,40 +2856,6 @@ out_release:
78407 }
78408
78409 /*
78410 - * This is like a special single-page "expand_{down|up}wards()",
78411 - * except we must first make sure that 'address{-|+}PAGE_SIZE'
78412 - * doesn't hit another vma.
78413 - */
78414 -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
78415 -{
78416 - address &= PAGE_MASK;
78417 - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
78418 - struct vm_area_struct *prev = vma->vm_prev;
78419 -
78420 - /*
78421 - * Is there a mapping abutting this one below?
78422 - *
78423 - * That's only ok if it's the same stack mapping
78424 - * that has gotten split..
78425 - */
78426 - if (prev && prev->vm_end == address)
78427 - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
78428 -
78429 - expand_stack(vma, address - PAGE_SIZE);
78430 - }
78431 - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
78432 - struct vm_area_struct *next = vma->vm_next;
78433 -
78434 - /* As VM_GROWSDOWN but s/below/above/ */
78435 - if (next && next->vm_start == address + PAGE_SIZE)
78436 - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
78437 -
78438 - expand_upwards(vma, address + PAGE_SIZE);
78439 - }
78440 - return 0;
78441 -}
78442 -
78443 -/*
78444 * We enter with non-exclusive mmap_sem (to exclude vma changes,
78445 * but allow concurrent faults), and pte mapped but not yet locked.
78446 * We return with mmap_sem still held, but pte unmapped and unlocked.
78447 @@ -2674,27 +2864,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
78448 unsigned long address, pte_t *page_table, pmd_t *pmd,
78449 unsigned int flags)
78450 {
78451 - struct page *page;
78452 + struct page *page = NULL;
78453 spinlock_t *ptl;
78454 pte_t entry;
78455
78456 - pte_unmap(page_table);
78457 -
78458 - /* Check if we need to add a guard page to the stack */
78459 - if (check_stack_guard_page(vma, address) < 0)
78460 - return VM_FAULT_SIGBUS;
78461 -
78462 - /* Use the zero-page for reads */
78463 if (!(flags & FAULT_FLAG_WRITE)) {
78464 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
78465 vma->vm_page_prot));
78466 - page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
78467 + ptl = pte_lockptr(mm, pmd);
78468 + spin_lock(ptl);
78469 if (!pte_none(*page_table))
78470 goto unlock;
78471 goto setpte;
78472 }
78473
78474 /* Allocate our own private page. */
78475 + pte_unmap(page_table);
78476 +
78477 if (unlikely(anon_vma_prepare(vma)))
78478 goto oom;
78479 page = alloc_zeroed_user_highpage_movable(vma, address);
78480 @@ -2713,6 +2899,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
78481 if (!pte_none(*page_table))
78482 goto release;
78483
78484 +#ifdef CONFIG_PAX_SEGMEXEC
78485 + if (pax_find_mirror_vma(vma))
78486 + BUG_ON(!trylock_page(page));
78487 +#endif
78488 +
78489 inc_mm_counter(mm, anon_rss);
78490 page_add_new_anon_rmap(page, vma, address);
78491 setpte:
78492 @@ -2720,6 +2911,12 @@ setpte:
78493
78494 /* No need to invalidate - it was non-present before */
78495 update_mmu_cache(vma, address, entry);
78496 +
78497 +#ifdef CONFIG_PAX_SEGMEXEC
78498 + if (page)
78499 + pax_mirror_anon_pte(vma, address, page, ptl);
78500 +#endif
78501 +
78502 unlock:
78503 pte_unmap_unlock(page_table, ptl);
78504 return 0;
78505 @@ -2862,6 +3059,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
78506 */
78507 /* Only go through if we didn't race with anybody else... */
78508 if (likely(pte_same(*page_table, orig_pte))) {
78509 +
78510 +#ifdef CONFIG_PAX_SEGMEXEC
78511 + if (anon && pax_find_mirror_vma(vma))
78512 + BUG_ON(!trylock_page(page));
78513 +#endif
78514 +
78515 flush_icache_page(vma, page);
78516 entry = mk_pte(page, vma->vm_page_prot);
78517 if (flags & FAULT_FLAG_WRITE)
78518 @@ -2881,6 +3084,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
78519
78520 /* no need to invalidate: a not-present page won't be cached */
78521 update_mmu_cache(vma, address, entry);
78522 +
78523 +#ifdef CONFIG_PAX_SEGMEXEC
78524 + if (anon)
78525 + pax_mirror_anon_pte(vma, address, page, ptl);
78526 + else
78527 + pax_mirror_file_pte(vma, address, page, ptl);
78528 +#endif
78529 +
78530 } else {
78531 if (charged)
78532 mem_cgroup_uncharge_page(page);
78533 @@ -3028,6 +3239,12 @@ static inline int handle_pte_fault(struct mm_struct *mm,
78534 if (flags & FAULT_FLAG_WRITE)
78535 flush_tlb_page(vma, address);
78536 }
78537 +
78538 +#ifdef CONFIG_PAX_SEGMEXEC
78539 + pax_mirror_pte(vma, address, pte, pmd, ptl);
78540 + return 0;
78541 +#endif
78542 +
78543 unlock:
78544 pte_unmap_unlock(pte, ptl);
78545 return 0;
78546 @@ -3044,6 +3261,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
78547 pmd_t *pmd;
78548 pte_t *pte;
78549
78550 +#ifdef CONFIG_PAX_SEGMEXEC
78551 + struct vm_area_struct *vma_m;
78552 +#endif
78553 +
78554 __set_current_state(TASK_RUNNING);
78555
78556 count_vm_event(PGFAULT);
78557 @@ -3051,6 +3272,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
78558 if (unlikely(is_vm_hugetlb_page(vma)))
78559 return hugetlb_fault(mm, vma, address, flags);
78560
78561 +#ifdef CONFIG_PAX_SEGMEXEC
78562 + vma_m = pax_find_mirror_vma(vma);
78563 + if (vma_m) {
78564 + unsigned long address_m;
78565 + pgd_t *pgd_m;
78566 + pud_t *pud_m;
78567 + pmd_t *pmd_m;
78568 +
78569 + if (vma->vm_start > vma_m->vm_start) {
78570 + address_m = address;
78571 + address -= SEGMEXEC_TASK_SIZE;
78572 + vma = vma_m;
78573 + } else
78574 + address_m = address + SEGMEXEC_TASK_SIZE;
78575 +
78576 + pgd_m = pgd_offset(mm, address_m);
78577 + pud_m = pud_alloc(mm, pgd_m, address_m);
78578 + if (!pud_m)
78579 + return VM_FAULT_OOM;
78580 + pmd_m = pmd_alloc(mm, pud_m, address_m);
78581 + if (!pmd_m)
78582 + return VM_FAULT_OOM;
78583 + if (!pmd_present(*pmd_m) && __pte_alloc(mm, pmd_m, address_m))
78584 + return VM_FAULT_OOM;
78585 + pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
78586 + }
78587 +#endif
78588 +
78589 pgd = pgd_offset(mm, address);
78590 pud = pud_alloc(mm, pgd, address);
78591 if (!pud)
78592 @@ -3148,7 +3397,7 @@ static int __init gate_vma_init(void)
78593 gate_vma.vm_start = FIXADDR_USER_START;
78594 gate_vma.vm_end = FIXADDR_USER_END;
78595 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
78596 - gate_vma.vm_page_prot = __P101;
78597 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
78598 /*
78599 * Make sure the vDSO gets into every core dump.
78600 * Dumping its contents makes post-mortem fully interpretable later
78601 diff --git a/mm/mempolicy.c b/mm/mempolicy.c
78602 index 3c6e3e2..b1ddbb8 100644
78603 --- a/mm/mempolicy.c
78604 +++ b/mm/mempolicy.c
78605 @@ -573,6 +573,10 @@ static int mbind_range(struct vm_area_struct *vma, unsigned long start,
78606 struct vm_area_struct *next;
78607 int err;
78608
78609 +#ifdef CONFIG_PAX_SEGMEXEC
78610 + struct vm_area_struct *vma_m;
78611 +#endif
78612 +
78613 err = 0;
78614 for (; vma && vma->vm_start < end; vma = next) {
78615 next = vma->vm_next;
78616 @@ -584,6 +588,16 @@ static int mbind_range(struct vm_area_struct *vma, unsigned long start,
78617 err = policy_vma(vma, new);
78618 if (err)
78619 break;
78620 +
78621 +#ifdef CONFIG_PAX_SEGMEXEC
78622 + vma_m = pax_find_mirror_vma(vma);
78623 + if (vma_m) {
78624 + err = policy_vma(vma_m, new);
78625 + if (err)
78626 + break;
78627 + }
78628 +#endif
78629 +
78630 }
78631 return err;
78632 }
78633 @@ -1002,6 +1016,17 @@ static long do_mbind(unsigned long start, unsigned long len,
78634
78635 if (end < start)
78636 return -EINVAL;
78637 +
78638 +#ifdef CONFIG_PAX_SEGMEXEC
78639 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
78640 + if (end > SEGMEXEC_TASK_SIZE)
78641 + return -EINVAL;
78642 + } else
78643 +#endif
78644 +
78645 + if (end > TASK_SIZE)
78646 + return -EINVAL;
78647 +
78648 if (end == start)
78649 return 0;
78650
78651 @@ -1207,6 +1232,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
78652 if (!mm)
78653 return -EINVAL;
78654
78655 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
78656 + if (mm != current->mm &&
78657 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
78658 + err = -EPERM;
78659 + goto out;
78660 + }
78661 +#endif
78662 +
78663 /*
78664 * Check if this process has the right to modify the specified
78665 * process. The right exists if the process has administrative
78666 @@ -1216,8 +1249,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
78667 rcu_read_lock();
78668 tcred = __task_cred(task);
78669 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
78670 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
78671 - !capable(CAP_SYS_NICE)) {
78672 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
78673 rcu_read_unlock();
78674 err = -EPERM;
78675 goto out;
78676 @@ -2367,6 +2399,12 @@ static inline void check_huge_range(struct vm_area_struct *vma,
78677 }
78678 #endif
78679
78680 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
78681 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
78682 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
78683 + _mm->pax_flags & MF_PAX_SEGMEXEC))
78684 +#endif
78685 +
78686 /*
78687 * Display pages allocated per node and memory policy via /proc.
78688 */
78689 @@ -2381,6 +2419,13 @@ int show_numa_map(struct seq_file *m, void *v)
78690 int n;
78691 char buffer[50];
78692
78693 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
78694 + if (current->exec_id != m->exec_id) {
78695 + gr_log_badprocpid("numa_maps");
78696 + return 0;
78697 + }
78698 +#endif
78699 +
78700 if (!mm)
78701 return 0;
78702
78703 @@ -2392,11 +2437,15 @@ int show_numa_map(struct seq_file *m, void *v)
78704 mpol_to_str(buffer, sizeof(buffer), pol, 0);
78705 mpol_cond_put(pol);
78706
78707 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
78708 + seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
78709 +#else
78710 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
78711 +#endif
78712
78713 if (file) {
78714 seq_printf(m, " file=");
78715 - seq_path(m, &file->f_path, "\n\t= ");
78716 + seq_path(m, &file->f_path, "\n\t\\= ");
78717 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
78718 seq_printf(m, " heap");
78719 } else if (vma->vm_start <= mm->start_stack &&
78720 diff --git a/mm/migrate.c b/mm/migrate.c
78721 index aaca868..2ebecdc 100644
78722 --- a/mm/migrate.c
78723 +++ b/mm/migrate.c
78724 @@ -916,6 +916,8 @@ static int do_pages_move(struct mm_struct *mm, struct task_struct *task,
78725 unsigned long chunk_start;
78726 int err;
78727
78728 + pax_track_stack();
78729 +
78730 task_nodes = cpuset_mems_allowed(task);
78731
78732 err = -ENOMEM;
78733 @@ -1106,6 +1108,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
78734 if (!mm)
78735 return -EINVAL;
78736
78737 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
78738 + if (mm != current->mm &&
78739 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
78740 + err = -EPERM;
78741 + goto out;
78742 + }
78743 +#endif
78744 +
78745 /*
78746 * Check if this process has the right to modify the specified
78747 * process. The right exists if the process has administrative
78748 @@ -1115,8 +1125,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
78749 rcu_read_lock();
78750 tcred = __task_cred(task);
78751 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
78752 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
78753 - !capable(CAP_SYS_NICE)) {
78754 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
78755 rcu_read_unlock();
78756 err = -EPERM;
78757 goto out;
78758 diff --git a/mm/mlock.c b/mm/mlock.c
78759 index 2d846cf..98134d2 100644
78760 --- a/mm/mlock.c
78761 +++ b/mm/mlock.c
78762 @@ -13,6 +13,7 @@
78763 #include <linux/pagemap.h>
78764 #include <linux/mempolicy.h>
78765 #include <linux/syscalls.h>
78766 +#include <linux/security.h>
78767 #include <linux/sched.h>
78768 #include <linux/module.h>
78769 #include <linux/rmap.h>
78770 @@ -138,13 +139,6 @@ void munlock_vma_page(struct page *page)
78771 }
78772 }
78773
78774 -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
78775 -{
78776 - return (vma->vm_flags & VM_GROWSDOWN) &&
78777 - (vma->vm_start == addr) &&
78778 - !vma_stack_continue(vma->vm_prev, addr);
78779 -}
78780 -
78781 /**
78782 * __mlock_vma_pages_range() - mlock a range of pages in the vma.
78783 * @vma: target vma
78784 @@ -177,12 +171,6 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
78785 if (vma->vm_flags & VM_WRITE)
78786 gup_flags |= FOLL_WRITE;
78787
78788 - /* We don't try to access the guard page of a stack vma */
78789 - if (stack_guard_page(vma, start)) {
78790 - addr += PAGE_SIZE;
78791 - nr_pages--;
78792 - }
78793 -
78794 while (nr_pages > 0) {
78795 int i;
78796
78797 @@ -440,7 +428,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
78798 {
78799 unsigned long nstart, end, tmp;
78800 struct vm_area_struct * vma, * prev;
78801 - int error;
78802 + int error = -EINVAL;
78803
78804 len = PAGE_ALIGN(len);
78805 end = start + len;
78806 @@ -448,6 +436,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
78807 return -EINVAL;
78808 if (end == start)
78809 return 0;
78810 + if (end > TASK_SIZE)
78811 + return -EINVAL;
78812 +
78813 vma = find_vma_prev(current->mm, start, &prev);
78814 if (!vma || vma->vm_start > start)
78815 return -ENOMEM;
78816 @@ -458,6 +449,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
78817 for (nstart = start ; ; ) {
78818 unsigned int newflags;
78819
78820 +#ifdef CONFIG_PAX_SEGMEXEC
78821 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
78822 + break;
78823 +#endif
78824 +
78825 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
78826
78827 newflags = vma->vm_flags | VM_LOCKED;
78828 @@ -507,6 +503,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
78829 lock_limit >>= PAGE_SHIFT;
78830
78831 /* check against resource limits */
78832 + gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
78833 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
78834 error = do_mlock(start, len, 1);
78835 up_write(&current->mm->mmap_sem);
78836 @@ -528,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
78837 static int do_mlockall(int flags)
78838 {
78839 struct vm_area_struct * vma, * prev = NULL;
78840 - unsigned int def_flags = 0;
78841
78842 if (flags & MCL_FUTURE)
78843 - def_flags = VM_LOCKED;
78844 - current->mm->def_flags = def_flags;
78845 + current->mm->def_flags |= VM_LOCKED;
78846 + else
78847 + current->mm->def_flags &= ~VM_LOCKED;
78848 if (flags == MCL_FUTURE)
78849 goto out;
78850
78851 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
78852 - unsigned int newflags;
78853 + unsigned long newflags;
78854
78855 +#ifdef CONFIG_PAX_SEGMEXEC
78856 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
78857 + break;
78858 +#endif
78859 +
78860 + BUG_ON(vma->vm_end > TASK_SIZE);
78861 newflags = vma->vm_flags | VM_LOCKED;
78862 if (!(flags & MCL_CURRENT))
78863 newflags &= ~VM_LOCKED;
78864 @@ -570,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
78865 lock_limit >>= PAGE_SHIFT;
78866
78867 ret = -ENOMEM;
78868 + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
78869 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
78870 capable(CAP_IPC_LOCK))
78871 ret = do_mlockall(flags);
78872 diff --git a/mm/mmap.c b/mm/mmap.c
78873 index 4b80cbf..12a7861 100644
78874 --- a/mm/mmap.c
78875 +++ b/mm/mmap.c
78876 @@ -45,6 +45,16 @@
78877 #define arch_rebalance_pgtables(addr, len) (addr)
78878 #endif
78879
78880 +static inline void verify_mm_writelocked(struct mm_struct *mm)
78881 +{
78882 +#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
78883 + if (unlikely(down_read_trylock(&mm->mmap_sem))) {
78884 + up_read(&mm->mmap_sem);
78885 + BUG();
78886 + }
78887 +#endif
78888 +}
78889 +
78890 static void unmap_region(struct mm_struct *mm,
78891 struct vm_area_struct *vma, struct vm_area_struct *prev,
78892 unsigned long start, unsigned long end);
78893 @@ -70,22 +80,32 @@ static void unmap_region(struct mm_struct *mm,
78894 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
78895 *
78896 */
78897 -pgprot_t protection_map[16] = {
78898 +pgprot_t protection_map[16] __read_only = {
78899 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
78900 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
78901 };
78902
78903 pgprot_t vm_get_page_prot(unsigned long vm_flags)
78904 {
78905 - return __pgprot(pgprot_val(protection_map[vm_flags &
78906 + pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
78907 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
78908 pgprot_val(arch_vm_get_page_prot(vm_flags)));
78909 +
78910 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
78911 + if (!nx_enabled &&
78912 + (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
78913 + (vm_flags & (VM_READ | VM_WRITE)))
78914 + prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
78915 +#endif
78916 +
78917 + return prot;
78918 }
78919 EXPORT_SYMBOL(vm_get_page_prot);
78920
78921 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
78922 int sysctl_overcommit_ratio = 50; /* default is 50% */
78923 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
78924 +unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
78925 struct percpu_counter vm_committed_as;
78926
78927 /*
78928 @@ -231,6 +251,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
78929 struct vm_area_struct *next = vma->vm_next;
78930
78931 might_sleep();
78932 + BUG_ON(vma->vm_mirror);
78933 if (vma->vm_ops && vma->vm_ops->close)
78934 vma->vm_ops->close(vma);
78935 if (vma->vm_file) {
78936 @@ -267,6 +288,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
78937 * not page aligned -Ram Gupta
78938 */
78939 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
78940 + gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
78941 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
78942 (mm->end_data - mm->start_data) > rlim)
78943 goto out;
78944 @@ -704,6 +726,12 @@ static int
78945 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
78946 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
78947 {
78948 +
78949 +#ifdef CONFIG_PAX_SEGMEXEC
78950 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
78951 + return 0;
78952 +#endif
78953 +
78954 if (is_mergeable_vma(vma, file, vm_flags) &&
78955 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
78956 if (vma->vm_pgoff == vm_pgoff)
78957 @@ -723,6 +751,12 @@ static int
78958 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
78959 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
78960 {
78961 +
78962 +#ifdef CONFIG_PAX_SEGMEXEC
78963 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
78964 + return 0;
78965 +#endif
78966 +
78967 if (is_mergeable_vma(vma, file, vm_flags) &&
78968 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
78969 pgoff_t vm_pglen;
78970 @@ -765,12 +799,19 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
78971 struct vm_area_struct *vma_merge(struct mm_struct *mm,
78972 struct vm_area_struct *prev, unsigned long addr,
78973 unsigned long end, unsigned long vm_flags,
78974 - struct anon_vma *anon_vma, struct file *file,
78975 + struct anon_vma *anon_vma, struct file *file,
78976 pgoff_t pgoff, struct mempolicy *policy)
78977 {
78978 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
78979 struct vm_area_struct *area, *next;
78980
78981 +#ifdef CONFIG_PAX_SEGMEXEC
78982 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
78983 + struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
78984 +
78985 + BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
78986 +#endif
78987 +
78988 /*
78989 * We later require that vma->vm_flags == vm_flags,
78990 * so this tests vma->vm_flags & VM_SPECIAL, too.
78991 @@ -786,6 +827,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
78992 if (next && next->vm_end == end) /* cases 6, 7, 8 */
78993 next = next->vm_next;
78994
78995 +#ifdef CONFIG_PAX_SEGMEXEC
78996 + if (prev)
78997 + prev_m = pax_find_mirror_vma(prev);
78998 + if (area)
78999 + area_m = pax_find_mirror_vma(area);
79000 + if (next)
79001 + next_m = pax_find_mirror_vma(next);
79002 +#endif
79003 +
79004 /*
79005 * Can it merge with the predecessor?
79006 */
79007 @@ -805,9 +855,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
79008 /* cases 1, 6 */
79009 vma_adjust(prev, prev->vm_start,
79010 next->vm_end, prev->vm_pgoff, NULL);
79011 - } else /* cases 2, 5, 7 */
79012 +
79013 +#ifdef CONFIG_PAX_SEGMEXEC
79014 + if (prev_m)
79015 + vma_adjust(prev_m, prev_m->vm_start,
79016 + next_m->vm_end, prev_m->vm_pgoff, NULL);
79017 +#endif
79018 +
79019 + } else { /* cases 2, 5, 7 */
79020 vma_adjust(prev, prev->vm_start,
79021 end, prev->vm_pgoff, NULL);
79022 +
79023 +#ifdef CONFIG_PAX_SEGMEXEC
79024 + if (prev_m)
79025 + vma_adjust(prev_m, prev_m->vm_start,
79026 + end_m, prev_m->vm_pgoff, NULL);
79027 +#endif
79028 +
79029 + }
79030 return prev;
79031 }
79032
79033 @@ -818,12 +883,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
79034 mpol_equal(policy, vma_policy(next)) &&
79035 can_vma_merge_before(next, vm_flags,
79036 anon_vma, file, pgoff+pglen)) {
79037 - if (prev && addr < prev->vm_end) /* case 4 */
79038 + if (prev && addr < prev->vm_end) { /* case 4 */
79039 vma_adjust(prev, prev->vm_start,
79040 addr, prev->vm_pgoff, NULL);
79041 - else /* cases 3, 8 */
79042 +
79043 +#ifdef CONFIG_PAX_SEGMEXEC
79044 + if (prev_m)
79045 + vma_adjust(prev_m, prev_m->vm_start,
79046 + addr_m, prev_m->vm_pgoff, NULL);
79047 +#endif
79048 +
79049 + } else { /* cases 3, 8 */
79050 vma_adjust(area, addr, next->vm_end,
79051 next->vm_pgoff - pglen, NULL);
79052 +
79053 +#ifdef CONFIG_PAX_SEGMEXEC
79054 + if (area_m)
79055 + vma_adjust(area_m, addr_m, next_m->vm_end,
79056 + next_m->vm_pgoff - pglen, NULL);
79057 +#endif
79058 +
79059 + }
79060 return area;
79061 }
79062
79063 @@ -898,14 +978,11 @@ none:
79064 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
79065 struct file *file, long pages)
79066 {
79067 - const unsigned long stack_flags
79068 - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
79069 -
79070 if (file) {
79071 mm->shared_vm += pages;
79072 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
79073 mm->exec_vm += pages;
79074 - } else if (flags & stack_flags)
79075 + } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
79076 mm->stack_vm += pages;
79077 if (flags & (VM_RESERVED|VM_IO))
79078 mm->reserved_vm += pages;
79079 @@ -932,7 +1009,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
79080 * (the exception is when the underlying filesystem is noexec
79081 * mounted, in which case we dont add PROT_EXEC.)
79082 */
79083 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
79084 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
79085 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
79086 prot |= PROT_EXEC;
79087
79088 @@ -958,7 +1035,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
79089 /* Obtain the address to map to. we verify (or select) it and ensure
79090 * that it represents a valid section of the address space.
79091 */
79092 - addr = get_unmapped_area(file, addr, len, pgoff, flags);
79093 + addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
79094 if (addr & ~PAGE_MASK)
79095 return addr;
79096
79097 @@ -969,6 +1046,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
79098 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
79099 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
79100
79101 +#ifdef CONFIG_PAX_MPROTECT
79102 + if (mm->pax_flags & MF_PAX_MPROTECT) {
79103 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
79104 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
79105 + gr_log_rwxmmap(file);
79106 +
79107 +#ifdef CONFIG_PAX_EMUPLT
79108 + vm_flags &= ~VM_EXEC;
79109 +#else
79110 + return -EPERM;
79111 +#endif
79112 +
79113 + }
79114 +
79115 + if (!(vm_flags & VM_EXEC))
79116 + vm_flags &= ~VM_MAYEXEC;
79117 +#else
79118 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
79119 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
79120 +#endif
79121 + else
79122 + vm_flags &= ~VM_MAYWRITE;
79123 + }
79124 +#endif
79125 +
79126 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
79127 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
79128 + vm_flags &= ~VM_PAGEEXEC;
79129 +#endif
79130 +
79131 if (flags & MAP_LOCKED)
79132 if (!can_do_mlock())
79133 return -EPERM;
79134 @@ -980,6 +1087,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
79135 locked += mm->locked_vm;
79136 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
79137 lock_limit >>= PAGE_SHIFT;
79138 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
79139 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
79140 return -EAGAIN;
79141 }
79142 @@ -1053,6 +1161,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
79143 if (error)
79144 return error;
79145
79146 + if (!gr_acl_handle_mmap(file, prot))
79147 + return -EACCES;
79148 +
79149 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
79150 }
79151 EXPORT_SYMBOL(do_mmap_pgoff);
79152 @@ -1065,10 +1176,10 @@ EXPORT_SYMBOL(do_mmap_pgoff);
79153 */
79154 int vma_wants_writenotify(struct vm_area_struct *vma)
79155 {
79156 - unsigned int vm_flags = vma->vm_flags;
79157 + unsigned long vm_flags = vma->vm_flags;
79158
79159 /* If it was private or non-writable, the write bit is already clear */
79160 - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
79161 + if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
79162 return 0;
79163
79164 /* The backer wishes to know when pages are first written to? */
79165 @@ -1117,14 +1228,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
79166 unsigned long charged = 0;
79167 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
79168
79169 +#ifdef CONFIG_PAX_SEGMEXEC
79170 + struct vm_area_struct *vma_m = NULL;
79171 +#endif
79172 +
79173 + /*
79174 + * mm->mmap_sem is required to protect against another thread
79175 + * changing the mappings in case we sleep.
79176 + */
79177 + verify_mm_writelocked(mm);
79178 +
79179 /* Clear old maps */
79180 error = -ENOMEM;
79181 -munmap_back:
79182 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
79183 if (vma && vma->vm_start < addr + len) {
79184 if (do_munmap(mm, addr, len))
79185 return -ENOMEM;
79186 - goto munmap_back;
79187 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
79188 + BUG_ON(vma && vma->vm_start < addr + len);
79189 }
79190
79191 /* Check against address space limit. */
79192 @@ -1173,6 +1294,16 @@ munmap_back:
79193 goto unacct_error;
79194 }
79195
79196 +#ifdef CONFIG_PAX_SEGMEXEC
79197 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
79198 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
79199 + if (!vma_m) {
79200 + error = -ENOMEM;
79201 + goto free_vma;
79202 + }
79203 + }
79204 +#endif
79205 +
79206 vma->vm_mm = mm;
79207 vma->vm_start = addr;
79208 vma->vm_end = addr + len;
79209 @@ -1180,8 +1311,9 @@ munmap_back:
79210 vma->vm_page_prot = vm_get_page_prot(vm_flags);
79211 vma->vm_pgoff = pgoff;
79212
79213 + error = -EINVAL; /* when rejecting VM_GROWSDOWN|VM_GROWSUP */
79214 +
79215 if (file) {
79216 - error = -EINVAL;
79217 if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
79218 goto free_vma;
79219 if (vm_flags & VM_DENYWRITE) {
79220 @@ -1195,6 +1327,19 @@ munmap_back:
79221 error = file->f_op->mmap(file, vma);
79222 if (error)
79223 goto unmap_and_free_vma;
79224 +
79225 +#ifdef CONFIG_PAX_SEGMEXEC
79226 + if (vma_m && (vm_flags & VM_EXECUTABLE))
79227 + added_exe_file_vma(mm);
79228 +#endif
79229 +
79230 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
79231 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
79232 + vma->vm_flags |= VM_PAGEEXEC;
79233 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
79234 + }
79235 +#endif
79236 +
79237 if (vm_flags & VM_EXECUTABLE)
79238 added_exe_file_vma(mm);
79239
79240 @@ -1207,6 +1352,8 @@ munmap_back:
79241 pgoff = vma->vm_pgoff;
79242 vm_flags = vma->vm_flags;
79243 } else if (vm_flags & VM_SHARED) {
79244 + if (unlikely(vm_flags & (VM_GROWSDOWN|VM_GROWSUP)))
79245 + goto free_vma;
79246 error = shmem_zero_setup(vma);
79247 if (error)
79248 goto free_vma;
79249 @@ -1218,6 +1365,11 @@ munmap_back:
79250 vma_link(mm, vma, prev, rb_link, rb_parent);
79251 file = vma->vm_file;
79252
79253 +#ifdef CONFIG_PAX_SEGMEXEC
79254 + if (vma_m)
79255 + pax_mirror_vma(vma_m, vma);
79256 +#endif
79257 +
79258 /* Once vma denies write, undo our temporary denial count */
79259 if (correct_wcount)
79260 atomic_inc(&inode->i_writecount);
79261 @@ -1226,6 +1378,7 @@ out:
79262
79263 mm->total_vm += len >> PAGE_SHIFT;
79264 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
79265 + track_exec_limit(mm, addr, addr + len, vm_flags);
79266 if (vm_flags & VM_LOCKED) {
79267 /*
79268 * makes pages present; downgrades, drops, reacquires mmap_sem
79269 @@ -1248,6 +1401,12 @@ unmap_and_free_vma:
79270 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
79271 charged = 0;
79272 free_vma:
79273 +
79274 +#ifdef CONFIG_PAX_SEGMEXEC
79275 + if (vma_m)
79276 + kmem_cache_free(vm_area_cachep, vma_m);
79277 +#endif
79278 +
79279 kmem_cache_free(vm_area_cachep, vma);
79280 unacct_error:
79281 if (charged)
79282 @@ -1255,6 +1414,44 @@ unacct_error:
79283 return error;
79284 }
79285
79286 +bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
79287 +{
79288 + if (!vma) {
79289 +#ifdef CONFIG_STACK_GROWSUP
79290 + if (addr > sysctl_heap_stack_gap)
79291 + vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
79292 + else
79293 + vma = find_vma(current->mm, 0);
79294 + if (vma && (vma->vm_flags & VM_GROWSUP))
79295 + return false;
79296 +#endif
79297 + return true;
79298 + }
79299 +
79300 + if (addr + len > vma->vm_start)
79301 + return false;
79302 +
79303 + if (vma->vm_flags & VM_GROWSDOWN)
79304 + return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
79305 +#ifdef CONFIG_STACK_GROWSUP
79306 + else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
79307 + return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
79308 +#endif
79309 +
79310 + return true;
79311 +}
79312 +
79313 +unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
79314 +{
79315 + if (vma->vm_start < len)
79316 + return -ENOMEM;
79317 + if (!(vma->vm_flags & VM_GROWSDOWN))
79318 + return vma->vm_start - len;
79319 + if (sysctl_heap_stack_gap <= vma->vm_start - len)
79320 + return vma->vm_start - len - sysctl_heap_stack_gap;
79321 + return -ENOMEM;
79322 +}
79323 +
79324 /* Get an address range which is currently unmapped.
79325 * For shmat() with addr=0.
79326 *
79327 @@ -1281,18 +1478,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
79328 if (flags & MAP_FIXED)
79329 return addr;
79330
79331 +#ifdef CONFIG_PAX_RANDMMAP
79332 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
79333 +#endif
79334 +
79335 if (addr) {
79336 addr = PAGE_ALIGN(addr);
79337 - vma = find_vma(mm, addr);
79338 - if (TASK_SIZE - len >= addr &&
79339 - (!vma || addr + len <= vma->vm_start))
79340 - return addr;
79341 + if (TASK_SIZE - len >= addr) {
79342 + vma = find_vma(mm, addr);
79343 + if (check_heap_stack_gap(vma, addr, len))
79344 + return addr;
79345 + }
79346 }
79347 if (len > mm->cached_hole_size) {
79348 - start_addr = addr = mm->free_area_cache;
79349 + start_addr = addr = mm->free_area_cache;
79350 } else {
79351 - start_addr = addr = TASK_UNMAPPED_BASE;
79352 - mm->cached_hole_size = 0;
79353 + start_addr = addr = mm->mmap_base;
79354 + mm->cached_hole_size = 0;
79355 }
79356
79357 full_search:
79358 @@ -1303,34 +1505,40 @@ full_search:
79359 * Start a new search - just in case we missed
79360 * some holes.
79361 */
79362 - if (start_addr != TASK_UNMAPPED_BASE) {
79363 - addr = TASK_UNMAPPED_BASE;
79364 - start_addr = addr;
79365 + if (start_addr != mm->mmap_base) {
79366 + start_addr = addr = mm->mmap_base;
79367 mm->cached_hole_size = 0;
79368 goto full_search;
79369 }
79370 return -ENOMEM;
79371 }
79372 - if (!vma || addr + len <= vma->vm_start) {
79373 - /*
79374 - * Remember the place where we stopped the search:
79375 - */
79376 - mm->free_area_cache = addr + len;
79377 - return addr;
79378 - }
79379 + if (check_heap_stack_gap(vma, addr, len))
79380 + break;
79381 if (addr + mm->cached_hole_size < vma->vm_start)
79382 mm->cached_hole_size = vma->vm_start - addr;
79383 addr = vma->vm_end;
79384 }
79385 +
79386 + /*
79387 + * Remember the place where we stopped the search:
79388 + */
79389 + mm->free_area_cache = addr + len;
79390 + return addr;
79391 }
79392 #endif
79393
79394 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
79395 {
79396 +
79397 +#ifdef CONFIG_PAX_SEGMEXEC
79398 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
79399 + return;
79400 +#endif
79401 +
79402 /*
79403 * Is this a new hole at the lowest possible address?
79404 */
79405 - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
79406 + if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
79407 mm->free_area_cache = addr;
79408 mm->cached_hole_size = ~0UL;
79409 }
79410 @@ -1348,7 +1556,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
79411 {
79412 struct vm_area_struct *vma;
79413 struct mm_struct *mm = current->mm;
79414 - unsigned long addr = addr0;
79415 + unsigned long base = mm->mmap_base, addr = addr0;
79416
79417 /* requested length too big for entire address space */
79418 if (len > TASK_SIZE)
79419 @@ -1357,13 +1565,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
79420 if (flags & MAP_FIXED)
79421 return addr;
79422
79423 +#ifdef CONFIG_PAX_RANDMMAP
79424 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
79425 +#endif
79426 +
79427 /* requesting a specific address */
79428 if (addr) {
79429 addr = PAGE_ALIGN(addr);
79430 - vma = find_vma(mm, addr);
79431 - if (TASK_SIZE - len >= addr &&
79432 - (!vma || addr + len <= vma->vm_start))
79433 - return addr;
79434 + if (TASK_SIZE - len >= addr) {
79435 + vma = find_vma(mm, addr);
79436 + if (check_heap_stack_gap(vma, addr, len))
79437 + return addr;
79438 + }
79439 }
79440
79441 /* check if free_area_cache is useful for us */
79442 @@ -1378,7 +1591,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
79443 /* make sure it can fit in the remaining address space */
79444 if (addr > len) {
79445 vma = find_vma(mm, addr-len);
79446 - if (!vma || addr <= vma->vm_start)
79447 + if (check_heap_stack_gap(vma, addr - len, len))
79448 /* remember the address as a hint for next time */
79449 return (mm->free_area_cache = addr-len);
79450 }
79451 @@ -1395,7 +1608,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
79452 * return with success:
79453 */
79454 vma = find_vma(mm, addr);
79455 - if (!vma || addr+len <= vma->vm_start)
79456 + if (check_heap_stack_gap(vma, addr, len))
79457 /* remember the address as a hint for next time */
79458 return (mm->free_area_cache = addr);
79459
79460 @@ -1404,8 +1617,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
79461 mm->cached_hole_size = vma->vm_start - addr;
79462
79463 /* try just below the current vma->vm_start */
79464 - addr = vma->vm_start-len;
79465 - } while (len < vma->vm_start);
79466 + addr = skip_heap_stack_gap(vma, len);
79467 + } while (!IS_ERR_VALUE(addr));
79468
79469 bottomup:
79470 /*
79471 @@ -1414,13 +1627,21 @@ bottomup:
79472 * can happen with large stack limits and large mmap()
79473 * allocations.
79474 */
79475 + mm->mmap_base = TASK_UNMAPPED_BASE;
79476 +
79477 +#ifdef CONFIG_PAX_RANDMMAP
79478 + if (mm->pax_flags & MF_PAX_RANDMMAP)
79479 + mm->mmap_base += mm->delta_mmap;
79480 +#endif
79481 +
79482 + mm->free_area_cache = mm->mmap_base;
79483 mm->cached_hole_size = ~0UL;
79484 - mm->free_area_cache = TASK_UNMAPPED_BASE;
79485 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
79486 /*
79487 * Restore the topdown base:
79488 */
79489 - mm->free_area_cache = mm->mmap_base;
79490 + mm->mmap_base = base;
79491 + mm->free_area_cache = base;
79492 mm->cached_hole_size = ~0UL;
79493
79494 return addr;
79495 @@ -1429,6 +1650,12 @@ bottomup:
79496
79497 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
79498 {
79499 +
79500 +#ifdef CONFIG_PAX_SEGMEXEC
79501 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
79502 + return;
79503 +#endif
79504 +
79505 /*
79506 * Is this a new hole at the highest possible address?
79507 */
79508 @@ -1436,8 +1663,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
79509 mm->free_area_cache = addr;
79510
79511 /* dont allow allocations above current base */
79512 - if (mm->free_area_cache > mm->mmap_base)
79513 + if (mm->free_area_cache > mm->mmap_base) {
79514 mm->free_area_cache = mm->mmap_base;
79515 + mm->cached_hole_size = ~0UL;
79516 + }
79517 }
79518
79519 unsigned long
79520 @@ -1510,40 +1739,49 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
79521
79522 EXPORT_SYMBOL(find_vma);
79523
79524 -/* Same as find_vma, but also return a pointer to the previous VMA in *pprev. */
79525 +/*
79526 + * Same as find_vma, but also return a pointer to the previous VMA in *pprev.
79527 + */
79528 struct vm_area_struct *
79529 find_vma_prev(struct mm_struct *mm, unsigned long addr,
79530 struct vm_area_struct **pprev)
79531 {
79532 - struct vm_area_struct *vma = NULL, *prev = NULL;
79533 - struct rb_node *rb_node;
79534 - if (!mm)
79535 - goto out;
79536 -
79537 - /* Guard against addr being lower than the first VMA */
79538 - vma = mm->mmap;
79539 -
79540 - /* Go through the RB tree quickly. */
79541 - rb_node = mm->mm_rb.rb_node;
79542 -
79543 - while (rb_node) {
79544 - struct vm_area_struct *vma_tmp;
79545 - vma_tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
79546 -
79547 - if (addr < vma_tmp->vm_end) {
79548 - rb_node = rb_node->rb_left;
79549 - } else {
79550 - prev = vma_tmp;
79551 - if (!prev->vm_next || (addr < prev->vm_next->vm_end))
79552 - break;
79553 + struct vm_area_struct *vma;
79554 +
79555 + vma = find_vma(mm, addr);
79556 + if (vma) {
79557 + *pprev = vma->vm_prev;
79558 + } else {
79559 + struct rb_node *rb_node = mm->mm_rb.rb_node;
79560 + *pprev = NULL;
79561 + while (rb_node) {
79562 + *pprev = rb_entry(rb_node, struct vm_area_struct, vm_rb);
79563 rb_node = rb_node->rb_right;
79564 }
79565 }
79566 + return vma;
79567 +}
79568 +
79569 +#ifdef CONFIG_PAX_SEGMEXEC
79570 +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
79571 +{
79572 + struct vm_area_struct *vma_m;
79573
79574 -out:
79575 - *pprev = prev;
79576 - return prev ? prev->vm_next : vma;
79577 + BUG_ON(!vma || vma->vm_start >= vma->vm_end);
79578 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
79579 + BUG_ON(vma->vm_mirror);
79580 + return NULL;
79581 + }
79582 + BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
79583 + vma_m = vma->vm_mirror;
79584 + BUG_ON(!vma_m || vma_m->vm_mirror != vma);
79585 + BUG_ON(vma->vm_file != vma_m->vm_file);
79586 + BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
79587 + BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff || vma->anon_vma != vma_m->anon_vma);
79588 + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
79589 + return vma_m;
79590 }
79591 +#endif
79592
79593 /*
79594 * Verify that the stack growth is acceptable and
79595 @@ -1561,6 +1799,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
79596 return -ENOMEM;
79597
79598 /* Stack limit test */
79599 + gr_learn_resource(current, RLIMIT_STACK, size, 1);
79600 if (size > rlim[RLIMIT_STACK].rlim_cur)
79601 return -ENOMEM;
79602
79603 @@ -1570,6 +1809,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
79604 unsigned long limit;
79605 locked = mm->locked_vm + grow;
79606 limit = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
79607 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
79608 if (locked > limit && !capable(CAP_IPC_LOCK))
79609 return -ENOMEM;
79610 }
79611 @@ -1600,37 +1840,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
79612 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
79613 * vma is the last one with address > vma->vm_end. Have to extend vma.
79614 */
79615 +#ifndef CONFIG_IA64
79616 +static
79617 +#endif
79618 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
79619 {
79620 int error;
79621 + bool locknext;
79622
79623 if (!(vma->vm_flags & VM_GROWSUP))
79624 return -EFAULT;
79625
79626 + /* Also guard against wrapping around to address 0. */
79627 + if (address < PAGE_ALIGN(address+1))
79628 + address = PAGE_ALIGN(address+1);
79629 + else
79630 + return -ENOMEM;
79631 +
79632 /*
79633 * We must make sure the anon_vma is allocated
79634 * so that the anon_vma locking is not a noop.
79635 */
79636 if (unlikely(anon_vma_prepare(vma)))
79637 return -ENOMEM;
79638 + locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
79639 + if (locknext && anon_vma_prepare(vma->vm_next))
79640 + return -ENOMEM;
79641 anon_vma_lock(vma);
79642 + if (locknext)
79643 + anon_vma_lock(vma->vm_next);
79644
79645 /*
79646 * vma->vm_start/vm_end cannot change under us because the caller
79647 * is required to hold the mmap_sem in read mode. We need the
79648 - * anon_vma lock to serialize against concurrent expand_stacks.
79649 - * Also guard against wrapping around to address 0.
79650 + * anon_vma locks to serialize against concurrent expand_stacks
79651 + * and expand_upwards.
79652 */
79653 - if (address < PAGE_ALIGN(address+4))
79654 - address = PAGE_ALIGN(address+4);
79655 - else {
79656 - anon_vma_unlock(vma);
79657 - return -ENOMEM;
79658 - }
79659 error = 0;
79660
79661 /* Somebody else might have raced and expanded it already */
79662 - if (address > vma->vm_end) {
79663 + if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
79664 + error = -ENOMEM;
79665 + else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
79666 unsigned long size, grow;
79667
79668 size = address - vma->vm_start;
79669 @@ -1643,6 +1894,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
79670 vma->vm_end = address;
79671 }
79672 }
79673 + if (locknext)
79674 + anon_vma_unlock(vma->vm_next);
79675 anon_vma_unlock(vma);
79676 return error;
79677 }
79678 @@ -1655,6 +1908,8 @@ static int expand_downwards(struct vm_area_struct *vma,
79679 unsigned long address)
79680 {
79681 int error;
79682 + bool lockprev = false;
79683 + struct vm_area_struct *prev;
79684
79685 /*
79686 * We must make sure the anon_vma is allocated
79687 @@ -1668,6 +1923,15 @@ static int expand_downwards(struct vm_area_struct *vma,
79688 if (error)
79689 return error;
79690
79691 + prev = vma->vm_prev;
79692 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
79693 + lockprev = prev && (prev->vm_flags & VM_GROWSUP);
79694 +#endif
79695 + if (lockprev && anon_vma_prepare(prev))
79696 + return -ENOMEM;
79697 + if (lockprev)
79698 + anon_vma_lock(prev);
79699 +
79700 anon_vma_lock(vma);
79701
79702 /*
79703 @@ -1677,9 +1941,17 @@ static int expand_downwards(struct vm_area_struct *vma,
79704 */
79705
79706 /* Somebody else might have raced and expanded it already */
79707 - if (address < vma->vm_start) {
79708 + if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
79709 + error = -ENOMEM;
79710 + else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
79711 unsigned long size, grow;
79712
79713 +#ifdef CONFIG_PAX_SEGMEXEC
79714 + struct vm_area_struct *vma_m;
79715 +
79716 + vma_m = pax_find_mirror_vma(vma);
79717 +#endif
79718 +
79719 size = vma->vm_end - address;
79720 grow = (vma->vm_start - address) >> PAGE_SHIFT;
79721
79722 @@ -1689,10 +1961,22 @@ static int expand_downwards(struct vm_area_struct *vma,
79723 if (!error) {
79724 vma->vm_start = address;
79725 vma->vm_pgoff -= grow;
79726 + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
79727 +
79728 +#ifdef CONFIG_PAX_SEGMEXEC
79729 + if (vma_m) {
79730 + vma_m->vm_start -= grow << PAGE_SHIFT;
79731 + vma_m->vm_pgoff -= grow;
79732 + }
79733 +#endif
79734 +
79735 +
79736 }
79737 }
79738 }
79739 anon_vma_unlock(vma);
79740 + if (lockprev)
79741 + anon_vma_unlock(prev);
79742 return error;
79743 }
79744
79745 @@ -1768,6 +2052,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
79746 do {
79747 long nrpages = vma_pages(vma);
79748
79749 +#ifdef CONFIG_PAX_SEGMEXEC
79750 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
79751 + vma = remove_vma(vma);
79752 + continue;
79753 + }
79754 +#endif
79755 +
79756 mm->total_vm -= nrpages;
79757 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
79758 vma = remove_vma(vma);
79759 @@ -1813,6 +2104,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
79760 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
79761 vma->vm_prev = NULL;
79762 do {
79763 +
79764 +#ifdef CONFIG_PAX_SEGMEXEC
79765 + if (vma->vm_mirror) {
79766 + BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
79767 + vma->vm_mirror->vm_mirror = NULL;
79768 + vma->vm_mirror->vm_flags &= ~VM_EXEC;
79769 + vma->vm_mirror = NULL;
79770 + }
79771 +#endif
79772 +
79773 rb_erase(&vma->vm_rb, &mm->mm_rb);
79774 mm->map_count--;
79775 tail_vma = vma;
79776 @@ -1840,10 +2141,25 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
79777 struct mempolicy *pol;
79778 struct vm_area_struct *new;
79779
79780 +#ifdef CONFIG_PAX_SEGMEXEC
79781 + struct vm_area_struct *vma_m, *new_m = NULL;
79782 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
79783 +#endif
79784 +
79785 if (is_vm_hugetlb_page(vma) && (addr &
79786 ~(huge_page_mask(hstate_vma(vma)))))
79787 return -EINVAL;
79788
79789 +#ifdef CONFIG_PAX_SEGMEXEC
79790 + vma_m = pax_find_mirror_vma(vma);
79791 +
79792 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
79793 + BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
79794 + if (mm->map_count >= sysctl_max_map_count-1)
79795 + return -ENOMEM;
79796 + } else
79797 +#endif
79798 +
79799 if (mm->map_count >= sysctl_max_map_count)
79800 return -ENOMEM;
79801
79802 @@ -1851,6 +2167,16 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
79803 if (!new)
79804 return -ENOMEM;
79805
79806 +#ifdef CONFIG_PAX_SEGMEXEC
79807 + if (vma_m) {
79808 + new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
79809 + if (!new_m) {
79810 + kmem_cache_free(vm_area_cachep, new);
79811 + return -ENOMEM;
79812 + }
79813 + }
79814 +#endif
79815 +
79816 /* most fields are the same, copy all, and then fixup */
79817 *new = *vma;
79818
79819 @@ -1861,8 +2187,29 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
79820 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
79821 }
79822
79823 +#ifdef CONFIG_PAX_SEGMEXEC
79824 + if (vma_m) {
79825 + *new_m = *vma_m;
79826 + new_m->vm_mirror = new;
79827 + new->vm_mirror = new_m;
79828 +
79829 + if (new_below)
79830 + new_m->vm_end = addr_m;
79831 + else {
79832 + new_m->vm_start = addr_m;
79833 + new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
79834 + }
79835 + }
79836 +#endif
79837 +
79838 pol = mpol_dup(vma_policy(vma));
79839 if (IS_ERR(pol)) {
79840 +
79841 +#ifdef CONFIG_PAX_SEGMEXEC
79842 + if (new_m)
79843 + kmem_cache_free(vm_area_cachep, new_m);
79844 +#endif
79845 +
79846 kmem_cache_free(vm_area_cachep, new);
79847 return PTR_ERR(pol);
79848 }
79849 @@ -1883,6 +2230,28 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
79850 else
79851 vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
79852
79853 +#ifdef CONFIG_PAX_SEGMEXEC
79854 + if (vma_m) {
79855 + mpol_get(pol);
79856 + vma_set_policy(new_m, pol);
79857 +
79858 + if (new_m->vm_file) {
79859 + get_file(new_m->vm_file);
79860 + if (vma_m->vm_flags & VM_EXECUTABLE)
79861 + added_exe_file_vma(mm);
79862 + }
79863 +
79864 + if (new_m->vm_ops && new_m->vm_ops->open)
79865 + new_m->vm_ops->open(new_m);
79866 +
79867 + if (new_below)
79868 + vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
79869 + ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
79870 + else
79871 + vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
79872 + }
79873 +#endif
79874 +
79875 return 0;
79876 }
79877
79878 @@ -1891,11 +2260,30 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
79879 * work. This now handles partial unmappings.
79880 * Jeremy Fitzhardinge <jeremy@goop.org>
79881 */
79882 +#ifdef CONFIG_PAX_SEGMEXEC
79883 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
79884 {
79885 + int ret = __do_munmap(mm, start, len);
79886 + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
79887 + return ret;
79888 +
79889 + return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
79890 +}
79891 +
79892 +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
79893 +#else
79894 +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
79895 +#endif
79896 +{
79897 unsigned long end;
79898 struct vm_area_struct *vma, *prev, *last;
79899
79900 + /*
79901 + * mm->mmap_sem is required to protect against another thread
79902 + * changing the mappings in case we sleep.
79903 + */
79904 + verify_mm_writelocked(mm);
79905 +
79906 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
79907 return -EINVAL;
79908
79909 @@ -1959,6 +2347,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
79910 /* Fix up all other VM information */
79911 remove_vma_list(mm, vma);
79912
79913 + track_exec_limit(mm, start, end, 0UL);
79914 +
79915 return 0;
79916 }
79917
79918 @@ -1971,22 +2361,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
79919
79920 profile_munmap(addr);
79921
79922 +#ifdef CONFIG_PAX_SEGMEXEC
79923 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
79924 + (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
79925 + return -EINVAL;
79926 +#endif
79927 +
79928 down_write(&mm->mmap_sem);
79929 ret = do_munmap(mm, addr, len);
79930 up_write(&mm->mmap_sem);
79931 return ret;
79932 }
79933
79934 -static inline void verify_mm_writelocked(struct mm_struct *mm)
79935 -{
79936 -#ifdef CONFIG_DEBUG_VM
79937 - if (unlikely(down_read_trylock(&mm->mmap_sem))) {
79938 - WARN_ON(1);
79939 - up_read(&mm->mmap_sem);
79940 - }
79941 -#endif
79942 -}
79943 -
79944 /*
79945 * this is really a simplified "do_mmap". it only handles
79946 * anonymous maps. eventually we may be able to do some
79947 @@ -2000,6 +2386,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
79948 struct rb_node ** rb_link, * rb_parent;
79949 pgoff_t pgoff = addr >> PAGE_SHIFT;
79950 int error;
79951 + unsigned long charged;
79952
79953 len = PAGE_ALIGN(len);
79954 if (!len)
79955 @@ -2011,16 +2398,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
79956
79957 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
79958
79959 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
79960 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
79961 + flags &= ~VM_EXEC;
79962 +
79963 +#ifdef CONFIG_PAX_MPROTECT
79964 + if (mm->pax_flags & MF_PAX_MPROTECT)
79965 + flags &= ~VM_MAYEXEC;
79966 +#endif
79967 +
79968 + }
79969 +#endif
79970 +
79971 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
79972 if (error & ~PAGE_MASK)
79973 return error;
79974
79975 + charged = len >> PAGE_SHIFT;
79976 +
79977 /*
79978 * mlock MCL_FUTURE?
79979 */
79980 if (mm->def_flags & VM_LOCKED) {
79981 unsigned long locked, lock_limit;
79982 - locked = len >> PAGE_SHIFT;
79983 + locked = charged;
79984 locked += mm->locked_vm;
79985 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
79986 lock_limit >>= PAGE_SHIFT;
79987 @@ -2037,22 +2438,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
79988 /*
79989 * Clear old maps. this also does some error checking for us
79990 */
79991 - munmap_back:
79992 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
79993 if (vma && vma->vm_start < addr + len) {
79994 if (do_munmap(mm, addr, len))
79995 return -ENOMEM;
79996 - goto munmap_back;
79997 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
79998 + BUG_ON(vma && vma->vm_start < addr + len);
79999 }
80000
80001 /* Check against address space limits *after* clearing old maps... */
80002 - if (!may_expand_vm(mm, len >> PAGE_SHIFT))
80003 + if (!may_expand_vm(mm, charged))
80004 return -ENOMEM;
80005
80006 if (mm->map_count > sysctl_max_map_count)
80007 return -ENOMEM;
80008
80009 - if (security_vm_enough_memory(len >> PAGE_SHIFT))
80010 + if (security_vm_enough_memory(charged))
80011 return -ENOMEM;
80012
80013 /* Can we just expand an old private anonymous mapping? */
80014 @@ -2066,7 +2467,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
80015 */
80016 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
80017 if (!vma) {
80018 - vm_unacct_memory(len >> PAGE_SHIFT);
80019 + vm_unacct_memory(charged);
80020 return -ENOMEM;
80021 }
80022
80023 @@ -2078,11 +2479,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
80024 vma->vm_page_prot = vm_get_page_prot(flags);
80025 vma_link(mm, vma, prev, rb_link, rb_parent);
80026 out:
80027 - mm->total_vm += len >> PAGE_SHIFT;
80028 + mm->total_vm += charged;
80029 if (flags & VM_LOCKED) {
80030 if (!mlock_vma_pages_range(vma, addr, addr + len))
80031 - mm->locked_vm += (len >> PAGE_SHIFT);
80032 + mm->locked_vm += charged;
80033 }
80034 + track_exec_limit(mm, addr, addr + len, flags);
80035 return addr;
80036 }
80037
80038 @@ -2129,8 +2531,10 @@ void exit_mmap(struct mm_struct *mm)
80039 * Walk the list again, actually closing and freeing it,
80040 * with preemption enabled, without holding any MM locks.
80041 */
80042 - while (vma)
80043 + while (vma) {
80044 + vma->vm_mirror = NULL;
80045 vma = remove_vma(vma);
80046 + }
80047
80048 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
80049 }
80050 @@ -2144,6 +2548,10 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
80051 struct vm_area_struct * __vma, * prev;
80052 struct rb_node ** rb_link, * rb_parent;
80053
80054 +#ifdef CONFIG_PAX_SEGMEXEC
80055 + struct vm_area_struct *vma_m = NULL;
80056 +#endif
80057 +
80058 /*
80059 * The vm_pgoff of a purely anonymous vma should be irrelevant
80060 * until its first write fault, when page's anon_vma and index
80061 @@ -2166,7 +2574,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
80062 if ((vma->vm_flags & VM_ACCOUNT) &&
80063 security_vm_enough_memory_mm(mm, vma_pages(vma)))
80064 return -ENOMEM;
80065 +
80066 +#ifdef CONFIG_PAX_SEGMEXEC
80067 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
80068 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
80069 + if (!vma_m)
80070 + return -ENOMEM;
80071 + }
80072 +#endif
80073 +
80074 vma_link(mm, vma, prev, rb_link, rb_parent);
80075 +
80076 +#ifdef CONFIG_PAX_SEGMEXEC
80077 + if (vma_m)
80078 + pax_mirror_vma(vma_m, vma);
80079 +#endif
80080 +
80081 return 0;
80082 }
80083
80084 @@ -2184,6 +2607,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
80085 struct rb_node **rb_link, *rb_parent;
80086 struct mempolicy *pol;
80087
80088 + BUG_ON(vma->vm_mirror);
80089 +
80090 /*
80091 * If anonymous vma has not yet been faulted, update new pgoff
80092 * to match new location, to increase its chance of merging.
80093 @@ -2227,6 +2652,35 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
80094 return new_vma;
80095 }
80096
80097 +#ifdef CONFIG_PAX_SEGMEXEC
80098 +void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
80099 +{
80100 + struct vm_area_struct *prev_m;
80101 + struct rb_node **rb_link_m, *rb_parent_m;
80102 + struct mempolicy *pol_m;
80103 +
80104 + BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
80105 + BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
80106 + BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
80107 + *vma_m = *vma;
80108 + pol_m = vma_policy(vma_m);
80109 + mpol_get(pol_m);
80110 + vma_set_policy(vma_m, pol_m);
80111 + vma_m->vm_start += SEGMEXEC_TASK_SIZE;
80112 + vma_m->vm_end += SEGMEXEC_TASK_SIZE;
80113 + vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
80114 + vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
80115 + if (vma_m->vm_file)
80116 + get_file(vma_m->vm_file);
80117 + if (vma_m->vm_ops && vma_m->vm_ops->open)
80118 + vma_m->vm_ops->open(vma_m);
80119 + find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
80120 + vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
80121 + vma_m->vm_mirror = vma;
80122 + vma->vm_mirror = vma_m;
80123 +}
80124 +#endif
80125 +
80126 /*
80127 * Return true if the calling process may expand its vm space by the passed
80128 * number of pages
80129 @@ -2237,7 +2691,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
80130 unsigned long lim;
80131
80132 lim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
80133 -
80134 + gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
80135 if (cur + npages > lim)
80136 return 0;
80137 return 1;
80138 @@ -2307,6 +2761,22 @@ int install_special_mapping(struct mm_struct *mm,
80139 vma->vm_start = addr;
80140 vma->vm_end = addr + len;
80141
80142 +#ifdef CONFIG_PAX_MPROTECT
80143 + if (mm->pax_flags & MF_PAX_MPROTECT) {
80144 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
80145 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
80146 + return -EPERM;
80147 + if (!(vm_flags & VM_EXEC))
80148 + vm_flags &= ~VM_MAYEXEC;
80149 +#else
80150 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
80151 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
80152 +#endif
80153 + else
80154 + vm_flags &= ~VM_MAYWRITE;
80155 + }
80156 +#endif
80157 +
80158 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
80159 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
80160
80161 diff --git a/mm/mprotect.c b/mm/mprotect.c
80162 index 1737c7e..c7faeb4 100644
80163 --- a/mm/mprotect.c
80164 +++ b/mm/mprotect.c
80165 @@ -24,10 +24,16 @@
80166 #include <linux/mmu_notifier.h>
80167 #include <linux/migrate.h>
80168 #include <linux/perf_event.h>
80169 +
80170 +#ifdef CONFIG_PAX_MPROTECT
80171 +#include <linux/elf.h>
80172 +#endif
80173 +
80174 #include <asm/uaccess.h>
80175 #include <asm/pgtable.h>
80176 #include <asm/cacheflush.h>
80177 #include <asm/tlbflush.h>
80178 +#include <asm/mmu_context.h>
80179
80180 #ifndef pgprot_modify
80181 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
80182 @@ -132,6 +138,48 @@ static void change_protection(struct vm_area_struct *vma,
80183 flush_tlb_range(vma, start, end);
80184 }
80185
80186 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
80187 +/* called while holding the mmap semaphor for writing except stack expansion */
80188 +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
80189 +{
80190 + unsigned long oldlimit, newlimit = 0UL;
80191 +
80192 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || nx_enabled)
80193 + return;
80194 +
80195 + spin_lock(&mm->page_table_lock);
80196 + oldlimit = mm->context.user_cs_limit;
80197 + if ((prot & VM_EXEC) && oldlimit < end)
80198 + /* USER_CS limit moved up */
80199 + newlimit = end;
80200 + else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
80201 + /* USER_CS limit moved down */
80202 + newlimit = start;
80203 +
80204 + if (newlimit) {
80205 + mm->context.user_cs_limit = newlimit;
80206 +
80207 +#ifdef CONFIG_SMP
80208 + wmb();
80209 + cpus_clear(mm->context.cpu_user_cs_mask);
80210 + cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
80211 +#endif
80212 +
80213 + set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
80214 + }
80215 + spin_unlock(&mm->page_table_lock);
80216 + if (newlimit == end) {
80217 + struct vm_area_struct *vma = find_vma(mm, oldlimit);
80218 +
80219 + for (; vma && vma->vm_start < end; vma = vma->vm_next)
80220 + if (is_vm_hugetlb_page(vma))
80221 + hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
80222 + else
80223 + change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
80224 + }
80225 +}
80226 +#endif
80227 +
80228 int
80229 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
80230 unsigned long start, unsigned long end, unsigned long newflags)
80231 @@ -144,11 +192,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
80232 int error;
80233 int dirty_accountable = 0;
80234
80235 +#ifdef CONFIG_PAX_SEGMEXEC
80236 + struct vm_area_struct *vma_m = NULL;
80237 + unsigned long start_m, end_m;
80238 +
80239 + start_m = start + SEGMEXEC_TASK_SIZE;
80240 + end_m = end + SEGMEXEC_TASK_SIZE;
80241 +#endif
80242 +
80243 if (newflags == oldflags) {
80244 *pprev = vma;
80245 return 0;
80246 }
80247
80248 + if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
80249 + struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
80250 +
80251 + if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
80252 + return -ENOMEM;
80253 +
80254 + if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
80255 + return -ENOMEM;
80256 + }
80257 +
80258 /*
80259 * If we make a private mapping writable we increase our commit;
80260 * but (without finer accounting) cannot reduce our commit if we
80261 @@ -165,6 +231,38 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
80262 }
80263 }
80264
80265 +#ifdef CONFIG_PAX_SEGMEXEC
80266 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
80267 + if (start != vma->vm_start) {
80268 + error = split_vma(mm, vma, start, 1);
80269 + if (error)
80270 + goto fail;
80271 + BUG_ON(!*pprev || (*pprev)->vm_next == vma);
80272 + *pprev = (*pprev)->vm_next;
80273 + }
80274 +
80275 + if (end != vma->vm_end) {
80276 + error = split_vma(mm, vma, end, 0);
80277 + if (error)
80278 + goto fail;
80279 + }
80280 +
80281 + if (pax_find_mirror_vma(vma)) {
80282 + error = __do_munmap(mm, start_m, end_m - start_m);
80283 + if (error)
80284 + goto fail;
80285 + } else {
80286 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
80287 + if (!vma_m) {
80288 + error = -ENOMEM;
80289 + goto fail;
80290 + }
80291 + vma->vm_flags = newflags;
80292 + pax_mirror_vma(vma_m, vma);
80293 + }
80294 + }
80295 +#endif
80296 +
80297 /*
80298 * First try to merge with previous and/or next vma.
80299 */
80300 @@ -195,9 +293,21 @@ success:
80301 * vm_flags and vm_page_prot are protected by the mmap_sem
80302 * held in write mode.
80303 */
80304 +
80305 +#ifdef CONFIG_PAX_SEGMEXEC
80306 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
80307 + pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
80308 +#endif
80309 +
80310 vma->vm_flags = newflags;
80311 +
80312 +#ifdef CONFIG_PAX_MPROTECT
80313 + if (mm->binfmt && mm->binfmt->handle_mprotect)
80314 + mm->binfmt->handle_mprotect(vma, newflags);
80315 +#endif
80316 +
80317 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
80318 - vm_get_page_prot(newflags));
80319 + vm_get_page_prot(vma->vm_flags));
80320
80321 if (vma_wants_writenotify(vma)) {
80322 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
80323 @@ -239,6 +349,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
80324 end = start + len;
80325 if (end <= start)
80326 return -ENOMEM;
80327 +
80328 +#ifdef CONFIG_PAX_SEGMEXEC
80329 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
80330 + if (end > SEGMEXEC_TASK_SIZE)
80331 + return -EINVAL;
80332 + } else
80333 +#endif
80334 +
80335 + if (end > TASK_SIZE)
80336 + return -EINVAL;
80337 +
80338 if (!arch_validate_prot(prot))
80339 return -EINVAL;
80340
80341 @@ -246,7 +367,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
80342 /*
80343 * Does the application expect PROT_READ to imply PROT_EXEC:
80344 */
80345 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
80346 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
80347 prot |= PROT_EXEC;
80348
80349 vm_flags = calc_vm_prot_bits(prot);
80350 @@ -278,6 +399,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
80351 if (start > vma->vm_start)
80352 prev = vma;
80353
80354 +#ifdef CONFIG_PAX_MPROTECT
80355 + if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
80356 + current->mm->binfmt->handle_mprotect(vma, vm_flags);
80357 +#endif
80358 +
80359 for (nstart = start ; ; ) {
80360 unsigned long newflags;
80361
80362 @@ -287,6 +413,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
80363
80364 /* newflags >> 4 shift VM_MAY% in place of VM_% */
80365 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
80366 + if (prot & (PROT_WRITE | PROT_EXEC))
80367 + gr_log_rwxmprotect(vma->vm_file);
80368 +
80369 + error = -EACCES;
80370 + goto out;
80371 + }
80372 +
80373 + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
80374 error = -EACCES;
80375 goto out;
80376 }
80377 @@ -301,6 +435,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
80378 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
80379 if (error)
80380 goto out;
80381 +
80382 + track_exec_limit(current->mm, nstart, tmp, vm_flags);
80383 +
80384 nstart = tmp;
80385
80386 if (nstart < prev->vm_end)
80387 diff --git a/mm/mremap.c b/mm/mremap.c
80388 index 3e98d79..1706cec 100644
80389 --- a/mm/mremap.c
80390 +++ b/mm/mremap.c
80391 @@ -112,6 +112,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
80392 continue;
80393 pte = ptep_clear_flush(vma, old_addr, old_pte);
80394 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
80395 +
80396 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
80397 + if (!nx_enabled && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
80398 + pte = pte_exprotect(pte);
80399 +#endif
80400 +
80401 set_pte_at(mm, new_addr, new_pte, pte);
80402 }
80403
80404 @@ -271,6 +277,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
80405 if (is_vm_hugetlb_page(vma))
80406 goto Einval;
80407
80408 +#ifdef CONFIG_PAX_SEGMEXEC
80409 + if (pax_find_mirror_vma(vma))
80410 + goto Einval;
80411 +#endif
80412 +
80413 /* We can't remap across vm area boundaries */
80414 if (old_len > vma->vm_end - addr)
80415 goto Efault;
80416 @@ -327,20 +338,25 @@ static unsigned long mremap_to(unsigned long addr,
80417 unsigned long ret = -EINVAL;
80418 unsigned long charged = 0;
80419 unsigned long map_flags;
80420 + unsigned long pax_task_size = TASK_SIZE;
80421
80422 if (new_addr & ~PAGE_MASK)
80423 goto out;
80424
80425 - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
80426 +#ifdef CONFIG_PAX_SEGMEXEC
80427 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
80428 + pax_task_size = SEGMEXEC_TASK_SIZE;
80429 +#endif
80430 +
80431 + pax_task_size -= PAGE_SIZE;
80432 +
80433 + if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
80434 goto out;
80435
80436 /* Check if the location we're moving into overlaps the
80437 * old location at all, and fail if it does.
80438 */
80439 - if ((new_addr <= addr) && (new_addr+new_len) > addr)
80440 - goto out;
80441 -
80442 - if ((addr <= new_addr) && (addr+old_len) > new_addr)
80443 + if (addr + old_len > new_addr && new_addr + new_len > addr)
80444 goto out;
80445
80446 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
80447 @@ -412,6 +428,7 @@ unsigned long do_mremap(unsigned long addr,
80448 struct vm_area_struct *vma;
80449 unsigned long ret = -EINVAL;
80450 unsigned long charged = 0;
80451 + unsigned long pax_task_size = TASK_SIZE;
80452
80453 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
80454 goto out;
80455 @@ -430,6 +447,17 @@ unsigned long do_mremap(unsigned long addr,
80456 if (!new_len)
80457 goto out;
80458
80459 +#ifdef CONFIG_PAX_SEGMEXEC
80460 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
80461 + pax_task_size = SEGMEXEC_TASK_SIZE;
80462 +#endif
80463 +
80464 + pax_task_size -= PAGE_SIZE;
80465 +
80466 + if (new_len > pax_task_size || addr > pax_task_size-new_len ||
80467 + old_len > pax_task_size || addr > pax_task_size-old_len)
80468 + goto out;
80469 +
80470 if (flags & MREMAP_FIXED) {
80471 if (flags & MREMAP_MAYMOVE)
80472 ret = mremap_to(addr, old_len, new_addr, new_len);
80473 @@ -476,6 +504,7 @@ unsigned long do_mremap(unsigned long addr,
80474 addr + new_len);
80475 }
80476 ret = addr;
80477 + track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
80478 goto out;
80479 }
80480 }
80481 @@ -502,7 +531,13 @@ unsigned long do_mremap(unsigned long addr,
80482 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
80483 if (ret)
80484 goto out;
80485 +
80486 + map_flags = vma->vm_flags;
80487 ret = move_vma(vma, addr, old_len, new_len, new_addr);
80488 + if (!(ret & ~PAGE_MASK)) {
80489 + track_exec_limit(current->mm, addr, addr + old_len, 0UL);
80490 + track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
80491 + }
80492 }
80493 out:
80494 if (ret & ~PAGE_MASK)
80495 diff --git a/mm/nommu.c b/mm/nommu.c
80496 index 406e8d4..53970d3 100644
80497 --- a/mm/nommu.c
80498 +++ b/mm/nommu.c
80499 @@ -67,7 +67,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
80500 int sysctl_overcommit_ratio = 50; /* default is 50% */
80501 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
80502 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
80503 -int heap_stack_gap = 0;
80504
80505 atomic_long_t mmap_pages_allocated;
80506
80507 @@ -761,15 +760,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
80508 EXPORT_SYMBOL(find_vma);
80509
80510 /*
80511 - * find a VMA
80512 - * - we don't extend stack VMAs under NOMMU conditions
80513 - */
80514 -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
80515 -{
80516 - return find_vma(mm, addr);
80517 -}
80518 -
80519 -/*
80520 * expand a stack to a given address
80521 * - not supported under NOMMU conditions
80522 */
80523 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
80524 index 3ecab7e..594a471 100644
80525 --- a/mm/page_alloc.c
80526 +++ b/mm/page_alloc.c
80527 @@ -289,7 +289,7 @@ out:
80528 * This usage means that zero-order pages may not be compound.
80529 */
80530
80531 -static void free_compound_page(struct page *page)
80532 +void free_compound_page(struct page *page)
80533 {
80534 __free_pages_ok(page, compound_order(page));
80535 }
80536 @@ -587,6 +587,10 @@ static void __free_pages_ok(struct page *page, unsigned int order)
80537 int bad = 0;
80538 int wasMlocked = __TestClearPageMlocked(page);
80539
80540 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
80541 + unsigned long index = 1UL << order;
80542 +#endif
80543 +
80544 kmemcheck_free_shadow(page, order);
80545
80546 for (i = 0 ; i < (1 << order) ; ++i)
80547 @@ -599,6 +603,12 @@ static void __free_pages_ok(struct page *page, unsigned int order)
80548 debug_check_no_obj_freed(page_address(page),
80549 PAGE_SIZE << order);
80550 }
80551 +
80552 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
80553 + for (; index; --index)
80554 + sanitize_highpage(page + index - 1);
80555 +#endif
80556 +
80557 arch_free_page(page, order);
80558 kernel_map_pages(page, 1 << order, 0);
80559
80560 @@ -702,8 +712,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
80561 arch_alloc_page(page, order);
80562 kernel_map_pages(page, 1 << order, 1);
80563
80564 +#ifndef CONFIG_PAX_MEMORY_SANITIZE
80565 if (gfp_flags & __GFP_ZERO)
80566 prep_zero_page(page, order, gfp_flags);
80567 +#endif
80568
80569 if (order && (gfp_flags & __GFP_COMP))
80570 prep_compound_page(page, order);
80571 @@ -1097,6 +1109,11 @@ static void free_hot_cold_page(struct page *page, int cold)
80572 debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
80573 debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
80574 }
80575 +
80576 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
80577 + sanitize_highpage(page);
80578 +#endif
80579 +
80580 arch_free_page(page, 0);
80581 kernel_map_pages(page, 1, 0);
80582
80583 @@ -2179,6 +2196,8 @@ void show_free_areas(void)
80584 int cpu;
80585 struct zone *zone;
80586
80587 + pax_track_stack();
80588 +
80589 for_each_populated_zone(zone) {
80590 show_node(zone);
80591 printk("%s per-cpu:\n", zone->name);
80592 @@ -3736,7 +3755,7 @@ static void __init setup_usemap(struct pglist_data *pgdat,
80593 zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
80594 }
80595 #else
80596 -static void inline setup_usemap(struct pglist_data *pgdat,
80597 +static inline void setup_usemap(struct pglist_data *pgdat,
80598 struct zone *zone, unsigned long zonesize) {}
80599 #endif /* CONFIG_SPARSEMEM */
80600
80601 diff --git a/mm/percpu.c b/mm/percpu.c
80602 index c90614a..5f7b7b8 100644
80603 --- a/mm/percpu.c
80604 +++ b/mm/percpu.c
80605 @@ -115,7 +115,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
80606 static unsigned int pcpu_high_unit_cpu __read_mostly;
80607
80608 /* the address of the first chunk which starts with the kernel static area */
80609 -void *pcpu_base_addr __read_mostly;
80610 +void *pcpu_base_addr __read_only;
80611 EXPORT_SYMBOL_GPL(pcpu_base_addr);
80612
80613 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
80614 diff --git a/mm/rmap.c b/mm/rmap.c
80615 index dd43373..d848cd7 100644
80616 --- a/mm/rmap.c
80617 +++ b/mm/rmap.c
80618 @@ -121,6 +121,17 @@ int anon_vma_prepare(struct vm_area_struct *vma)
80619 /* page_table_lock to protect against threads */
80620 spin_lock(&mm->page_table_lock);
80621 if (likely(!vma->anon_vma)) {
80622 +
80623 +#ifdef CONFIG_PAX_SEGMEXEC
80624 + struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
80625 +
80626 + if (vma_m) {
80627 + BUG_ON(vma_m->anon_vma);
80628 + vma_m->anon_vma = anon_vma;
80629 + list_add_tail(&vma_m->anon_vma_node, &anon_vma->head);
80630 + }
80631 +#endif
80632 +
80633 vma->anon_vma = anon_vma;
80634 list_add_tail(&vma->anon_vma_node, &anon_vma->head);
80635 allocated = NULL;
80636 diff --git a/mm/shmem.c b/mm/shmem.c
80637 index 3e0005b..1d659a8 100644
80638 --- a/mm/shmem.c
80639 +++ b/mm/shmem.c
80640 @@ -31,7 +31,7 @@
80641 #include <linux/swap.h>
80642 #include <linux/ima.h>
80643
80644 -static struct vfsmount *shm_mnt;
80645 +struct vfsmount *shm_mnt;
80646
80647 #ifdef CONFIG_SHMEM
80648 /*
80649 @@ -1061,6 +1061,8 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
80650 goto unlock;
80651 }
80652 entry = shmem_swp_entry(info, index, NULL);
80653 + if (!entry)
80654 + goto unlock;
80655 if (entry->val) {
80656 /*
80657 * The more uptodate page coming down from a stacked
80658 @@ -1144,6 +1146,8 @@ static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
80659 struct vm_area_struct pvma;
80660 struct page *page;
80661
80662 + pax_track_stack();
80663 +
80664 spol = mpol_cond_copy(&mpol,
80665 mpol_shared_policy_lookup(&info->policy, idx));
80666
80667 @@ -1962,7 +1966,7 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
80668
80669 info = SHMEM_I(inode);
80670 inode->i_size = len-1;
80671 - if (len <= (char *)inode - (char *)info) {
80672 + if (len <= (char *)inode - (char *)info && len <= 64) {
80673 /* do it inline */
80674 memcpy(info, symname, len);
80675 inode->i_op = &shmem_symlink_inline_operations;
80676 @@ -2310,8 +2314,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
80677 int err = -ENOMEM;
80678
80679 /* Round up to L1_CACHE_BYTES to resist false sharing */
80680 - sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
80681 - L1_CACHE_BYTES), GFP_KERNEL);
80682 + sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
80683 if (!sbinfo)
80684 return -ENOMEM;
80685
80686 diff --git a/mm/slab.c b/mm/slab.c
80687 index c8d466a..909e01e 100644
80688 --- a/mm/slab.c
80689 +++ b/mm/slab.c
80690 @@ -174,7 +174,7 @@
80691
80692 /* Legal flag mask for kmem_cache_create(). */
80693 #if DEBUG
80694 -# define CREATE_MASK (SLAB_RED_ZONE | \
80695 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
80696 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
80697 SLAB_CACHE_DMA | \
80698 SLAB_STORE_USER | \
80699 @@ -182,7 +182,7 @@
80700 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
80701 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
80702 #else
80703 -# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
80704 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
80705 SLAB_CACHE_DMA | \
80706 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
80707 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
80708 @@ -308,7 +308,7 @@ struct kmem_list3 {
80709 * Need this for bootstrapping a per node allocator.
80710 */
80711 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
80712 -struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
80713 +struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
80714 #define CACHE_CACHE 0
80715 #define SIZE_AC MAX_NUMNODES
80716 #define SIZE_L3 (2 * MAX_NUMNODES)
80717 @@ -409,10 +409,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
80718 if ((x)->max_freeable < i) \
80719 (x)->max_freeable = i; \
80720 } while (0)
80721 -#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
80722 -#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
80723 -#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
80724 -#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
80725 +#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
80726 +#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
80727 +#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
80728 +#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
80729 #else
80730 #define STATS_INC_ACTIVE(x) do { } while (0)
80731 #define STATS_DEC_ACTIVE(x) do { } while (0)
80732 @@ -558,7 +558,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
80733 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
80734 */
80735 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
80736 - const struct slab *slab, void *obj)
80737 + const struct slab *slab, const void *obj)
80738 {
80739 u32 offset = (obj - slab->s_mem);
80740 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
80741 @@ -1453,7 +1453,7 @@ void __init kmem_cache_init(void)
80742 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
80743 sizes[INDEX_AC].cs_size,
80744 ARCH_KMALLOC_MINALIGN,
80745 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
80746 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
80747 NULL);
80748
80749 if (INDEX_AC != INDEX_L3) {
80750 @@ -1461,7 +1461,7 @@ void __init kmem_cache_init(void)
80751 kmem_cache_create(names[INDEX_L3].name,
80752 sizes[INDEX_L3].cs_size,
80753 ARCH_KMALLOC_MINALIGN,
80754 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
80755 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
80756 NULL);
80757 }
80758
80759 @@ -1479,7 +1479,7 @@ void __init kmem_cache_init(void)
80760 sizes->cs_cachep = kmem_cache_create(names->name,
80761 sizes->cs_size,
80762 ARCH_KMALLOC_MINALIGN,
80763 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
80764 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
80765 NULL);
80766 }
80767 #ifdef CONFIG_ZONE_DMA
80768 @@ -4211,10 +4211,10 @@ static int s_show(struct seq_file *m, void *p)
80769 }
80770 /* cpu stats */
80771 {
80772 - unsigned long allochit = atomic_read(&cachep->allochit);
80773 - unsigned long allocmiss = atomic_read(&cachep->allocmiss);
80774 - unsigned long freehit = atomic_read(&cachep->freehit);
80775 - unsigned long freemiss = atomic_read(&cachep->freemiss);
80776 + unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
80777 + unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
80778 + unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
80779 + unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
80780
80781 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
80782 allochit, allocmiss, freehit, freemiss);
80783 @@ -4471,15 +4471,70 @@ static const struct file_operations proc_slabstats_operations = {
80784
80785 static int __init slab_proc_init(void)
80786 {
80787 - proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
80788 + mode_t gr_mode = S_IRUGO;
80789 +
80790 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
80791 + gr_mode = S_IRUSR;
80792 +#endif
80793 +
80794 + proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
80795 #ifdef CONFIG_DEBUG_SLAB_LEAK
80796 - proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
80797 + proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
80798 #endif
80799 return 0;
80800 }
80801 module_init(slab_proc_init);
80802 #endif
80803
80804 +void check_object_size(const void *ptr, unsigned long n, bool to)
80805 +{
80806 +
80807 +#ifdef CONFIG_PAX_USERCOPY
80808 + struct page *page;
80809 + struct kmem_cache *cachep = NULL;
80810 + struct slab *slabp;
80811 + unsigned int objnr;
80812 + unsigned long offset;
80813 + const char *type;
80814 +
80815 + if (!n)
80816 + return;
80817 +
80818 + type = "<null>";
80819 + if (ZERO_OR_NULL_PTR(ptr))
80820 + goto report;
80821 +
80822 + if (!virt_addr_valid(ptr))
80823 + return;
80824 +
80825 + page = virt_to_head_page(ptr);
80826 +
80827 + type = "<process stack>";
80828 + if (!PageSlab(page)) {
80829 + if (object_is_on_stack(ptr, n) == -1)
80830 + goto report;
80831 + return;
80832 + }
80833 +
80834 + cachep = page_get_cache(page);
80835 + type = cachep->name;
80836 + if (!(cachep->flags & SLAB_USERCOPY))
80837 + goto report;
80838 +
80839 + slabp = page_get_slab(page);
80840 + objnr = obj_to_index(cachep, slabp, ptr);
80841 + BUG_ON(objnr >= cachep->num);
80842 + offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
80843 + if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
80844 + return;
80845 +
80846 +report:
80847 + pax_report_usercopy(ptr, n, to, type);
80848 +#endif
80849 +
80850 +}
80851 +EXPORT_SYMBOL(check_object_size);
80852 +
80853 /**
80854 * ksize - get the actual amount of memory allocated for a given object
80855 * @objp: Pointer to the object
80856 diff --git a/mm/slob.c b/mm/slob.c
80857 index 837ebd6..0bd23bc 100644
80858 --- a/mm/slob.c
80859 +++ b/mm/slob.c
80860 @@ -29,7 +29,7 @@
80861 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
80862 * alloc_pages() directly, allocating compound pages so the page order
80863 * does not have to be separately tracked, and also stores the exact
80864 - * allocation size in page->private so that it can be used to accurately
80865 + * allocation size in slob_page->size so that it can be used to accurately
80866 * provide ksize(). These objects are detected in kfree() because slob_page()
80867 * is false for them.
80868 *
80869 @@ -58,6 +58,7 @@
80870 */
80871
80872 #include <linux/kernel.h>
80873 +#include <linux/sched.h>
80874 #include <linux/slab.h>
80875 #include <linux/mm.h>
80876 #include <linux/swap.h> /* struct reclaim_state */
80877 @@ -100,7 +101,8 @@ struct slob_page {
80878 unsigned long flags; /* mandatory */
80879 atomic_t _count; /* mandatory */
80880 slobidx_t units; /* free units left in page */
80881 - unsigned long pad[2];
80882 + unsigned long pad[1];
80883 + unsigned long size; /* size when >=PAGE_SIZE */
80884 slob_t *free; /* first free slob_t in page */
80885 struct list_head list; /* linked list of free pages */
80886 };
80887 @@ -133,7 +135,7 @@ static LIST_HEAD(free_slob_large);
80888 */
80889 static inline int is_slob_page(struct slob_page *sp)
80890 {
80891 - return PageSlab((struct page *)sp);
80892 + return PageSlab((struct page *)sp) && !sp->size;
80893 }
80894
80895 static inline void set_slob_page(struct slob_page *sp)
80896 @@ -148,7 +150,7 @@ static inline void clear_slob_page(struct slob_page *sp)
80897
80898 static inline struct slob_page *slob_page(const void *addr)
80899 {
80900 - return (struct slob_page *)virt_to_page(addr);
80901 + return (struct slob_page *)virt_to_head_page(addr);
80902 }
80903
80904 /*
80905 @@ -208,7 +210,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
80906 /*
80907 * Return the size of a slob block.
80908 */
80909 -static slobidx_t slob_units(slob_t *s)
80910 +static slobidx_t slob_units(const slob_t *s)
80911 {
80912 if (s->units > 0)
80913 return s->units;
80914 @@ -218,7 +220,7 @@ static slobidx_t slob_units(slob_t *s)
80915 /*
80916 * Return the next free slob block pointer after this one.
80917 */
80918 -static slob_t *slob_next(slob_t *s)
80919 +static slob_t *slob_next(const slob_t *s)
80920 {
80921 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
80922 slobidx_t next;
80923 @@ -233,7 +235,7 @@ static slob_t *slob_next(slob_t *s)
80924 /*
80925 * Returns true if s is the last free block in its page.
80926 */
80927 -static int slob_last(slob_t *s)
80928 +static int slob_last(const slob_t *s)
80929 {
80930 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
80931 }
80932 @@ -252,6 +254,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
80933 if (!page)
80934 return NULL;
80935
80936 + set_slob_page(page);
80937 return page_address(page);
80938 }
80939
80940 @@ -368,11 +371,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
80941 if (!b)
80942 return NULL;
80943 sp = slob_page(b);
80944 - set_slob_page(sp);
80945
80946 spin_lock_irqsave(&slob_lock, flags);
80947 sp->units = SLOB_UNITS(PAGE_SIZE);
80948 sp->free = b;
80949 + sp->size = 0;
80950 INIT_LIST_HEAD(&sp->list);
80951 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
80952 set_slob_page_free(sp, slob_list);
80953 @@ -475,10 +478,9 @@ out:
80954 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
80955 #endif
80956
80957 -void *__kmalloc_node(size_t size, gfp_t gfp, int node)
80958 +static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
80959 {
80960 - unsigned int *m;
80961 - int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
80962 + slob_t *m;
80963 void *ret;
80964
80965 lockdep_trace_alloc(gfp);
80966 @@ -491,7 +493,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
80967
80968 if (!m)
80969 return NULL;
80970 - *m = size;
80971 + BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
80972 + BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
80973 + m[0].units = size;
80974 + m[1].units = align;
80975 ret = (void *)m + align;
80976
80977 trace_kmalloc_node(_RET_IP_, ret,
80978 @@ -501,16 +506,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
80979
80980 ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
80981 if (ret) {
80982 - struct page *page;
80983 - page = virt_to_page(ret);
80984 - page->private = size;
80985 + struct slob_page *sp;
80986 + sp = slob_page(ret);
80987 + sp->size = size;
80988 }
80989
80990 trace_kmalloc_node(_RET_IP_, ret,
80991 size, PAGE_SIZE << order, gfp, node);
80992 }
80993
80994 - kmemleak_alloc(ret, size, 1, gfp);
80995 + return ret;
80996 +}
80997 +
80998 +void *__kmalloc_node(size_t size, gfp_t gfp, int node)
80999 +{
81000 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
81001 + void *ret = __kmalloc_node_align(size, gfp, node, align);
81002 +
81003 + if (!ZERO_OR_NULL_PTR(ret))
81004 + kmemleak_alloc(ret, size, 1, gfp);
81005 return ret;
81006 }
81007 EXPORT_SYMBOL(__kmalloc_node);
81008 @@ -528,13 +542,92 @@ void kfree(const void *block)
81009 sp = slob_page(block);
81010 if (is_slob_page(sp)) {
81011 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
81012 - unsigned int *m = (unsigned int *)(block - align);
81013 - slob_free(m, *m + align);
81014 - } else
81015 + slob_t *m = (slob_t *)(block - align);
81016 + slob_free(m, m[0].units + align);
81017 + } else {
81018 + clear_slob_page(sp);
81019 + free_slob_page(sp);
81020 + sp->size = 0;
81021 put_page(&sp->page);
81022 + }
81023 }
81024 EXPORT_SYMBOL(kfree);
81025
81026 +void check_object_size(const void *ptr, unsigned long n, bool to)
81027 +{
81028 +
81029 +#ifdef CONFIG_PAX_USERCOPY
81030 + struct slob_page *sp;
81031 + const slob_t *free;
81032 + const void *base;
81033 + unsigned long flags;
81034 + const char *type;
81035 +
81036 + if (!n)
81037 + return;
81038 +
81039 + type = "<null>";
81040 + if (ZERO_OR_NULL_PTR(ptr))
81041 + goto report;
81042 +
81043 + if (!virt_addr_valid(ptr))
81044 + return;
81045 +
81046 + type = "<process stack>";
81047 + sp = slob_page(ptr);
81048 + if (!PageSlab((struct page *)sp)) {
81049 + if (object_is_on_stack(ptr, n) == -1)
81050 + goto report;
81051 + return;
81052 + }
81053 +
81054 + type = "<slob>";
81055 + if (sp->size) {
81056 + base = page_address(&sp->page);
81057 + if (base <= ptr && n <= sp->size - (ptr - base))
81058 + return;
81059 + goto report;
81060 + }
81061 +
81062 + /* some tricky double walking to find the chunk */
81063 + spin_lock_irqsave(&slob_lock, flags);
81064 + base = (void *)((unsigned long)ptr & PAGE_MASK);
81065 + free = sp->free;
81066 +
81067 + while (!slob_last(free) && (void *)free <= ptr) {
81068 + base = free + slob_units(free);
81069 + free = slob_next(free);
81070 + }
81071 +
81072 + while (base < (void *)free) {
81073 + slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
81074 + int size = SLOB_UNIT * SLOB_UNITS(m + align);
81075 + int offset;
81076 +
81077 + if (ptr < base + align)
81078 + break;
81079 +
81080 + offset = ptr - base - align;
81081 + if (offset >= m) {
81082 + base += size;
81083 + continue;
81084 + }
81085 +
81086 + if (n > m - offset)
81087 + break;
81088 +
81089 + spin_unlock_irqrestore(&slob_lock, flags);
81090 + return;
81091 + }
81092 +
81093 + spin_unlock_irqrestore(&slob_lock, flags);
81094 +report:
81095 + pax_report_usercopy(ptr, n, to, type);
81096 +#endif
81097 +
81098 +}
81099 +EXPORT_SYMBOL(check_object_size);
81100 +
81101 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
81102 size_t ksize(const void *block)
81103 {
81104 @@ -547,10 +640,10 @@ size_t ksize(const void *block)
81105 sp = slob_page(block);
81106 if (is_slob_page(sp)) {
81107 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
81108 - unsigned int *m = (unsigned int *)(block - align);
81109 - return SLOB_UNITS(*m) * SLOB_UNIT;
81110 + slob_t *m = (slob_t *)(block - align);
81111 + return SLOB_UNITS(m[0].units) * SLOB_UNIT;
81112 } else
81113 - return sp->page.private;
81114 + return sp->size;
81115 }
81116 EXPORT_SYMBOL(ksize);
81117
81118 @@ -566,8 +659,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
81119 {
81120 struct kmem_cache *c;
81121
81122 +#ifdef CONFIG_PAX_USERCOPY
81123 + c = __kmalloc_node_align(sizeof(struct kmem_cache),
81124 + GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
81125 +#else
81126 c = slob_alloc(sizeof(struct kmem_cache),
81127 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
81128 +#endif
81129
81130 if (c) {
81131 c->name = name;
81132 @@ -605,17 +703,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
81133 {
81134 void *b;
81135
81136 +#ifdef CONFIG_PAX_USERCOPY
81137 + b = __kmalloc_node_align(c->size, flags, node, c->align);
81138 +#else
81139 if (c->size < PAGE_SIZE) {
81140 b = slob_alloc(c->size, flags, c->align, node);
81141 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
81142 SLOB_UNITS(c->size) * SLOB_UNIT,
81143 flags, node);
81144 } else {
81145 + struct slob_page *sp;
81146 +
81147 b = slob_new_pages(flags, get_order(c->size), node);
81148 + sp = slob_page(b);
81149 + sp->size = c->size;
81150 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
81151 PAGE_SIZE << get_order(c->size),
81152 flags, node);
81153 }
81154 +#endif
81155
81156 if (c->ctor)
81157 c->ctor(b);
81158 @@ -627,10 +733,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
81159
81160 static void __kmem_cache_free(void *b, int size)
81161 {
81162 - if (size < PAGE_SIZE)
81163 + struct slob_page *sp = slob_page(b);
81164 +
81165 + if (is_slob_page(sp))
81166 slob_free(b, size);
81167 - else
81168 + else {
81169 + clear_slob_page(sp);
81170 + free_slob_page(sp);
81171 + sp->size = 0;
81172 slob_free_pages(b, get_order(size));
81173 + }
81174 }
81175
81176 static void kmem_rcu_free(struct rcu_head *head)
81177 @@ -643,18 +755,32 @@ static void kmem_rcu_free(struct rcu_head *head)
81178
81179 void kmem_cache_free(struct kmem_cache *c, void *b)
81180 {
81181 + int size = c->size;
81182 +
81183 +#ifdef CONFIG_PAX_USERCOPY
81184 + if (size + c->align < PAGE_SIZE) {
81185 + size += c->align;
81186 + b -= c->align;
81187 + }
81188 +#endif
81189 +
81190 kmemleak_free_recursive(b, c->flags);
81191 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
81192 struct slob_rcu *slob_rcu;
81193 - slob_rcu = b + (c->size - sizeof(struct slob_rcu));
81194 + slob_rcu = b + (size - sizeof(struct slob_rcu));
81195 INIT_RCU_HEAD(&slob_rcu->head);
81196 - slob_rcu->size = c->size;
81197 + slob_rcu->size = size;
81198 call_rcu(&slob_rcu->head, kmem_rcu_free);
81199 } else {
81200 - __kmem_cache_free(b, c->size);
81201 + __kmem_cache_free(b, size);
81202 }
81203
81204 +#ifdef CONFIG_PAX_USERCOPY
81205 + trace_kfree(_RET_IP_, b);
81206 +#else
81207 trace_kmem_cache_free(_RET_IP_, b);
81208 +#endif
81209 +
81210 }
81211 EXPORT_SYMBOL(kmem_cache_free);
81212
81213 diff --git a/mm/slub.c b/mm/slub.c
81214 index 4996fc7..87e01d0 100644
81215 --- a/mm/slub.c
81216 +++ b/mm/slub.c
81217 @@ -201,7 +201,7 @@ struct track {
81218
81219 enum track_item { TRACK_ALLOC, TRACK_FREE };
81220
81221 -#ifdef CONFIG_SLUB_DEBUG
81222 +#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
81223 static int sysfs_slab_add(struct kmem_cache *);
81224 static int sysfs_slab_alias(struct kmem_cache *, const char *);
81225 static void sysfs_slab_remove(struct kmem_cache *);
81226 @@ -410,7 +410,7 @@ static void print_track(const char *s, struct track *t)
81227 if (!t->addr)
81228 return;
81229
81230 - printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
81231 + printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
81232 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
81233 }
81234
81235 @@ -1893,6 +1893,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
81236
81237 page = virt_to_head_page(x);
81238
81239 + BUG_ON(!PageSlab(page));
81240 +
81241 slab_free(s, page, x, _RET_IP_);
81242
81243 trace_kmem_cache_free(_RET_IP_, x);
81244 @@ -1937,7 +1939,7 @@ static int slub_min_objects;
81245 * Merge control. If this is set then no merging of slab caches will occur.
81246 * (Could be removed. This was introduced to pacify the merge skeptics.)
81247 */
81248 -static int slub_nomerge;
81249 +static int slub_nomerge = 1;
81250
81251 /*
81252 * Calculate the order of allocation given an slab object size.
81253 @@ -2493,7 +2495,7 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
81254 * list to avoid pounding the page allocator excessively.
81255 */
81256 set_min_partial(s, ilog2(s->size));
81257 - s->refcount = 1;
81258 + atomic_set(&s->refcount, 1);
81259 #ifdef CONFIG_NUMA
81260 s->remote_node_defrag_ratio = 1000;
81261 #endif
81262 @@ -2630,8 +2632,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
81263 void kmem_cache_destroy(struct kmem_cache *s)
81264 {
81265 down_write(&slub_lock);
81266 - s->refcount--;
81267 - if (!s->refcount) {
81268 + if (atomic_dec_and_test(&s->refcount)) {
81269 list_del(&s->list);
81270 up_write(&slub_lock);
81271 if (kmem_cache_close(s)) {
81272 @@ -2691,12 +2692,10 @@ static int __init setup_slub_nomerge(char *str)
81273 __setup("slub_nomerge", setup_slub_nomerge);
81274
81275 static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
81276 - const char *name, int size, gfp_t gfp_flags)
81277 + const char *name, int size, gfp_t gfp_flags, unsigned int flags)
81278 {
81279 - unsigned int flags = 0;
81280 -
81281 if (gfp_flags & SLUB_DMA)
81282 - flags = SLAB_CACHE_DMA;
81283 + flags |= SLAB_CACHE_DMA;
81284
81285 /*
81286 * This function is called with IRQs disabled during early-boot on
81287 @@ -2915,6 +2914,50 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
81288 EXPORT_SYMBOL(__kmalloc_node);
81289 #endif
81290
81291 +void check_object_size(const void *ptr, unsigned long n, bool to)
81292 +{
81293 +
81294 +#ifdef CONFIG_PAX_USERCOPY
81295 + struct page *page;
81296 + struct kmem_cache *s = NULL;
81297 + unsigned long offset;
81298 + const char *type;
81299 +
81300 + if (!n)
81301 + return;
81302 +
81303 + type = "<null>";
81304 + if (ZERO_OR_NULL_PTR(ptr))
81305 + goto report;
81306 +
81307 + if (!virt_addr_valid(ptr))
81308 + return;
81309 +
81310 + page = get_object_page(ptr);
81311 +
81312 + type = "<process stack>";
81313 + if (!page) {
81314 + if (object_is_on_stack(ptr, n) == -1)
81315 + goto report;
81316 + return;
81317 + }
81318 +
81319 + s = page->slab;
81320 + type = s->name;
81321 + if (!(s->flags & SLAB_USERCOPY))
81322 + goto report;
81323 +
81324 + offset = (ptr - page_address(page)) % s->size;
81325 + if (offset <= s->objsize && n <= s->objsize - offset)
81326 + return;
81327 +
81328 +report:
81329 + pax_report_usercopy(ptr, n, to, type);
81330 +#endif
81331 +
81332 +}
81333 +EXPORT_SYMBOL(check_object_size);
81334 +
81335 size_t ksize(const void *object)
81336 {
81337 struct page *page;
81338 @@ -3185,8 +3228,8 @@ void __init kmem_cache_init(void)
81339 * kmem_cache_open for slab_state == DOWN.
81340 */
81341 create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
81342 - sizeof(struct kmem_cache_node), GFP_NOWAIT);
81343 - kmalloc_caches[0].refcount = -1;
81344 + sizeof(struct kmem_cache_node), GFP_NOWAIT, 0);
81345 + atomic_set(&kmalloc_caches[0].refcount, -1);
81346 caches++;
81347
81348 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
81349 @@ -3198,18 +3241,18 @@ void __init kmem_cache_init(void)
81350 /* Caches that are not of the two-to-the-power-of size */
81351 if (KMALLOC_MIN_SIZE <= 32) {
81352 create_kmalloc_cache(&kmalloc_caches[1],
81353 - "kmalloc-96", 96, GFP_NOWAIT);
81354 + "kmalloc-96", 96, GFP_NOWAIT, SLAB_USERCOPY);
81355 caches++;
81356 }
81357 if (KMALLOC_MIN_SIZE <= 64) {
81358 create_kmalloc_cache(&kmalloc_caches[2],
81359 - "kmalloc-192", 192, GFP_NOWAIT);
81360 + "kmalloc-192", 192, GFP_NOWAIT, SLAB_USERCOPY);
81361 caches++;
81362 }
81363
81364 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
81365 create_kmalloc_cache(&kmalloc_caches[i],
81366 - "kmalloc", 1 << i, GFP_NOWAIT);
81367 + "kmalloc", 1 << i, GFP_NOWAIT, SLAB_USERCOPY);
81368 caches++;
81369 }
81370
81371 @@ -3293,7 +3336,7 @@ static int slab_unmergeable(struct kmem_cache *s)
81372 /*
81373 * We may have set a slab to be unmergeable during bootstrap.
81374 */
81375 - if (s->refcount < 0)
81376 + if (atomic_read(&s->refcount) < 0)
81377 return 1;
81378
81379 return 0;
81380 @@ -3353,7 +3396,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
81381 if (s) {
81382 int cpu;
81383
81384 - s->refcount++;
81385 + atomic_inc(&s->refcount);
81386 /*
81387 * Adjust the object sizes so that we clear
81388 * the complete object on kzalloc.
81389 @@ -3372,7 +3415,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
81390
81391 if (sysfs_slab_alias(s, name)) {
81392 down_write(&slub_lock);
81393 - s->refcount--;
81394 + atomic_dec(&s->refcount);
81395 up_write(&slub_lock);
81396 goto err;
81397 }
81398 @@ -4101,7 +4144,7 @@ SLAB_ATTR_RO(ctor);
81399
81400 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
81401 {
81402 - return sprintf(buf, "%d\n", s->refcount - 1);
81403 + return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
81404 }
81405 SLAB_ATTR_RO(aliases);
81406
81407 @@ -4503,7 +4546,7 @@ static void kmem_cache_release(struct kobject *kobj)
81408 kfree(s);
81409 }
81410
81411 -static struct sysfs_ops slab_sysfs_ops = {
81412 +static const struct sysfs_ops slab_sysfs_ops = {
81413 .show = slab_attr_show,
81414 .store = slab_attr_store,
81415 };
81416 @@ -4522,7 +4565,7 @@ static int uevent_filter(struct kset *kset, struct kobject *kobj)
81417 return 0;
81418 }
81419
81420 -static struct kset_uevent_ops slab_uevent_ops = {
81421 +static const struct kset_uevent_ops slab_uevent_ops = {
81422 .filter = uevent_filter,
81423 };
81424
81425 @@ -4564,6 +4607,7 @@ static char *create_unique_id(struct kmem_cache *s)
81426 return name;
81427 }
81428
81429 +#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
81430 static int sysfs_slab_add(struct kmem_cache *s)
81431 {
81432 int err;
81433 @@ -4619,6 +4663,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
81434 kobject_del(&s->kobj);
81435 kobject_put(&s->kobj);
81436 }
81437 +#endif
81438
81439 /*
81440 * Need to buffer aliases during bootup until sysfs becomes
81441 @@ -4632,6 +4677,7 @@ struct saved_alias {
81442
81443 static struct saved_alias *alias_list;
81444
81445 +#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
81446 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
81447 {
81448 struct saved_alias *al;
81449 @@ -4654,6 +4700,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
81450 alias_list = al;
81451 return 0;
81452 }
81453 +#endif
81454
81455 static int __init slab_sysfs_init(void)
81456 {
81457 @@ -4785,7 +4832,13 @@ static const struct file_operations proc_slabinfo_operations = {
81458
81459 static int __init slab_proc_init(void)
81460 {
81461 - proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
81462 + mode_t gr_mode = S_IRUGO;
81463 +
81464 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
81465 + gr_mode = S_IRUSR;
81466 +#endif
81467 +
81468 + proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
81469 return 0;
81470 }
81471 module_init(slab_proc_init);
81472 diff --git a/mm/swap.c b/mm/swap.c
81473 index 308e57d..5de19c0 100644
81474 --- a/mm/swap.c
81475 +++ b/mm/swap.c
81476 @@ -30,6 +30,7 @@
81477 #include <linux/notifier.h>
81478 #include <linux/backing-dev.h>
81479 #include <linux/memcontrol.h>
81480 +#include <linux/hugetlb.h>
81481
81482 #include "internal.h"
81483
81484 @@ -65,6 +66,8 @@ static void put_compound_page(struct page *page)
81485 compound_page_dtor *dtor;
81486
81487 dtor = get_compound_page_dtor(page);
81488 + if (!PageHuge(page))
81489 + BUG_ON(dtor != free_compound_page);
81490 (*dtor)(page);
81491 }
81492 }
81493 diff --git a/mm/util.c b/mm/util.c
81494 index e48b493..24a601d 100644
81495 --- a/mm/util.c
81496 +++ b/mm/util.c
81497 @@ -228,6 +228,12 @@ EXPORT_SYMBOL(strndup_user);
81498 void arch_pick_mmap_layout(struct mm_struct *mm)
81499 {
81500 mm->mmap_base = TASK_UNMAPPED_BASE;
81501 +
81502 +#ifdef CONFIG_PAX_RANDMMAP
81503 + if (mm->pax_flags & MF_PAX_RANDMMAP)
81504 + mm->mmap_base += mm->delta_mmap;
81505 +#endif
81506 +
81507 mm->get_unmapped_area = arch_get_unmapped_area;
81508 mm->unmap_area = arch_unmap_area;
81509 }
81510 diff --git a/mm/vmalloc.c b/mm/vmalloc.c
81511 index f34ffd0..e60c44f 100644
81512 --- a/mm/vmalloc.c
81513 +++ b/mm/vmalloc.c
81514 @@ -40,8 +40,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
81515
81516 pte = pte_offset_kernel(pmd, addr);
81517 do {
81518 - pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
81519 - WARN_ON(!pte_none(ptent) && !pte_present(ptent));
81520 +
81521 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
81522 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
81523 + BUG_ON(!pte_exec(*pte));
81524 + set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
81525 + continue;
81526 + }
81527 +#endif
81528 +
81529 + {
81530 + pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
81531 + WARN_ON(!pte_none(ptent) && !pte_present(ptent));
81532 + }
81533 } while (pte++, addr += PAGE_SIZE, addr != end);
81534 }
81535
81536 @@ -92,6 +103,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
81537 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
81538 {
81539 pte_t *pte;
81540 + int ret = -ENOMEM;
81541
81542 /*
81543 * nr is a running index into the array which helps higher level
81544 @@ -101,17 +113,32 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
81545 pte = pte_alloc_kernel(pmd, addr);
81546 if (!pte)
81547 return -ENOMEM;
81548 +
81549 + pax_open_kernel();
81550 do {
81551 struct page *page = pages[*nr];
81552
81553 - if (WARN_ON(!pte_none(*pte)))
81554 - return -EBUSY;
81555 - if (WARN_ON(!page))
81556 - return -ENOMEM;
81557 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
81558 + if (!(pgprot_val(prot) & _PAGE_NX))
81559 + BUG_ON(!pte_exec(*pte) || pte_pfn(*pte) != __pa(addr) >> PAGE_SHIFT);
81560 + else
81561 +#endif
81562 +
81563 + if (WARN_ON(!pte_none(*pte))) {
81564 + ret = -EBUSY;
81565 + goto out;
81566 + }
81567 + if (WARN_ON(!page)) {
81568 + ret = -ENOMEM;
81569 + goto out;
81570 + }
81571 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
81572 (*nr)++;
81573 } while (pte++, addr += PAGE_SIZE, addr != end);
81574 - return 0;
81575 + ret = 0;
81576 +out:
81577 + pax_close_kernel();
81578 + return ret;
81579 }
81580
81581 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
81582 @@ -192,11 +219,20 @@ int is_vmalloc_or_module_addr(const void *x)
81583 * and fall back on vmalloc() if that fails. Others
81584 * just put it in the vmalloc space.
81585 */
81586 -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
81587 +#ifdef CONFIG_MODULES
81588 +#ifdef MODULES_VADDR
81589 unsigned long addr = (unsigned long)x;
81590 if (addr >= MODULES_VADDR && addr < MODULES_END)
81591 return 1;
81592 #endif
81593 +
81594 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
81595 + if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
81596 + return 1;
81597 +#endif
81598 +
81599 +#endif
81600 +
81601 return is_vmalloc_addr(x);
81602 }
81603
81604 @@ -217,8 +253,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
81605
81606 if (!pgd_none(*pgd)) {
81607 pud_t *pud = pud_offset(pgd, addr);
81608 +#ifdef CONFIG_X86
81609 + if (!pud_large(*pud))
81610 +#endif
81611 if (!pud_none(*pud)) {
81612 pmd_t *pmd = pmd_offset(pud, addr);
81613 +#ifdef CONFIG_X86
81614 + if (!pmd_large(*pmd))
81615 +#endif
81616 if (!pmd_none(*pmd)) {
81617 pte_t *ptep, pte;
81618
81619 @@ -292,13 +334,13 @@ static void __insert_vmap_area(struct vmap_area *va)
81620 struct rb_node *tmp;
81621
81622 while (*p) {
81623 - struct vmap_area *tmp;
81624 + struct vmap_area *varea;
81625
81626 parent = *p;
81627 - tmp = rb_entry(parent, struct vmap_area, rb_node);
81628 - if (va->va_start < tmp->va_end)
81629 + varea = rb_entry(parent, struct vmap_area, rb_node);
81630 + if (va->va_start < varea->va_end)
81631 p = &(*p)->rb_left;
81632 - else if (va->va_end > tmp->va_start)
81633 + else if (va->va_end > varea->va_start)
81634 p = &(*p)->rb_right;
81635 else
81636 BUG();
81637 @@ -1245,6 +1287,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
81638 struct vm_struct *area;
81639
81640 BUG_ON(in_interrupt());
81641 +
81642 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
81643 + if (flags & VM_KERNEXEC) {
81644 + if (start != VMALLOC_START || end != VMALLOC_END)
81645 + return NULL;
81646 + start = (unsigned long)MODULES_EXEC_VADDR;
81647 + end = (unsigned long)MODULES_EXEC_END;
81648 + }
81649 +#endif
81650 +
81651 if (flags & VM_IOREMAP) {
81652 int bit = fls(size);
81653
81654 @@ -1484,6 +1536,11 @@ void *vmap(struct page **pages, unsigned int count,
81655 if (count > totalram_pages)
81656 return NULL;
81657
81658 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
81659 + if (!(pgprot_val(prot) & _PAGE_NX))
81660 + flags |= VM_KERNEXEC;
81661 +#endif
81662 +
81663 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
81664 __builtin_return_address(0));
81665 if (!area)
81666 @@ -1594,6 +1651,14 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
81667 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
81668 return NULL;
81669
81670 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
81671 + if (!(pgprot_val(prot) & _PAGE_NX))
81672 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
81673 + VMALLOC_START, VMALLOC_END, node,
81674 + gfp_mask, caller);
81675 + else
81676 +#endif
81677 +
81678 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
81679 VMALLOC_START, VMALLOC_END, node,
81680 gfp_mask, caller);
81681 @@ -1619,6 +1684,7 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
81682 return addr;
81683 }
81684
81685 +#undef __vmalloc
81686 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
81687 {
81688 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
81689 @@ -1635,6 +1701,7 @@ EXPORT_SYMBOL(__vmalloc);
81690 * For tight control over page level allocator and protection flags
81691 * use __vmalloc() instead.
81692 */
81693 +#undef vmalloc
81694 void *vmalloc(unsigned long size)
81695 {
81696 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
81697 @@ -1649,6 +1716,7 @@ EXPORT_SYMBOL(vmalloc);
81698 * The resulting memory area is zeroed so it can be mapped to userspace
81699 * without leaking data.
81700 */
81701 +#undef vmalloc_user
81702 void *vmalloc_user(unsigned long size)
81703 {
81704 struct vm_struct *area;
81705 @@ -1676,6 +1744,7 @@ EXPORT_SYMBOL(vmalloc_user);
81706 * For tight control over page level allocator and protection flags
81707 * use __vmalloc() instead.
81708 */
81709 +#undef vmalloc_node
81710 void *vmalloc_node(unsigned long size, int node)
81711 {
81712 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
81713 @@ -1698,10 +1767,10 @@ EXPORT_SYMBOL(vmalloc_node);
81714 * For tight control over page level allocator and protection flags
81715 * use __vmalloc() instead.
81716 */
81717 -
81718 +#undef vmalloc_exec
81719 void *vmalloc_exec(unsigned long size)
81720 {
81721 - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
81722 + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
81723 -1, __builtin_return_address(0));
81724 }
81725
81726 @@ -1720,6 +1789,7 @@ void *vmalloc_exec(unsigned long size)
81727 * Allocate enough 32bit PA addressable pages to cover @size from the
81728 * page level allocator and map them into contiguous kernel virtual space.
81729 */
81730 +#undef vmalloc_32
81731 void *vmalloc_32(unsigned long size)
81732 {
81733 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
81734 @@ -1734,6 +1804,7 @@ EXPORT_SYMBOL(vmalloc_32);
81735 * The resulting memory area is 32bit addressable and zeroed so it can be
81736 * mapped to userspace without leaking data.
81737 */
81738 +#undef vmalloc_32_user
81739 void *vmalloc_32_user(unsigned long size)
81740 {
81741 struct vm_struct *area;
81742 @@ -1998,6 +2069,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
81743 unsigned long uaddr = vma->vm_start;
81744 unsigned long usize = vma->vm_end - vma->vm_start;
81745
81746 + BUG_ON(vma->vm_mirror);
81747 +
81748 if ((PAGE_SIZE-1) & (unsigned long)addr)
81749 return -EINVAL;
81750
81751 diff --git a/mm/vmstat.c b/mm/vmstat.c
81752 index 42d76c6..5643dc4 100644
81753 --- a/mm/vmstat.c
81754 +++ b/mm/vmstat.c
81755 @@ -74,7 +74,7 @@ void vm_events_fold_cpu(int cpu)
81756 *
81757 * vm_stat contains the global counters
81758 */
81759 -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
81760 +atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
81761 EXPORT_SYMBOL(vm_stat);
81762
81763 #ifdef CONFIG_SMP
81764 @@ -324,7 +324,7 @@ void refresh_cpu_vm_stats(int cpu)
81765 v = p->vm_stat_diff[i];
81766 p->vm_stat_diff[i] = 0;
81767 local_irq_restore(flags);
81768 - atomic_long_add(v, &zone->vm_stat[i]);
81769 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
81770 global_diff[i] += v;
81771 #ifdef CONFIG_NUMA
81772 /* 3 seconds idle till flush */
81773 @@ -362,7 +362,7 @@ void refresh_cpu_vm_stats(int cpu)
81774
81775 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
81776 if (global_diff[i])
81777 - atomic_long_add(global_diff[i], &vm_stat[i]);
81778 + atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
81779 }
81780
81781 #endif
81782 @@ -953,10 +953,20 @@ static int __init setup_vmstat(void)
81783 start_cpu_timer(cpu);
81784 #endif
81785 #ifdef CONFIG_PROC_FS
81786 - proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
81787 - proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
81788 - proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
81789 - proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
81790 + {
81791 + mode_t gr_mode = S_IRUGO;
81792 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
81793 + gr_mode = S_IRUSR;
81794 +#endif
81795 + proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
81796 + proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
81797 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
81798 + proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
81799 +#else
81800 + proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
81801 +#endif
81802 + proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
81803 + }
81804 #endif
81805 return 0;
81806 }
81807 diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
81808 index a29c5ab..6143f20 100644
81809 --- a/net/8021q/vlan.c
81810 +++ b/net/8021q/vlan.c
81811 @@ -622,8 +622,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
81812 err = -EPERM;
81813 if (!capable(CAP_NET_ADMIN))
81814 break;
81815 - if ((args.u.name_type >= 0) &&
81816 - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
81817 + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
81818 struct vlan_net *vn;
81819
81820 vn = net_generic(net, vlan_net_id);
81821 diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
81822 index a2d2984..f9eb711 100644
81823 --- a/net/9p/trans_fd.c
81824 +++ b/net/9p/trans_fd.c
81825 @@ -419,7 +419,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
81826 oldfs = get_fs();
81827 set_fs(get_ds());
81828 /* The cast to a user pointer is valid due to the set_fs() */
81829 - ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
81830 + ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
81831 set_fs(oldfs);
81832
81833 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
81834 diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
81835 index 02cc7e7..4514f1b 100644
81836 --- a/net/atm/atm_misc.c
81837 +++ b/net/atm/atm_misc.c
81838 @@ -19,7 +19,7 @@ int atm_charge(struct atm_vcc *vcc,int truesize)
81839 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
81840 return 1;
81841 atm_return(vcc,truesize);
81842 - atomic_inc(&vcc->stats->rx_drop);
81843 + atomic_inc_unchecked(&vcc->stats->rx_drop);
81844 return 0;
81845 }
81846
81847 @@ -41,7 +41,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size,
81848 }
81849 }
81850 atm_return(vcc,guess);
81851 - atomic_inc(&vcc->stats->rx_drop);
81852 + atomic_inc_unchecked(&vcc->stats->rx_drop);
81853 return NULL;
81854 }
81855
81856 @@ -88,7 +88,7 @@ int atm_pcr_goal(const struct atm_trafprm *tp)
81857
81858 void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
81859 {
81860 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
81861 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
81862 __SONET_ITEMS
81863 #undef __HANDLE_ITEM
81864 }
81865 @@ -96,7 +96,7 @@ void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
81866
81867 void sonet_subtract_stats(struct k_sonet_stats *from,struct sonet_stats *to)
81868 {
81869 -#define __HANDLE_ITEM(i) atomic_sub(to->i,&from->i)
81870 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
81871 __SONET_ITEMS
81872 #undef __HANDLE_ITEM
81873 }
81874 diff --git a/net/atm/lec.h b/net/atm/lec.h
81875 index 9d14d19..5c145f3 100644
81876 --- a/net/atm/lec.h
81877 +++ b/net/atm/lec.h
81878 @@ -48,7 +48,7 @@ struct lane2_ops {
81879 const u8 *tlvs, u32 sizeoftlvs);
81880 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
81881 const u8 *tlvs, u32 sizeoftlvs);
81882 -};
81883 +} __no_const;
81884
81885 /*
81886 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
81887 diff --git a/net/atm/mpc.h b/net/atm/mpc.h
81888 index 0919a88..a23d54e 100644
81889 --- a/net/atm/mpc.h
81890 +++ b/net/atm/mpc.h
81891 @@ -33,7 +33,7 @@ struct mpoa_client {
81892 struct mpc_parameters parameters; /* parameters for this client */
81893
81894 const struct net_device_ops *old_ops;
81895 - struct net_device_ops new_ops;
81896 + net_device_ops_no_const new_ops;
81897 };
81898
81899
81900 diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
81901 index 4504a4b..1733f1e 100644
81902 --- a/net/atm/mpoa_caches.c
81903 +++ b/net/atm/mpoa_caches.c
81904 @@ -498,6 +498,8 @@ static void clear_expired(struct mpoa_client *client)
81905 struct timeval now;
81906 struct k_message msg;
81907
81908 + pax_track_stack();
81909 +
81910 do_gettimeofday(&now);
81911
81912 write_lock_irq(&client->egress_lock);
81913 diff --git a/net/atm/proc.c b/net/atm/proc.c
81914 index ab8419a..aa91497 100644
81915 --- a/net/atm/proc.c
81916 +++ b/net/atm/proc.c
81917 @@ -43,9 +43,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
81918 const struct k_atm_aal_stats *stats)
81919 {
81920 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
81921 - atomic_read(&stats->tx),atomic_read(&stats->tx_err),
81922 - atomic_read(&stats->rx),atomic_read(&stats->rx_err),
81923 - atomic_read(&stats->rx_drop));
81924 + atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
81925 + atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
81926 + atomic_read_unchecked(&stats->rx_drop));
81927 }
81928
81929 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
81930 @@ -188,7 +188,12 @@ static void vcc_info(struct seq_file *seq, struct atm_vcc *vcc)
81931 {
81932 struct sock *sk = sk_atm(vcc);
81933
81934 +#ifdef CONFIG_GRKERNSEC_HIDESYM
81935 + seq_printf(seq, "%p ", NULL);
81936 +#else
81937 seq_printf(seq, "%p ", vcc);
81938 +#endif
81939 +
81940 if (!vcc->dev)
81941 seq_printf(seq, "Unassigned ");
81942 else
81943 @@ -214,7 +219,11 @@ static void svc_info(struct seq_file *seq, struct atm_vcc *vcc)
81944 {
81945 if (!vcc->dev)
81946 seq_printf(seq, sizeof(void *) == 4 ?
81947 +#ifdef CONFIG_GRKERNSEC_HIDESYM
81948 + "N/A@%p%10s" : "N/A@%p%2s", NULL, "");
81949 +#else
81950 "N/A@%p%10s" : "N/A@%p%2s", vcc, "");
81951 +#endif
81952 else
81953 seq_printf(seq, "%3d %3d %5d ",
81954 vcc->dev->number, vcc->vpi, vcc->vci);
81955 diff --git a/net/atm/resources.c b/net/atm/resources.c
81956 index 56b7322..c48b84e 100644
81957 --- a/net/atm/resources.c
81958 +++ b/net/atm/resources.c
81959 @@ -161,7 +161,7 @@ void atm_dev_deregister(struct atm_dev *dev)
81960 static void copy_aal_stats(struct k_atm_aal_stats *from,
81961 struct atm_aal_stats *to)
81962 {
81963 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
81964 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
81965 __AAL_STAT_ITEMS
81966 #undef __HANDLE_ITEM
81967 }
81968 @@ -170,7 +170,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
81969 static void subtract_aal_stats(struct k_atm_aal_stats *from,
81970 struct atm_aal_stats *to)
81971 {
81972 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
81973 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
81974 __AAL_STAT_ITEMS
81975 #undef __HANDLE_ITEM
81976 }
81977 diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
81978 index 8567d47..bba2292 100644
81979 --- a/net/bridge/br_private.h
81980 +++ b/net/bridge/br_private.h
81981 @@ -255,7 +255,7 @@ extern void br_ifinfo_notify(int event, struct net_bridge_port *port);
81982
81983 #ifdef CONFIG_SYSFS
81984 /* br_sysfs_if.c */
81985 -extern struct sysfs_ops brport_sysfs_ops;
81986 +extern const struct sysfs_ops brport_sysfs_ops;
81987 extern int br_sysfs_addif(struct net_bridge_port *p);
81988
81989 /* br_sysfs_br.c */
81990 diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
81991 index 9a52ac5..c97538e 100644
81992 --- a/net/bridge/br_stp_if.c
81993 +++ b/net/bridge/br_stp_if.c
81994 @@ -146,7 +146,7 @@ static void br_stp_stop(struct net_bridge *br)
81995 char *envp[] = { NULL };
81996
81997 if (br->stp_enabled == BR_USER_STP) {
81998 - r = call_usermodehelper(BR_STP_PROG, argv, envp, 1);
81999 + r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
82000 printk(KERN_INFO "%s: userspace STP stopped, return code %d\n",
82001 br->dev->name, r);
82002
82003 diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
82004 index 820643a..ce77fb3 100644
82005 --- a/net/bridge/br_sysfs_if.c
82006 +++ b/net/bridge/br_sysfs_if.c
82007 @@ -220,7 +220,7 @@ static ssize_t brport_store(struct kobject * kobj,
82008 return ret;
82009 }
82010
82011 -struct sysfs_ops brport_sysfs_ops = {
82012 +const struct sysfs_ops brport_sysfs_ops = {
82013 .show = brport_show,
82014 .store = brport_store,
82015 };
82016 diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
82017 index d73d47f..72df42a 100644
82018 --- a/net/bridge/netfilter/ebtables.c
82019 +++ b/net/bridge/netfilter/ebtables.c
82020 @@ -1337,6 +1337,8 @@ static int copy_everything_to_user(struct ebt_table *t, void __user *user,
82021 unsigned int entries_size, nentries;
82022 char *entries;
82023
82024 + pax_track_stack();
82025 +
82026 if (cmd == EBT_SO_GET_ENTRIES) {
82027 entries_size = t->private->entries_size;
82028 nentries = t->private->nentries;
82029 diff --git a/net/can/bcm.c b/net/can/bcm.c
82030 index 2ffd2e0..72a7486 100644
82031 --- a/net/can/bcm.c
82032 +++ b/net/can/bcm.c
82033 @@ -164,9 +164,15 @@ static int bcm_proc_show(struct seq_file *m, void *v)
82034 struct bcm_sock *bo = bcm_sk(sk);
82035 struct bcm_op *op;
82036
82037 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82038 + seq_printf(m, ">>> socket %p", NULL);
82039 + seq_printf(m, " / sk %p", NULL);
82040 + seq_printf(m, " / bo %p", NULL);
82041 +#else
82042 seq_printf(m, ">>> socket %p", sk->sk_socket);
82043 seq_printf(m, " / sk %p", sk);
82044 seq_printf(m, " / bo %p", bo);
82045 +#endif
82046 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
82047 seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
82048 seq_printf(m, " <<<\n");
82049 diff --git a/net/compat.c b/net/compat.c
82050 index 9559afc..ccd74e1 100644
82051 --- a/net/compat.c
82052 +++ b/net/compat.c
82053 @@ -69,9 +69,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
82054 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
82055 __get_user(kmsg->msg_flags, &umsg->msg_flags))
82056 return -EFAULT;
82057 - kmsg->msg_name = compat_ptr(tmp1);
82058 - kmsg->msg_iov = compat_ptr(tmp2);
82059 - kmsg->msg_control = compat_ptr(tmp3);
82060 + kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
82061 + kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
82062 + kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
82063 return 0;
82064 }
82065
82066 @@ -94,7 +94,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
82067 kern_msg->msg_name = NULL;
82068
82069 tot_len = iov_from_user_compat_to_kern(kern_iov,
82070 - (struct compat_iovec __user *)kern_msg->msg_iov,
82071 + (struct compat_iovec __force_user *)kern_msg->msg_iov,
82072 kern_msg->msg_iovlen);
82073 if (tot_len >= 0)
82074 kern_msg->msg_iov = kern_iov;
82075 @@ -114,20 +114,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
82076
82077 #define CMSG_COMPAT_FIRSTHDR(msg) \
82078 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
82079 - (struct compat_cmsghdr __user *)((msg)->msg_control) : \
82080 + (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
82081 (struct compat_cmsghdr __user *)NULL)
82082
82083 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
82084 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
82085 (ucmlen) <= (unsigned long) \
82086 ((mhdr)->msg_controllen - \
82087 - ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
82088 + ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
82089
82090 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
82091 struct compat_cmsghdr __user *cmsg, int cmsg_len)
82092 {
82093 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
82094 - if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
82095 + if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
82096 msg->msg_controllen)
82097 return NULL;
82098 return (struct compat_cmsghdr __user *)ptr;
82099 @@ -219,7 +219,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
82100 {
82101 struct compat_timeval ctv;
82102 struct compat_timespec cts[3];
82103 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
82104 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
82105 struct compat_cmsghdr cmhdr;
82106 int cmlen;
82107
82108 @@ -271,7 +271,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
82109
82110 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
82111 {
82112 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
82113 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
82114 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
82115 int fdnum = scm->fp->count;
82116 struct file **fp = scm->fp->fp;
82117 @@ -433,7 +433,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
82118 len = sizeof(ktime);
82119 old_fs = get_fs();
82120 set_fs(KERNEL_DS);
82121 - err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
82122 + err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
82123 set_fs(old_fs);
82124
82125 if (!err) {
82126 @@ -570,7 +570,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
82127 case MCAST_JOIN_GROUP:
82128 case MCAST_LEAVE_GROUP:
82129 {
82130 - struct compat_group_req __user *gr32 = (void *)optval;
82131 + struct compat_group_req __user *gr32 = (void __user *)optval;
82132 struct group_req __user *kgr =
82133 compat_alloc_user_space(sizeof(struct group_req));
82134 u32 interface;
82135 @@ -591,7 +591,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
82136 case MCAST_BLOCK_SOURCE:
82137 case MCAST_UNBLOCK_SOURCE:
82138 {
82139 - struct compat_group_source_req __user *gsr32 = (void *)optval;
82140 + struct compat_group_source_req __user *gsr32 = (void __user *)optval;
82141 struct group_source_req __user *kgsr = compat_alloc_user_space(
82142 sizeof(struct group_source_req));
82143 u32 interface;
82144 @@ -612,7 +612,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
82145 }
82146 case MCAST_MSFILTER:
82147 {
82148 - struct compat_group_filter __user *gf32 = (void *)optval;
82149 + struct compat_group_filter __user *gf32 = (void __user *)optval;
82150 struct group_filter __user *kgf;
82151 u32 interface, fmode, numsrc;
82152
82153 diff --git a/net/core/dev.c b/net/core/dev.c
82154 index 84a0705..575db4c 100644
82155 --- a/net/core/dev.c
82156 +++ b/net/core/dev.c
82157 @@ -1047,10 +1047,14 @@ void dev_load(struct net *net, const char *name)
82158 if (no_module && capable(CAP_NET_ADMIN))
82159 no_module = request_module("netdev-%s", name);
82160 if (no_module && capable(CAP_SYS_MODULE)) {
82161 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
82162 + ___request_module(true, "grsec_modharden_netdev", "%s", name);
82163 +#else
82164 if (!request_module("%s", name))
82165 pr_err("Loading kernel module for a network device "
82166 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
82167 "instead\n", name);
82168 +#endif
82169 }
82170 }
82171 EXPORT_SYMBOL(dev_load);
82172 @@ -1654,7 +1658,7 @@ static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
82173
82174 struct dev_gso_cb {
82175 void (*destructor)(struct sk_buff *skb);
82176 -};
82177 +} __no_const;
82178
82179 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
82180
82181 @@ -2063,7 +2067,7 @@ int netif_rx_ni(struct sk_buff *skb)
82182 }
82183 EXPORT_SYMBOL(netif_rx_ni);
82184
82185 -static void net_tx_action(struct softirq_action *h)
82186 +static void net_tx_action(void)
82187 {
82188 struct softnet_data *sd = &__get_cpu_var(softnet_data);
82189
82190 @@ -2827,7 +2831,7 @@ void netif_napi_del(struct napi_struct *napi)
82191 EXPORT_SYMBOL(netif_napi_del);
82192
82193
82194 -static void net_rx_action(struct softirq_action *h)
82195 +static void net_rx_action(void)
82196 {
82197 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
82198 unsigned long time_limit = jiffies + 2;
82199 diff --git a/net/core/flow.c b/net/core/flow.c
82200 index 9601587..8c4824e 100644
82201 --- a/net/core/flow.c
82202 +++ b/net/core/flow.c
82203 @@ -35,11 +35,11 @@ struct flow_cache_entry {
82204 atomic_t *object_ref;
82205 };
82206
82207 -atomic_t flow_cache_genid = ATOMIC_INIT(0);
82208 +atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
82209
82210 static u32 flow_hash_shift;
82211 #define flow_hash_size (1 << flow_hash_shift)
82212 -static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };
82213 +static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables);
82214
82215 #define flow_table(cpu) (per_cpu(flow_tables, cpu))
82216
82217 @@ -52,7 +52,7 @@ struct flow_percpu_info {
82218 u32 hash_rnd;
82219 int count;
82220 };
82221 -static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 };
82222 +static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info);
82223
82224 #define flow_hash_rnd_recalc(cpu) \
82225 (per_cpu(flow_hash_info, cpu).hash_rnd_recalc)
82226 @@ -69,7 +69,7 @@ struct flow_flush_info {
82227 atomic_t cpuleft;
82228 struct completion completion;
82229 };
82230 -static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL };
82231 +static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets);
82232
82233 #define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))
82234
82235 @@ -190,7 +190,7 @@ void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir,
82236 if (fle->family == family &&
82237 fle->dir == dir &&
82238 flow_key_compare(key, &fle->key) == 0) {
82239 - if (fle->genid == atomic_read(&flow_cache_genid)) {
82240 + if (fle->genid == atomic_read_unchecked(&flow_cache_genid)) {
82241 void *ret = fle->object;
82242
82243 if (ret)
82244 @@ -228,7 +228,7 @@ nocache:
82245 err = resolver(net, key, family, dir, &obj, &obj_ref);
82246
82247 if (fle && !err) {
82248 - fle->genid = atomic_read(&flow_cache_genid);
82249 + fle->genid = atomic_read_unchecked(&flow_cache_genid);
82250
82251 if (fle->object)
82252 atomic_dec(fle->object_ref);
82253 @@ -258,7 +258,7 @@ static void flow_cache_flush_tasklet(unsigned long data)
82254
82255 fle = flow_table(cpu)[i];
82256 for (; fle; fle = fle->next) {
82257 - unsigned genid = atomic_read(&flow_cache_genid);
82258 + unsigned genid = atomic_read_unchecked(&flow_cache_genid);
82259
82260 if (!fle->object || fle->genid == genid)
82261 continue;
82262 diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
82263 index d4fd895..ac9b1e6 100644
82264 --- a/net/core/rtnetlink.c
82265 +++ b/net/core/rtnetlink.c
82266 @@ -57,7 +57,7 @@ struct rtnl_link
82267 {
82268 rtnl_doit_func doit;
82269 rtnl_dumpit_func dumpit;
82270 -};
82271 +} __no_const;
82272
82273 static DEFINE_MUTEX(rtnl_mutex);
82274
82275 diff --git a/net/core/scm.c b/net/core/scm.c
82276 index d98eafc..1a190a9 100644
82277 --- a/net/core/scm.c
82278 +++ b/net/core/scm.c
82279 @@ -191,7 +191,7 @@ error:
82280 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
82281 {
82282 struct cmsghdr __user *cm
82283 - = (__force struct cmsghdr __user *)msg->msg_control;
82284 + = (struct cmsghdr __force_user *)msg->msg_control;
82285 struct cmsghdr cmhdr;
82286 int cmlen = CMSG_LEN(len);
82287 int err;
82288 @@ -214,7 +214,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
82289 err = -EFAULT;
82290 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
82291 goto out;
82292 - if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
82293 + if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
82294 goto out;
82295 cmlen = CMSG_SPACE(len);
82296 if (msg->msg_controllen < cmlen)
82297 @@ -229,7 +229,7 @@ out:
82298 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
82299 {
82300 struct cmsghdr __user *cm
82301 - = (__force struct cmsghdr __user*)msg->msg_control;
82302 + = (struct cmsghdr __force_user *)msg->msg_control;
82303
82304 int fdmax = 0;
82305 int fdnum = scm->fp->count;
82306 @@ -249,7 +249,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
82307 if (fdnum < fdmax)
82308 fdmax = fdnum;
82309
82310 - for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
82311 + for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
82312 i++, cmfptr++)
82313 {
82314 int new_fd;
82315 diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
82316 index 45329d7..626aaa6 100644
82317 --- a/net/core/secure_seq.c
82318 +++ b/net/core/secure_seq.c
82319 @@ -57,7 +57,7 @@ __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
82320 EXPORT_SYMBOL(secure_tcpv6_sequence_number);
82321
82322 u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
82323 - __be16 dport)
82324 + __be16 dport)
82325 {
82326 u32 secret[MD5_MESSAGE_BYTES / 4];
82327 u32 hash[MD5_DIGEST_WORDS];
82328 @@ -71,7 +71,6 @@ u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
82329 secret[i] = net_secret[i];
82330
82331 md5_transform(hash, secret);
82332 -
82333 return hash[0];
82334 }
82335 #endif
82336 diff --git a/net/core/skbuff.c b/net/core/skbuff.c
82337 index 025f924..70a71c4 100644
82338 --- a/net/core/skbuff.c
82339 +++ b/net/core/skbuff.c
82340 @@ -1544,6 +1544,8 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
82341 struct sk_buff *frag_iter;
82342 struct sock *sk = skb->sk;
82343
82344 + pax_track_stack();
82345 +
82346 /*
82347 * __skb_splice_bits() only fails if the output has no room left,
82348 * so no point in going over the frag_list for the error case.
82349 diff --git a/net/core/sock.c b/net/core/sock.c
82350 index 6605e75..3acebda 100644
82351 --- a/net/core/sock.c
82352 +++ b/net/core/sock.c
82353 @@ -864,11 +864,15 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
82354 break;
82355
82356 case SO_PEERCRED:
82357 + {
82358 + struct ucred peercred;
82359 if (len > sizeof(sk->sk_peercred))
82360 len = sizeof(sk->sk_peercred);
82361 - if (copy_to_user(optval, &sk->sk_peercred, len))
82362 + peercred = sk->sk_peercred;
82363 + if (copy_to_user(optval, &peercred, len))
82364 return -EFAULT;
82365 goto lenout;
82366 + }
82367
82368 case SO_PEERNAME:
82369 {
82370 @@ -1892,7 +1896,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
82371 */
82372 smp_wmb();
82373 atomic_set(&sk->sk_refcnt, 1);
82374 - atomic_set(&sk->sk_drops, 0);
82375 + atomic_set_unchecked(&sk->sk_drops, 0);
82376 }
82377 EXPORT_SYMBOL(sock_init_data);
82378
82379 diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
82380 index 2036568..c55883d 100644
82381 --- a/net/decnet/sysctl_net_decnet.c
82382 +++ b/net/decnet/sysctl_net_decnet.c
82383 @@ -206,7 +206,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
82384
82385 if (len > *lenp) len = *lenp;
82386
82387 - if (copy_to_user(buffer, addr, len))
82388 + if (len > sizeof addr || copy_to_user(buffer, addr, len))
82389 return -EFAULT;
82390
82391 *lenp = len;
82392 @@ -327,7 +327,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
82393
82394 if (len > *lenp) len = *lenp;
82395
82396 - if (copy_to_user(buffer, devname, len))
82397 + if (len > sizeof devname || copy_to_user(buffer, devname, len))
82398 return -EFAULT;
82399
82400 *lenp = len;
82401 diff --git a/net/econet/Kconfig b/net/econet/Kconfig
82402 index 39a2d29..f39c0fe 100644
82403 --- a/net/econet/Kconfig
82404 +++ b/net/econet/Kconfig
82405 @@ -4,7 +4,7 @@
82406
82407 config ECONET
82408 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
82409 - depends on EXPERIMENTAL && INET
82410 + depends on EXPERIMENTAL && INET && BROKEN
82411 ---help---
82412 Econet is a fairly old and slow networking protocol mainly used by
82413 Acorn computers to access file and print servers. It uses native
82414 diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c
82415 index a413b1b..380849c 100644
82416 --- a/net/ieee802154/dgram.c
82417 +++ b/net/ieee802154/dgram.c
82418 @@ -318,7 +318,7 @@ out:
82419 static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb)
82420 {
82421 if (sock_queue_rcv_skb(sk, skb) < 0) {
82422 - atomic_inc(&sk->sk_drops);
82423 + atomic_inc_unchecked(&sk->sk_drops);
82424 kfree_skb(skb);
82425 return NET_RX_DROP;
82426 }
82427 diff --git a/net/ieee802154/raw.c b/net/ieee802154/raw.c
82428 index 30e74ee..bfc6ee0 100644
82429 --- a/net/ieee802154/raw.c
82430 +++ b/net/ieee802154/raw.c
82431 @@ -206,7 +206,7 @@ out:
82432 static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
82433 {
82434 if (sock_queue_rcv_skb(sk, skb) < 0) {
82435 - atomic_inc(&sk->sk_drops);
82436 + atomic_inc_unchecked(&sk->sk_drops);
82437 kfree_skb(skb);
82438 return NET_RX_DROP;
82439 }
82440 diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
82441 index dba56d2..acee5d6 100644
82442 --- a/net/ipv4/inet_diag.c
82443 +++ b/net/ipv4/inet_diag.c
82444 @@ -113,8 +113,13 @@ static int inet_csk_diag_fill(struct sock *sk,
82445 r->idiag_retrans = 0;
82446
82447 r->id.idiag_if = sk->sk_bound_dev_if;
82448 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82449 + r->id.idiag_cookie[0] = 0;
82450 + r->id.idiag_cookie[1] = 0;
82451 +#else
82452 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
82453 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
82454 +#endif
82455
82456 r->id.idiag_sport = inet->sport;
82457 r->id.idiag_dport = inet->dport;
82458 @@ -200,8 +205,15 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
82459 r->idiag_family = tw->tw_family;
82460 r->idiag_retrans = 0;
82461 r->id.idiag_if = tw->tw_bound_dev_if;
82462 +
82463 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82464 + r->id.idiag_cookie[0] = 0;
82465 + r->id.idiag_cookie[1] = 0;
82466 +#else
82467 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
82468 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
82469 +#endif
82470 +
82471 r->id.idiag_sport = tw->tw_sport;
82472 r->id.idiag_dport = tw->tw_dport;
82473 r->id.idiag_src[0] = tw->tw_rcv_saddr;
82474 @@ -284,12 +296,14 @@ static int inet_diag_get_exact(struct sk_buff *in_skb,
82475 if (sk == NULL)
82476 goto unlock;
82477
82478 +#ifndef CONFIG_GRKERNSEC_HIDESYM
82479 err = -ESTALE;
82480 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
82481 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
82482 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
82483 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
82484 goto out;
82485 +#endif
82486
82487 err = -ENOMEM;
82488 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
82489 @@ -579,8 +593,14 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
82490 r->idiag_retrans = req->retrans;
82491
82492 r->id.idiag_if = sk->sk_bound_dev_if;
82493 +
82494 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82495 + r->id.idiag_cookie[0] = 0;
82496 + r->id.idiag_cookie[1] = 0;
82497 +#else
82498 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
82499 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
82500 +#endif
82501
82502 tmo = req->expires - jiffies;
82503 if (tmo < 0)
82504 diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
82505 index d717267..56de7e7 100644
82506 --- a/net/ipv4/inet_hashtables.c
82507 +++ b/net/ipv4/inet_hashtables.c
82508 @@ -18,12 +18,15 @@
82509 #include <linux/sched.h>
82510 #include <linux/slab.h>
82511 #include <linux/wait.h>
82512 +#include <linux/security.h>
82513
82514 #include <net/inet_connection_sock.h>
82515 #include <net/inet_hashtables.h>
82516 #include <net/secure_seq.h>
82517 #include <net/ip.h>
82518
82519 +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
82520 +
82521 /*
82522 * Allocate and initialize a new local port bind bucket.
82523 * The bindhash mutex for snum's hash chain must be held here.
82524 @@ -491,6 +494,8 @@ ok:
82525 }
82526 spin_unlock(&head->lock);
82527
82528 + gr_update_task_in_ip_table(current, inet_sk(sk));
82529 +
82530 if (tw) {
82531 inet_twsk_deschedule(tw, death_row);
82532 inet_twsk_put(tw);
82533 diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
82534 index 13b229f..6956484 100644
82535 --- a/net/ipv4/inetpeer.c
82536 +++ b/net/ipv4/inetpeer.c
82537 @@ -367,6 +367,8 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
82538 struct inet_peer *p, *n;
82539 struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;
82540
82541 + pax_track_stack();
82542 +
82543 /* Look up for the address quickly. */
82544 read_lock_bh(&peer_pool_lock);
82545 p = lookup(daddr, NULL);
82546 @@ -390,7 +392,7 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
82547 return NULL;
82548 n->v4daddr = daddr;
82549 atomic_set(&n->refcnt, 1);
82550 - atomic_set(&n->rid, 0);
82551 + atomic_set_unchecked(&n->rid, 0);
82552 n->ip_id_count = secure_ip_id(daddr);
82553 n->tcp_ts_stamp = 0;
82554
82555 diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
82556 index d3fe10b..feeafc9 100644
82557 --- a/net/ipv4/ip_fragment.c
82558 +++ b/net/ipv4/ip_fragment.c
82559 @@ -255,7 +255,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
82560 return 0;
82561
82562 start = qp->rid;
82563 - end = atomic_inc_return(&peer->rid);
82564 + end = atomic_inc_return_unchecked(&peer->rid);
82565 qp->rid = end;
82566
82567 rc = qp->q.fragments && (end - start) > max;
82568 diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
82569 index e982b5c..f079d75 100644
82570 --- a/net/ipv4/ip_sockglue.c
82571 +++ b/net/ipv4/ip_sockglue.c
82572 @@ -1015,6 +1015,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
82573 int val;
82574 int len;
82575
82576 + pax_track_stack();
82577 +
82578 if (level != SOL_IP)
82579 return -EOPNOTSUPP;
82580
82581 @@ -1173,7 +1175,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
82582 if (sk->sk_type != SOCK_STREAM)
82583 return -ENOPROTOOPT;
82584
82585 - msg.msg_control = optval;
82586 + msg.msg_control = (void __force_kernel *)optval;
82587 msg.msg_controllen = len;
82588 msg.msg_flags = 0;
82589
82590 diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
82591 index f8d04c2..c1188f2 100644
82592 --- a/net/ipv4/ipconfig.c
82593 +++ b/net/ipv4/ipconfig.c
82594 @@ -295,7 +295,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
82595
82596 mm_segment_t oldfs = get_fs();
82597 set_fs(get_ds());
82598 - res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
82599 + res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
82600 set_fs(oldfs);
82601 return res;
82602 }
82603 @@ -306,7 +306,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
82604
82605 mm_segment_t oldfs = get_fs();
82606 set_fs(get_ds());
82607 - res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
82608 + res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
82609 set_fs(oldfs);
82610 return res;
82611 }
82612 @@ -317,7 +317,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
82613
82614 mm_segment_t oldfs = get_fs();
82615 set_fs(get_ds());
82616 - res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
82617 + res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
82618 set_fs(oldfs);
82619 return res;
82620 }
82621 diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
82622 index c8b0cc3..4da5ae2 100644
82623 --- a/net/ipv4/netfilter/arp_tables.c
82624 +++ b/net/ipv4/netfilter/arp_tables.c
82625 @@ -934,6 +934,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
82626 private = &tmp;
82627 }
82628 #endif
82629 + memset(&info, 0, sizeof(info));
82630 info.valid_hooks = t->valid_hooks;
82631 memcpy(info.hook_entry, private->hook_entry,
82632 sizeof(info.hook_entry));
82633 diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
82634 index c156db2..e772975 100644
82635 --- a/net/ipv4/netfilter/ip_queue.c
82636 +++ b/net/ipv4/netfilter/ip_queue.c
82637 @@ -286,6 +286,9 @@ ipq_mangle_ipv4(ipq_verdict_msg_t *v, struct nf_queue_entry *e)
82638
82639 if (v->data_len < sizeof(*user_iph))
82640 return 0;
82641 + if (v->data_len > 65535)
82642 + return -EMSGSIZE;
82643 +
82644 diff = v->data_len - e->skb->len;
82645 if (diff < 0) {
82646 if (pskb_trim(e->skb, v->data_len))
82647 @@ -409,7 +412,8 @@ ipq_dev_drop(int ifindex)
82648 static inline void
82649 __ipq_rcv_skb(struct sk_buff *skb)
82650 {
82651 - int status, type, pid, flags, nlmsglen, skblen;
82652 + int status, type, pid, flags;
82653 + unsigned int nlmsglen, skblen;
82654 struct nlmsghdr *nlh;
82655
82656 skblen = skb->len;
82657 diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
82658 index 0606db1..02e7e4c 100644
82659 --- a/net/ipv4/netfilter/ip_tables.c
82660 +++ b/net/ipv4/netfilter/ip_tables.c
82661 @@ -1141,6 +1141,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
82662 private = &tmp;
82663 }
82664 #endif
82665 + memset(&info, 0, sizeof(info));
82666 info.valid_hooks = t->valid_hooks;
82667 memcpy(info.hook_entry, private->hook_entry,
82668 sizeof(info.hook_entry));
82669 diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
82670 index d9521f6..3c3eb25 100644
82671 --- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
82672 +++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
82673 @@ -397,7 +397,7 @@ static unsigned char asn1_octets_decode(struct asn1_ctx *ctx,
82674
82675 *len = 0;
82676
82677 - *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
82678 + *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
82679 if (*octets == NULL) {
82680 if (net_ratelimit())
82681 printk("OOM in bsalg (%d)\n", __LINE__);
82682 diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
82683 index ab996f9..3da5f96 100644
82684 --- a/net/ipv4/raw.c
82685 +++ b/net/ipv4/raw.c
82686 @@ -292,7 +292,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
82687 /* Charge it to the socket. */
82688
82689 if (sock_queue_rcv_skb(sk, skb) < 0) {
82690 - atomic_inc(&sk->sk_drops);
82691 + atomic_inc_unchecked(&sk->sk_drops);
82692 kfree_skb(skb);
82693 return NET_RX_DROP;
82694 }
82695 @@ -303,7 +303,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
82696 int raw_rcv(struct sock *sk, struct sk_buff *skb)
82697 {
82698 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
82699 - atomic_inc(&sk->sk_drops);
82700 + atomic_inc_unchecked(&sk->sk_drops);
82701 kfree_skb(skb);
82702 return NET_RX_DROP;
82703 }
82704 @@ -724,16 +724,23 @@ static int raw_init(struct sock *sk)
82705
82706 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
82707 {
82708 + struct icmp_filter filter;
82709 +
82710 + if (optlen < 0)
82711 + return -EINVAL;
82712 if (optlen > sizeof(struct icmp_filter))
82713 optlen = sizeof(struct icmp_filter);
82714 - if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
82715 + if (copy_from_user(&filter, optval, optlen))
82716 return -EFAULT;
82717 + raw_sk(sk)->filter = filter;
82718 +
82719 return 0;
82720 }
82721
82722 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
82723 {
82724 int len, ret = -EFAULT;
82725 + struct icmp_filter filter;
82726
82727 if (get_user(len, optlen))
82728 goto out;
82729 @@ -743,8 +750,9 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
82730 if (len > sizeof(struct icmp_filter))
82731 len = sizeof(struct icmp_filter);
82732 ret = -EFAULT;
82733 - if (put_user(len, optlen) ||
82734 - copy_to_user(optval, &raw_sk(sk)->filter, len))
82735 + filter = raw_sk(sk)->filter;
82736 + if (put_user(len, optlen) || len > sizeof filter ||
82737 + copy_to_user(optval, &filter, len))
82738 goto out;
82739 ret = 0;
82740 out: return ret;
82741 @@ -954,7 +962,13 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
82742 sk_wmem_alloc_get(sp),
82743 sk_rmem_alloc_get(sp),
82744 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
82745 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
82746 + atomic_read(&sp->sk_refcnt),
82747 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82748 + NULL,
82749 +#else
82750 + sp,
82751 +#endif
82752 + atomic_read_unchecked(&sp->sk_drops));
82753 }
82754
82755 static int raw_seq_show(struct seq_file *seq, void *v)
82756 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
82757 index 58f141b..b759702 100644
82758 --- a/net/ipv4/route.c
82759 +++ b/net/ipv4/route.c
82760 @@ -269,7 +269,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
82761
82762 static inline int rt_genid(struct net *net)
82763 {
82764 - return atomic_read(&net->ipv4.rt_genid);
82765 + return atomic_read_unchecked(&net->ipv4.rt_genid);
82766 }
82767
82768 #ifdef CONFIG_PROC_FS
82769 @@ -889,7 +889,7 @@ static void rt_cache_invalidate(struct net *net)
82770 unsigned char shuffle;
82771
82772 get_random_bytes(&shuffle, sizeof(shuffle));
82773 - atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
82774 + atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
82775 }
82776
82777 /*
82778 @@ -3357,7 +3357,7 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
82779
82780 static __net_init int rt_secret_timer_init(struct net *net)
82781 {
82782 - atomic_set(&net->ipv4.rt_genid,
82783 + atomic_set_unchecked(&net->ipv4.rt_genid,
82784 (int) ((num_physpages ^ (num_physpages>>8)) ^
82785 (jiffies ^ (jiffies >> 7))));
82786
82787 diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
82788 index f095659..adc892a 100644
82789 --- a/net/ipv4/tcp.c
82790 +++ b/net/ipv4/tcp.c
82791 @@ -2085,6 +2085,8 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
82792 int val;
82793 int err = 0;
82794
82795 + pax_track_stack();
82796 +
82797 /* This is a string value all the others are int's */
82798 if (optname == TCP_CONGESTION) {
82799 char name[TCP_CA_NAME_MAX];
82800 @@ -2355,6 +2357,8 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
82801 struct tcp_sock *tp = tcp_sk(sk);
82802 int val, len;
82803
82804 + pax_track_stack();
82805 +
82806 if (get_user(len, optlen))
82807 return -EFAULT;
82808
82809 diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
82810 index 6fc7961..33bad4a 100644
82811 --- a/net/ipv4/tcp_ipv4.c
82812 +++ b/net/ipv4/tcp_ipv4.c
82813 @@ -85,6 +85,9 @@
82814 int sysctl_tcp_tw_reuse __read_mostly;
82815 int sysctl_tcp_low_latency __read_mostly;
82816
82817 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82818 +extern int grsec_enable_blackhole;
82819 +#endif
82820
82821 #ifdef CONFIG_TCP_MD5SIG
82822 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
82823 @@ -1543,6 +1546,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
82824 return 0;
82825
82826 reset:
82827 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82828 + if (!grsec_enable_blackhole)
82829 +#endif
82830 tcp_v4_send_reset(rsk, skb);
82831 discard:
82832 kfree_skb(skb);
82833 @@ -1604,12 +1610,20 @@ int tcp_v4_rcv(struct sk_buff *skb)
82834 TCP_SKB_CB(skb)->sacked = 0;
82835
82836 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
82837 - if (!sk)
82838 + if (!sk) {
82839 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82840 + ret = 1;
82841 +#endif
82842 goto no_tcp_socket;
82843 + }
82844
82845 process:
82846 - if (sk->sk_state == TCP_TIME_WAIT)
82847 + if (sk->sk_state == TCP_TIME_WAIT) {
82848 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82849 + ret = 2;
82850 +#endif
82851 goto do_time_wait;
82852 + }
82853
82854 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
82855 goto discard_and_relse;
82856 @@ -1651,6 +1665,10 @@ no_tcp_socket:
82857 bad_packet:
82858 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
82859 } else {
82860 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82861 + if (!grsec_enable_blackhole || (ret == 1 &&
82862 + (skb->dev->flags & IFF_LOOPBACK)))
82863 +#endif
82864 tcp_v4_send_reset(NULL, skb);
82865 }
82866
82867 @@ -2238,7 +2256,11 @@ static void get_openreq4(struct sock *sk, struct request_sock *req,
82868 0, /* non standard timer */
82869 0, /* open_requests have no inode */
82870 atomic_read(&sk->sk_refcnt),
82871 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82872 + NULL,
82873 +#else
82874 req,
82875 +#endif
82876 len);
82877 }
82878
82879 @@ -2280,7 +2302,12 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
82880 sock_i_uid(sk),
82881 icsk->icsk_probes_out,
82882 sock_i_ino(sk),
82883 - atomic_read(&sk->sk_refcnt), sk,
82884 + atomic_read(&sk->sk_refcnt),
82885 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82886 + NULL,
82887 +#else
82888 + sk,
82889 +#endif
82890 jiffies_to_clock_t(icsk->icsk_rto),
82891 jiffies_to_clock_t(icsk->icsk_ack.ato),
82892 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
82893 @@ -2308,7 +2335,13 @@ static void get_timewait4_sock(struct inet_timewait_sock *tw,
82894 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
82895 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
82896 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
82897 - atomic_read(&tw->tw_refcnt), tw, len);
82898 + atomic_read(&tw->tw_refcnt),
82899 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82900 + NULL,
82901 +#else
82902 + tw,
82903 +#endif
82904 + len);
82905 }
82906
82907 #define TMPSZ 150
82908 diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
82909 index 4c03598..e09a8e8 100644
82910 --- a/net/ipv4/tcp_minisocks.c
82911 +++ b/net/ipv4/tcp_minisocks.c
82912 @@ -26,6 +26,10 @@
82913 #include <net/inet_common.h>
82914 #include <net/xfrm.h>
82915
82916 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82917 +extern int grsec_enable_blackhole;
82918 +#endif
82919 +
82920 #ifdef CONFIG_SYSCTL
82921 #define SYNC_INIT 0 /* let the user enable it */
82922 #else
82923 @@ -672,6 +676,10 @@ listen_overflow:
82924
82925 embryonic_reset:
82926 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
82927 +
82928 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82929 + if (!grsec_enable_blackhole)
82930 +#endif
82931 if (!(flg & TCP_FLAG_RST))
82932 req->rsk_ops->send_reset(sk, skb);
82933
82934 diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
82935 index af83bdf..ec91cb2 100644
82936 --- a/net/ipv4/tcp_output.c
82937 +++ b/net/ipv4/tcp_output.c
82938 @@ -2234,6 +2234,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
82939 __u8 *md5_hash_location;
82940 int mss;
82941
82942 + pax_track_stack();
82943 +
82944 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
82945 if (skb == NULL)
82946 return NULL;
82947 diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
82948 index 59f5b5e..193860f 100644
82949 --- a/net/ipv4/tcp_probe.c
82950 +++ b/net/ipv4/tcp_probe.c
82951 @@ -200,7 +200,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
82952 if (cnt + width >= len)
82953 break;
82954
82955 - if (copy_to_user(buf + cnt, tbuf, width))
82956 + if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
82957 return -EFAULT;
82958 cnt += width;
82959 }
82960 diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
82961 index 57d5501..a9ed13a 100644
82962 --- a/net/ipv4/tcp_timer.c
82963 +++ b/net/ipv4/tcp_timer.c
82964 @@ -21,6 +21,10 @@
82965 #include <linux/module.h>
82966 #include <net/tcp.h>
82967
82968 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82969 +extern int grsec_lastack_retries;
82970 +#endif
82971 +
82972 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
82973 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
82974 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
82975 @@ -164,6 +168,13 @@ static int tcp_write_timeout(struct sock *sk)
82976 }
82977 }
82978
82979 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82980 + if ((sk->sk_state == TCP_LAST_ACK) &&
82981 + (grsec_lastack_retries > 0) &&
82982 + (grsec_lastack_retries < retry_until))
82983 + retry_until = grsec_lastack_retries;
82984 +#endif
82985 +
82986 if (retransmits_timed_out(sk, retry_until)) {
82987 /* Has it gone just too far? */
82988 tcp_write_err(sk);
82989 diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
82990 index 8e28770..72105c8 100644
82991 --- a/net/ipv4/udp.c
82992 +++ b/net/ipv4/udp.c
82993 @@ -86,6 +86,7 @@
82994 #include <linux/types.h>
82995 #include <linux/fcntl.h>
82996 #include <linux/module.h>
82997 +#include <linux/security.h>
82998 #include <linux/socket.h>
82999 #include <linux/sockios.h>
83000 #include <linux/igmp.h>
83001 @@ -106,6 +107,10 @@
83002 #include <net/xfrm.h>
83003 #include "udp_impl.h"
83004
83005 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
83006 +extern int grsec_enable_blackhole;
83007 +#endif
83008 +
83009 struct udp_table udp_table;
83010 EXPORT_SYMBOL(udp_table);
83011
83012 @@ -371,6 +376,9 @@ found:
83013 return s;
83014 }
83015
83016 +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
83017 +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
83018 +
83019 /*
83020 * This routine is called by the ICMP module when it gets some
83021 * sort of error condition. If err < 0 then the socket should
83022 @@ -639,9 +647,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
83023 dport = usin->sin_port;
83024 if (dport == 0)
83025 return -EINVAL;
83026 +
83027 + err = gr_search_udp_sendmsg(sk, usin);
83028 + if (err)
83029 + return err;
83030 } else {
83031 if (sk->sk_state != TCP_ESTABLISHED)
83032 return -EDESTADDRREQ;
83033 +
83034 + err = gr_search_udp_sendmsg(sk, NULL);
83035 + if (err)
83036 + return err;
83037 +
83038 daddr = inet->daddr;
83039 dport = inet->dport;
83040 /* Open fast path for connected socket.
83041 @@ -945,6 +962,10 @@ try_again:
83042 if (!skb)
83043 goto out;
83044
83045 + err = gr_search_udp_recvmsg(sk, skb);
83046 + if (err)
83047 + goto out_free;
83048 +
83049 ulen = skb->len - sizeof(struct udphdr);
83050 copied = len;
83051 if (copied > ulen)
83052 @@ -1068,7 +1089,7 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
83053 if (rc == -ENOMEM) {
83054 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
83055 is_udplite);
83056 - atomic_inc(&sk->sk_drops);
83057 + atomic_inc_unchecked(&sk->sk_drops);
83058 }
83059 goto drop;
83060 }
83061 @@ -1338,6 +1359,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
83062 goto csum_error;
83063
83064 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
83065 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
83066 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
83067 +#endif
83068 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
83069
83070 /*
83071 @@ -1758,8 +1782,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
83072 sk_wmem_alloc_get(sp),
83073 sk_rmem_alloc_get(sp),
83074 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
83075 - atomic_read(&sp->sk_refcnt), sp,
83076 - atomic_read(&sp->sk_drops), len);
83077 + atomic_read(&sp->sk_refcnt),
83078 +#ifdef CONFIG_GRKERNSEC_HIDESYM
83079 + NULL,
83080 +#else
83081 + sp,
83082 +#endif
83083 + atomic_read_unchecked(&sp->sk_drops), len);
83084 }
83085
83086 int udp4_seq_show(struct seq_file *seq, void *v)
83087 diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
83088 index 8ac3d09..fc58c5f 100644
83089 --- a/net/ipv6/addrconf.c
83090 +++ b/net/ipv6/addrconf.c
83091 @@ -2053,7 +2053,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
83092 p.iph.ihl = 5;
83093 p.iph.protocol = IPPROTO_IPV6;
83094 p.iph.ttl = 64;
83095 - ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
83096 + ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
83097
83098 if (ops->ndo_do_ioctl) {
83099 mm_segment_t oldfs = get_fs();
83100 diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
83101 index cc4797d..7cfdfcc 100644
83102 --- a/net/ipv6/inet6_connection_sock.c
83103 +++ b/net/ipv6/inet6_connection_sock.c
83104 @@ -152,7 +152,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
83105 #ifdef CONFIG_XFRM
83106 {
83107 struct rt6_info *rt = (struct rt6_info *)dst;
83108 - rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
83109 + rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
83110 }
83111 #endif
83112 }
83113 @@ -167,7 +167,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
83114 #ifdef CONFIG_XFRM
83115 if (dst) {
83116 struct rt6_info *rt = (struct rt6_info *)dst;
83117 - if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
83118 + if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
83119 sk->sk_dst_cache = NULL;
83120 dst_release(dst);
83121 dst = NULL;
83122 diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
83123 index 093e9b2..f72cddb 100644
83124 --- a/net/ipv6/inet6_hashtables.c
83125 +++ b/net/ipv6/inet6_hashtables.c
83126 @@ -119,7 +119,7 @@ out:
83127 }
83128 EXPORT_SYMBOL(__inet6_lookup_established);
83129
83130 -static int inline compute_score(struct sock *sk, struct net *net,
83131 +static inline int compute_score(struct sock *sk, struct net *net,
83132 const unsigned short hnum,
83133 const struct in6_addr *daddr,
83134 const int dif)
83135 diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
83136 index 4f7aaf6..f7acf45 100644
83137 --- a/net/ipv6/ipv6_sockglue.c
83138 +++ b/net/ipv6/ipv6_sockglue.c
83139 @@ -130,6 +130,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
83140 int val, valbool;
83141 int retv = -ENOPROTOOPT;
83142
83143 + pax_track_stack();
83144 +
83145 if (optval == NULL)
83146 val=0;
83147 else {
83148 @@ -881,6 +883,8 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
83149 int len;
83150 int val;
83151
83152 + pax_track_stack();
83153 +
83154 if (ip6_mroute_opt(optname))
83155 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
83156
83157 @@ -922,7 +926,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
83158 if (sk->sk_type != SOCK_STREAM)
83159 return -ENOPROTOOPT;
83160
83161 - msg.msg_control = optval;
83162 + msg.msg_control = (void __force_kernel *)optval;
83163 msg.msg_controllen = len;
83164 msg.msg_flags = 0;
83165
83166 diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
83167 index 1cf3f0c..1d4376f 100644
83168 --- a/net/ipv6/netfilter/ip6_queue.c
83169 +++ b/net/ipv6/netfilter/ip6_queue.c
83170 @@ -287,6 +287,9 @@ ipq_mangle_ipv6(ipq_verdict_msg_t *v, struct nf_queue_entry *e)
83171
83172 if (v->data_len < sizeof(*user_iph))
83173 return 0;
83174 + if (v->data_len > 65535)
83175 + return -EMSGSIZE;
83176 +
83177 diff = v->data_len - e->skb->len;
83178 if (diff < 0) {
83179 if (pskb_trim(e->skb, v->data_len))
83180 @@ -411,7 +414,8 @@ ipq_dev_drop(int ifindex)
83181 static inline void
83182 __ipq_rcv_skb(struct sk_buff *skb)
83183 {
83184 - int status, type, pid, flags, nlmsglen, skblen;
83185 + int status, type, pid, flags;
83186 + unsigned int nlmsglen, skblen;
83187 struct nlmsghdr *nlh;
83188
83189 skblen = skb->len;
83190 diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
83191 index 78b5a36..7f37433 100644
83192 --- a/net/ipv6/netfilter/ip6_tables.c
83193 +++ b/net/ipv6/netfilter/ip6_tables.c
83194 @@ -1173,6 +1173,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
83195 private = &tmp;
83196 }
83197 #endif
83198 + memset(&info, 0, sizeof(info));
83199 info.valid_hooks = t->valid_hooks;
83200 memcpy(info.hook_entry, private->hook_entry,
83201 sizeof(info.hook_entry));
83202 diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
83203 index 4f24570..b813b34 100644
83204 --- a/net/ipv6/raw.c
83205 +++ b/net/ipv6/raw.c
83206 @@ -375,14 +375,14 @@ static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb)
83207 {
83208 if ((raw6_sk(sk)->checksum || sk->sk_filter) &&
83209 skb_checksum_complete(skb)) {
83210 - atomic_inc(&sk->sk_drops);
83211 + atomic_inc_unchecked(&sk->sk_drops);
83212 kfree_skb(skb);
83213 return NET_RX_DROP;
83214 }
83215
83216 /* Charge it to the socket. */
83217 if (sock_queue_rcv_skb(sk,skb)<0) {
83218 - atomic_inc(&sk->sk_drops);
83219 + atomic_inc_unchecked(&sk->sk_drops);
83220 kfree_skb(skb);
83221 return NET_RX_DROP;
83222 }
83223 @@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
83224 struct raw6_sock *rp = raw6_sk(sk);
83225
83226 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
83227 - atomic_inc(&sk->sk_drops);
83228 + atomic_inc_unchecked(&sk->sk_drops);
83229 kfree_skb(skb);
83230 return NET_RX_DROP;
83231 }
83232 @@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
83233
83234 if (inet->hdrincl) {
83235 if (skb_checksum_complete(skb)) {
83236 - atomic_inc(&sk->sk_drops);
83237 + atomic_inc_unchecked(&sk->sk_drops);
83238 kfree_skb(skb);
83239 return NET_RX_DROP;
83240 }
83241 @@ -518,7 +518,7 @@ csum_copy_err:
83242 as some normal condition.
83243 */
83244 err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
83245 - atomic_inc(&sk->sk_drops);
83246 + atomic_inc_unchecked(&sk->sk_drops);
83247 goto out;
83248 }
83249
83250 @@ -600,7 +600,7 @@ out:
83251 return err;
83252 }
83253
83254 -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
83255 +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
83256 struct flowi *fl, struct rt6_info *rt,
83257 unsigned int flags)
83258 {
83259 @@ -738,6 +738,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
83260 u16 proto;
83261 int err;
83262
83263 + pax_track_stack();
83264 +
83265 /* Rough check on arithmetic overflow,
83266 better check is made in ip6_append_data().
83267 */
83268 @@ -916,12 +918,17 @@ do_confirm:
83269 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
83270 char __user *optval, int optlen)
83271 {
83272 + struct icmp6_filter filter;
83273 +
83274 switch (optname) {
83275 case ICMPV6_FILTER:
83276 + if (optlen < 0)
83277 + return -EINVAL;
83278 if (optlen > sizeof(struct icmp6_filter))
83279 optlen = sizeof(struct icmp6_filter);
83280 - if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
83281 + if (copy_from_user(&filter, optval, optlen))
83282 return -EFAULT;
83283 + raw6_sk(sk)->filter = filter;
83284 return 0;
83285 default:
83286 return -ENOPROTOOPT;
83287 @@ -934,6 +941,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
83288 char __user *optval, int __user *optlen)
83289 {
83290 int len;
83291 + struct icmp6_filter filter;
83292
83293 switch (optname) {
83294 case ICMPV6_FILTER:
83295 @@ -945,7 +953,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
83296 len = sizeof(struct icmp6_filter);
83297 if (put_user(len, optlen))
83298 return -EFAULT;
83299 - if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
83300 + filter = raw6_sk(sk)->filter;
83301 + if (len > sizeof filter || copy_to_user(optval, &filter, len))
83302 return -EFAULT;
83303 return 0;
83304 default:
83305 @@ -1241,7 +1250,13 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
83306 0, 0L, 0,
83307 sock_i_uid(sp), 0,
83308 sock_i_ino(sp),
83309 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
83310 + atomic_read(&sp->sk_refcnt),
83311 +#ifdef CONFIG_GRKERNSEC_HIDESYM
83312 + NULL,
83313 +#else
83314 + sp,
83315 +#endif
83316 + atomic_read_unchecked(&sp->sk_drops));
83317 }
83318
83319 static int raw6_seq_show(struct seq_file *seq, void *v)
83320 diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
83321 index faae6df..d4430c1 100644
83322 --- a/net/ipv6/tcp_ipv6.c
83323 +++ b/net/ipv6/tcp_ipv6.c
83324 @@ -89,6 +89,10 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
83325 }
83326 #endif
83327
83328 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
83329 +extern int grsec_enable_blackhole;
83330 +#endif
83331 +
83332 static void tcp_v6_hash(struct sock *sk)
83333 {
83334 if (sk->sk_state != TCP_CLOSE) {
83335 @@ -1579,6 +1583,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
83336 return 0;
83337
83338 reset:
83339 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
83340 + if (!grsec_enable_blackhole)
83341 +#endif
83342 tcp_v6_send_reset(sk, skb);
83343 discard:
83344 if (opt_skb)
83345 @@ -1656,12 +1663,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
83346 TCP_SKB_CB(skb)->sacked = 0;
83347
83348 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
83349 - if (!sk)
83350 + if (!sk) {
83351 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
83352 + ret = 1;
83353 +#endif
83354 goto no_tcp_socket;
83355 + }
83356
83357 process:
83358 - if (sk->sk_state == TCP_TIME_WAIT)
83359 + if (sk->sk_state == TCP_TIME_WAIT) {
83360 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
83361 + ret = 2;
83362 +#endif
83363 goto do_time_wait;
83364 + }
83365
83366 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
83367 goto discard_and_relse;
83368 @@ -1701,6 +1716,10 @@ no_tcp_socket:
83369 bad_packet:
83370 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
83371 } else {
83372 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
83373 + if (!grsec_enable_blackhole || (ret == 1 &&
83374 + (skb->dev->flags & IFF_LOOPBACK)))
83375 +#endif
83376 tcp_v6_send_reset(NULL, skb);
83377 }
83378
83379 @@ -1916,7 +1935,13 @@ static void get_openreq6(struct seq_file *seq,
83380 uid,
83381 0, /* non standard timer */
83382 0, /* open_requests have no inode */
83383 - 0, req);
83384 + 0,
83385 +#ifdef CONFIG_GRKERNSEC_HIDESYM
83386 + NULL
83387 +#else
83388 + req
83389 +#endif
83390 + );
83391 }
83392
83393 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
83394 @@ -1966,7 +1991,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
83395 sock_i_uid(sp),
83396 icsk->icsk_probes_out,
83397 sock_i_ino(sp),
83398 - atomic_read(&sp->sk_refcnt), sp,
83399 + atomic_read(&sp->sk_refcnt),
83400 +#ifdef CONFIG_GRKERNSEC_HIDESYM
83401 + NULL,
83402 +#else
83403 + sp,
83404 +#endif
83405 jiffies_to_clock_t(icsk->icsk_rto),
83406 jiffies_to_clock_t(icsk->icsk_ack.ato),
83407 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
83408 @@ -2001,7 +2031,13 @@ static void get_timewait6_sock(struct seq_file *seq,
83409 dest->s6_addr32[2], dest->s6_addr32[3], destp,
83410 tw->tw_substate, 0, 0,
83411 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
83412 - atomic_read(&tw->tw_refcnt), tw);
83413 + atomic_read(&tw->tw_refcnt),
83414 +#ifdef CONFIG_GRKERNSEC_HIDESYM
83415 + NULL
83416 +#else
83417 + tw
83418 +#endif
83419 + );
83420 }
83421
83422 static int tcp6_seq_show(struct seq_file *seq, void *v)
83423 diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
83424 index 9cc6289..052c521 100644
83425 --- a/net/ipv6/udp.c
83426 +++ b/net/ipv6/udp.c
83427 @@ -49,6 +49,10 @@
83428 #include <linux/seq_file.h>
83429 #include "udp_impl.h"
83430
83431 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
83432 +extern int grsec_enable_blackhole;
83433 +#endif
83434 +
83435 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
83436 {
83437 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
83438 @@ -391,7 +395,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
83439 if (rc == -ENOMEM) {
83440 UDP6_INC_STATS_BH(sock_net(sk),
83441 UDP_MIB_RCVBUFERRORS, is_udplite);
83442 - atomic_inc(&sk->sk_drops);
83443 + atomic_inc_unchecked(&sk->sk_drops);
83444 }
83445 goto drop;
83446 }
83447 @@ -590,6 +594,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
83448 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
83449 proto == IPPROTO_UDPLITE);
83450
83451 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
83452 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
83453 +#endif
83454 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev);
83455
83456 kfree_skb(skb);
83457 @@ -1209,8 +1216,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
83458 0, 0L, 0,
83459 sock_i_uid(sp), 0,
83460 sock_i_ino(sp),
83461 - atomic_read(&sp->sk_refcnt), sp,
83462 - atomic_read(&sp->sk_drops));
83463 + atomic_read(&sp->sk_refcnt),
83464 +#ifdef CONFIG_GRKERNSEC_HIDESYM
83465 + NULL,
83466 +#else
83467 + sp,
83468 +#endif
83469 + atomic_read_unchecked(&sp->sk_drops));
83470 }
83471
83472 int udp6_seq_show(struct seq_file *seq, void *v)
83473 diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
83474 index 811984d..11f59b7 100644
83475 --- a/net/irda/ircomm/ircomm_tty.c
83476 +++ b/net/irda/ircomm/ircomm_tty.c
83477 @@ -280,16 +280,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
83478 add_wait_queue(&self->open_wait, &wait);
83479
83480 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
83481 - __FILE__,__LINE__, tty->driver->name, self->open_count );
83482 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
83483
83484 /* As far as I can see, we protect open_count - Jean II */
83485 spin_lock_irqsave(&self->spinlock, flags);
83486 if (!tty_hung_up_p(filp)) {
83487 extra_count = 1;
83488 - self->open_count--;
83489 + local_dec(&self->open_count);
83490 }
83491 spin_unlock_irqrestore(&self->spinlock, flags);
83492 - self->blocked_open++;
83493 + local_inc(&self->blocked_open);
83494
83495 while (1) {
83496 if (tty->termios->c_cflag & CBAUD) {
83497 @@ -329,7 +329,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
83498 }
83499
83500 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
83501 - __FILE__,__LINE__, tty->driver->name, self->open_count );
83502 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
83503
83504 schedule();
83505 }
83506 @@ -340,13 +340,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
83507 if (extra_count) {
83508 /* ++ is not atomic, so this should be protected - Jean II */
83509 spin_lock_irqsave(&self->spinlock, flags);
83510 - self->open_count++;
83511 + local_inc(&self->open_count);
83512 spin_unlock_irqrestore(&self->spinlock, flags);
83513 }
83514 - self->blocked_open--;
83515 + local_dec(&self->blocked_open);
83516
83517 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
83518 - __FILE__,__LINE__, tty->driver->name, self->open_count);
83519 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
83520
83521 if (!retval)
83522 self->flags |= ASYNC_NORMAL_ACTIVE;
83523 @@ -415,14 +415,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
83524 }
83525 /* ++ is not atomic, so this should be protected - Jean II */
83526 spin_lock_irqsave(&self->spinlock, flags);
83527 - self->open_count++;
83528 + local_inc(&self->open_count);
83529
83530 tty->driver_data = self;
83531 self->tty = tty;
83532 spin_unlock_irqrestore(&self->spinlock, flags);
83533
83534 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
83535 - self->line, self->open_count);
83536 + self->line, local_read(&self->open_count));
83537
83538 /* Not really used by us, but lets do it anyway */
83539 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
83540 @@ -511,7 +511,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
83541 return;
83542 }
83543
83544 - if ((tty->count == 1) && (self->open_count != 1)) {
83545 + if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
83546 /*
83547 * Uh, oh. tty->count is 1, which means that the tty
83548 * structure will be freed. state->count should always
83549 @@ -521,16 +521,16 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
83550 */
83551 IRDA_DEBUG(0, "%s(), bad serial port count; "
83552 "tty->count is 1, state->count is %d\n", __func__ ,
83553 - self->open_count);
83554 - self->open_count = 1;
83555 + local_read(&self->open_count));
83556 + local_set(&self->open_count, 1);
83557 }
83558
83559 - if (--self->open_count < 0) {
83560 + if (local_dec_return(&self->open_count) < 0) {
83561 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
83562 - __func__, self->line, self->open_count);
83563 - self->open_count = 0;
83564 + __func__, self->line, local_read(&self->open_count));
83565 + local_set(&self->open_count, 0);
83566 }
83567 - if (self->open_count) {
83568 + if (local_read(&self->open_count)) {
83569 spin_unlock_irqrestore(&self->spinlock, flags);
83570
83571 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
83572 @@ -562,7 +562,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
83573 tty->closing = 0;
83574 self->tty = NULL;
83575
83576 - if (self->blocked_open) {
83577 + if (local_read(&self->blocked_open)) {
83578 if (self->close_delay)
83579 schedule_timeout_interruptible(self->close_delay);
83580 wake_up_interruptible(&self->open_wait);
83581 @@ -1017,7 +1017,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
83582 spin_lock_irqsave(&self->spinlock, flags);
83583 self->flags &= ~ASYNC_NORMAL_ACTIVE;
83584 self->tty = NULL;
83585 - self->open_count = 0;
83586 + local_set(&self->open_count, 0);
83587 spin_unlock_irqrestore(&self->spinlock, flags);
83588
83589 wake_up_interruptible(&self->open_wait);
83590 @@ -1369,7 +1369,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
83591 seq_putc(m, '\n');
83592
83593 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
83594 - seq_printf(m, "Open count: %d\n", self->open_count);
83595 + seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
83596 seq_printf(m, "Max data size: %d\n", self->max_data_size);
83597 seq_printf(m, "Max header size: %d\n", self->max_header_size);
83598
83599 diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
83600 index bada1b9..f325943 100644
83601 --- a/net/iucv/af_iucv.c
83602 +++ b/net/iucv/af_iucv.c
83603 @@ -651,10 +651,10 @@ static int iucv_sock_autobind(struct sock *sk)
83604
83605 write_lock_bh(&iucv_sk_list.lock);
83606
83607 - sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
83608 + sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
83609 while (__iucv_get_sock_by_name(name)) {
83610 sprintf(name, "%08x",
83611 - atomic_inc_return(&iucv_sk_list.autobind_name));
83612 + atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
83613 }
83614
83615 write_unlock_bh(&iucv_sk_list.lock);
83616 diff --git a/net/key/af_key.c b/net/key/af_key.c
83617 index 4e98193..439b449 100644
83618 --- a/net/key/af_key.c
83619 +++ b/net/key/af_key.c
83620 @@ -2489,6 +2489,8 @@ static int pfkey_migrate(struct sock *sk, struct sk_buff *skb,
83621 struct xfrm_migrate m[XFRM_MAX_DEPTH];
83622 struct xfrm_kmaddress k;
83623
83624 + pax_track_stack();
83625 +
83626 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
83627 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
83628 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
83629 @@ -3660,7 +3662,11 @@ static int pfkey_seq_show(struct seq_file *f, void *v)
83630 seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n");
83631 else
83632 seq_printf(f ,"%p %-6d %-6u %-6u %-6u %-6lu\n",
83633 +#ifdef CONFIG_GRKERNSEC_HIDESYM
83634 + NULL,
83635 +#else
83636 s,
83637 +#endif
83638 atomic_read(&s->sk_refcnt),
83639 sk_rmem_alloc_get(s),
83640 sk_wmem_alloc_get(s),
83641 diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c
83642 index bda96d1..c038b72 100644
83643 --- a/net/lapb/lapb_iface.c
83644 +++ b/net/lapb/lapb_iface.c
83645 @@ -157,7 +157,7 @@ int lapb_register(struct net_device *dev, struct lapb_register_struct *callbacks
83646 goto out;
83647
83648 lapb->dev = dev;
83649 - lapb->callbacks = *callbacks;
83650 + lapb->callbacks = callbacks;
83651
83652 __lapb_insert_cb(lapb);
83653
83654 @@ -379,32 +379,32 @@ int lapb_data_received(struct net_device *dev, struct sk_buff *skb)
83655
83656 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
83657 {
83658 - if (lapb->callbacks.connect_confirmation)
83659 - lapb->callbacks.connect_confirmation(lapb->dev, reason);
83660 + if (lapb->callbacks->connect_confirmation)
83661 + lapb->callbacks->connect_confirmation(lapb->dev, reason);
83662 }
83663
83664 void lapb_connect_indication(struct lapb_cb *lapb, int reason)
83665 {
83666 - if (lapb->callbacks.connect_indication)
83667 - lapb->callbacks.connect_indication(lapb->dev, reason);
83668 + if (lapb->callbacks->connect_indication)
83669 + lapb->callbacks->connect_indication(lapb->dev, reason);
83670 }
83671
83672 void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
83673 {
83674 - if (lapb->callbacks.disconnect_confirmation)
83675 - lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
83676 + if (lapb->callbacks->disconnect_confirmation)
83677 + lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
83678 }
83679
83680 void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
83681 {
83682 - if (lapb->callbacks.disconnect_indication)
83683 - lapb->callbacks.disconnect_indication(lapb->dev, reason);
83684 + if (lapb->callbacks->disconnect_indication)
83685 + lapb->callbacks->disconnect_indication(lapb->dev, reason);
83686 }
83687
83688 int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
83689 {
83690 - if (lapb->callbacks.data_indication)
83691 - return lapb->callbacks.data_indication(lapb->dev, skb);
83692 + if (lapb->callbacks->data_indication)
83693 + return lapb->callbacks->data_indication(lapb->dev, skb);
83694
83695 kfree_skb(skb);
83696 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
83697 @@ -414,8 +414,8 @@ int lapb_data_transmit(struct lapb_cb *lapb, struct sk_buff *skb)
83698 {
83699 int used = 0;
83700
83701 - if (lapb->callbacks.data_transmit) {
83702 - lapb->callbacks.data_transmit(lapb->dev, skb);
83703 + if (lapb->callbacks->data_transmit) {
83704 + lapb->callbacks->data_transmit(lapb->dev, skb);
83705 used = 1;
83706 }
83707
83708 diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
83709 index fe2d3f8..e57f683 100644
83710 --- a/net/mac80211/cfg.c
83711 +++ b/net/mac80211/cfg.c
83712 @@ -1369,7 +1369,7 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
83713 return err;
83714 }
83715
83716 -struct cfg80211_ops mac80211_config_ops = {
83717 +const struct cfg80211_ops mac80211_config_ops = {
83718 .add_virtual_intf = ieee80211_add_iface,
83719 .del_virtual_intf = ieee80211_del_iface,
83720 .change_virtual_intf = ieee80211_change_iface,
83721 diff --git a/net/mac80211/cfg.h b/net/mac80211/cfg.h
83722 index 7d7879f..2d51f62 100644
83723 --- a/net/mac80211/cfg.h
83724 +++ b/net/mac80211/cfg.h
83725 @@ -4,6 +4,6 @@
83726 #ifndef __CFG_H
83727 #define __CFG_H
83728
83729 -extern struct cfg80211_ops mac80211_config_ops;
83730 +extern const struct cfg80211_ops mac80211_config_ops;
83731
83732 #endif /* __CFG_H */
83733 diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
83734 index 99c7525..9cb4937 100644
83735 --- a/net/mac80211/debugfs_key.c
83736 +++ b/net/mac80211/debugfs_key.c
83737 @@ -211,9 +211,13 @@ static ssize_t key_key_read(struct file *file, char __user *userbuf,
83738 size_t count, loff_t *ppos)
83739 {
83740 struct ieee80211_key *key = file->private_data;
83741 - int i, res, bufsize = 2 * key->conf.keylen + 2;
83742 + int i, bufsize = 2 * key->conf.keylen + 2;
83743 char *buf = kmalloc(bufsize, GFP_KERNEL);
83744 char *p = buf;
83745 + ssize_t res;
83746 +
83747 + if (buf == NULL)
83748 + return -ENOMEM;
83749
83750 for (i = 0; i < key->conf.keylen; i++)
83751 p += scnprintf(p, bufsize + buf - p, "%02x", key->conf.key[i]);
83752 diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
83753 index 33a2e89..08650c8 100644
83754 --- a/net/mac80211/debugfs_sta.c
83755 +++ b/net/mac80211/debugfs_sta.c
83756 @@ -124,6 +124,8 @@ static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
83757 int i;
83758 struct sta_info *sta = file->private_data;
83759
83760 + pax_track_stack();
83761 +
83762 spin_lock_bh(&sta->lock);
83763 p += scnprintf(p, sizeof(buf)+buf-p, "next dialog_token is %#02x\n",
83764 sta->ampdu_mlme.dialog_token_allocator + 1);
83765 diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
83766 index ca62bfe..6657a03 100644
83767 --- a/net/mac80211/ieee80211_i.h
83768 +++ b/net/mac80211/ieee80211_i.h
83769 @@ -25,6 +25,7 @@
83770 #include <linux/etherdevice.h>
83771 #include <net/cfg80211.h>
83772 #include <net/mac80211.h>
83773 +#include <asm/local.h>
83774 #include "key.h"
83775 #include "sta_info.h"
83776
83777 @@ -635,7 +636,7 @@ struct ieee80211_local {
83778 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
83779 spinlock_t queue_stop_reason_lock;
83780
83781 - int open_count;
83782 + local_t open_count;
83783 int monitors, cooked_mntrs;
83784 /* number of interfaces with corresponding FIF_ flags */
83785 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll;
83786 diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
83787 index 079c500..eb3c6d4 100644
83788 --- a/net/mac80211/iface.c
83789 +++ b/net/mac80211/iface.c
83790 @@ -166,7 +166,7 @@ static int ieee80211_open(struct net_device *dev)
83791 break;
83792 }
83793
83794 - if (local->open_count == 0) {
83795 + if (local_read(&local->open_count) == 0) {
83796 res = drv_start(local);
83797 if (res)
83798 goto err_del_bss;
83799 @@ -196,7 +196,7 @@ static int ieee80211_open(struct net_device *dev)
83800 * Validate the MAC address for this device.
83801 */
83802 if (!is_valid_ether_addr(dev->dev_addr)) {
83803 - if (!local->open_count)
83804 + if (!local_read(&local->open_count))
83805 drv_stop(local);
83806 return -EADDRNOTAVAIL;
83807 }
83808 @@ -292,7 +292,7 @@ static int ieee80211_open(struct net_device *dev)
83809
83810 hw_reconf_flags |= __ieee80211_recalc_idle(local);
83811
83812 - local->open_count++;
83813 + local_inc(&local->open_count);
83814 if (hw_reconf_flags) {
83815 ieee80211_hw_config(local, hw_reconf_flags);
83816 /*
83817 @@ -320,7 +320,7 @@ static int ieee80211_open(struct net_device *dev)
83818 err_del_interface:
83819 drv_remove_interface(local, &conf);
83820 err_stop:
83821 - if (!local->open_count)
83822 + if (!local_read(&local->open_count))
83823 drv_stop(local);
83824 err_del_bss:
83825 sdata->bss = NULL;
83826 @@ -420,7 +420,7 @@ static int ieee80211_stop(struct net_device *dev)
83827 WARN_ON(!list_empty(&sdata->u.ap.vlans));
83828 }
83829
83830 - local->open_count--;
83831 + local_dec(&local->open_count);
83832
83833 switch (sdata->vif.type) {
83834 case NL80211_IFTYPE_AP_VLAN:
83835 @@ -526,7 +526,7 @@ static int ieee80211_stop(struct net_device *dev)
83836
83837 ieee80211_recalc_ps(local, -1);
83838
83839 - if (local->open_count == 0) {
83840 + if (local_read(&local->open_count) == 0) {
83841 ieee80211_clear_tx_pending(local);
83842 ieee80211_stop_device(local);
83843
83844 diff --git a/net/mac80211/main.c b/net/mac80211/main.c
83845 index 2dfe176..74e4388 100644
83846 --- a/net/mac80211/main.c
83847 +++ b/net/mac80211/main.c
83848 @@ -145,7 +145,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
83849 local->hw.conf.power_level = power;
83850 }
83851
83852 - if (changed && local->open_count) {
83853 + if (changed && local_read(&local->open_count)) {
83854 ret = drv_config(local, changed);
83855 /*
83856 * Goal:
83857 diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
83858 index e67eea7..fcc227e 100644
83859 --- a/net/mac80211/mlme.c
83860 +++ b/net/mac80211/mlme.c
83861 @@ -1438,6 +1438,8 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
83862 bool have_higher_than_11mbit = false, newsta = false;
83863 u16 ap_ht_cap_flags;
83864
83865 + pax_track_stack();
83866 +
83867 /*
83868 * AssocResp and ReassocResp have identical structure, so process both
83869 * of them in this function.
83870 diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
83871 index e535f1c..4d733d1 100644
83872 --- a/net/mac80211/pm.c
83873 +++ b/net/mac80211/pm.c
83874 @@ -107,7 +107,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
83875 }
83876
83877 /* stop hardware - this must stop RX */
83878 - if (local->open_count)
83879 + if (local_read(&local->open_count))
83880 ieee80211_stop_device(local);
83881
83882 local->suspended = true;
83883 diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
83884 index b33efc4..0a2efb6 100644
83885 --- a/net/mac80211/rate.c
83886 +++ b/net/mac80211/rate.c
83887 @@ -287,7 +287,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
83888 struct rate_control_ref *ref, *old;
83889
83890 ASSERT_RTNL();
83891 - if (local->open_count)
83892 + if (local_read(&local->open_count))
83893 return -EBUSY;
83894
83895 ref = rate_control_alloc(name, local);
83896 diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
83897 index b1d7904..57e4da7 100644
83898 --- a/net/mac80211/tx.c
83899 +++ b/net/mac80211/tx.c
83900 @@ -173,7 +173,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
83901 return cpu_to_le16(dur);
83902 }
83903
83904 -static int inline is_ieee80211_device(struct ieee80211_local *local,
83905 +static inline int is_ieee80211_device(struct ieee80211_local *local,
83906 struct net_device *dev)
83907 {
83908 return local == wdev_priv(dev->ieee80211_ptr);
83909 diff --git a/net/mac80211/util.c b/net/mac80211/util.c
83910 index 31b1085..48fb26d 100644
83911 --- a/net/mac80211/util.c
83912 +++ b/net/mac80211/util.c
83913 @@ -1042,7 +1042,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
83914 local->resuming = true;
83915
83916 /* restart hardware */
83917 - if (local->open_count) {
83918 + if (local_read(&local->open_count)) {
83919 /*
83920 * Upon resume hardware can sometimes be goofy due to
83921 * various platform / driver / bus issues, so restarting
83922 diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
83923 index 634d14a..b35a608 100644
83924 --- a/net/netfilter/Kconfig
83925 +++ b/net/netfilter/Kconfig
83926 @@ -635,6 +635,16 @@ config NETFILTER_XT_MATCH_ESP
83927
83928 To compile it as a module, choose M here. If unsure, say N.
83929
83930 +config NETFILTER_XT_MATCH_GRADM
83931 + tristate '"gradm" match support'
83932 + depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
83933 + depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
83934 + ---help---
83935 + The gradm match allows to match on grsecurity RBAC being enabled.
83936 + It is useful when iptables rules are applied early on bootup to
83937 + prevent connections to the machine (except from a trusted host)
83938 + while the RBAC system is disabled.
83939 +
83940 config NETFILTER_XT_MATCH_HASHLIMIT
83941 tristate '"hashlimit" match support'
83942 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
83943 diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
83944 index 49f62ee..a17b2c6 100644
83945 --- a/net/netfilter/Makefile
83946 +++ b/net/netfilter/Makefile
83947 @@ -68,6 +68,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRACK) += xt_conntrack.o
83948 obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
83949 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
83950 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
83951 +obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
83952 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
83953 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
83954 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
83955 diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c
83956 index 3c7e427..724043c 100644
83957 --- a/net/netfilter/ipvs/ip_vs_app.c
83958 +++ b/net/netfilter/ipvs/ip_vs_app.c
83959 @@ -564,7 +564,7 @@ static const struct file_operations ip_vs_app_fops = {
83960 .open = ip_vs_app_open,
83961 .read = seq_read,
83962 .llseek = seq_lseek,
83963 - .release = seq_release,
83964 + .release = seq_release_net,
83965 };
83966 #endif
83967
83968 diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
83969 index 95682e5..457dbac 100644
83970 --- a/net/netfilter/ipvs/ip_vs_conn.c
83971 +++ b/net/netfilter/ipvs/ip_vs_conn.c
83972 @@ -453,10 +453,10 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
83973 /* if the connection is not template and is created
83974 * by sync, preserve the activity flag.
83975 */
83976 - cp->flags |= atomic_read(&dest->conn_flags) &
83977 + cp->flags |= atomic_read_unchecked(&dest->conn_flags) &
83978 (~IP_VS_CONN_F_INACTIVE);
83979 else
83980 - cp->flags |= atomic_read(&dest->conn_flags);
83981 + cp->flags |= atomic_read_unchecked(&dest->conn_flags);
83982 cp->dest = dest;
83983
83984 IP_VS_DBG_BUF(7, "Bind-dest %s c:%s:%d v:%s:%d "
83985 @@ -723,7 +723,7 @@ ip_vs_conn_new(int af, int proto, const union nf_inet_addr *caddr, __be16 cport,
83986 atomic_set(&cp->refcnt, 1);
83987
83988 atomic_set(&cp->n_control, 0);
83989 - atomic_set(&cp->in_pkts, 0);
83990 + atomic_set_unchecked(&cp->in_pkts, 0);
83991
83992 atomic_inc(&ip_vs_conn_count);
83993 if (flags & IP_VS_CONN_F_NO_CPORT)
83994 @@ -871,7 +871,7 @@ static const struct file_operations ip_vs_conn_fops = {
83995 .open = ip_vs_conn_open,
83996 .read = seq_read,
83997 .llseek = seq_lseek,
83998 - .release = seq_release,
83999 + .release = seq_release_net,
84000 };
84001
84002 static const char *ip_vs_origin_name(unsigned flags)
84003 @@ -934,7 +934,7 @@ static const struct file_operations ip_vs_conn_sync_fops = {
84004 .open = ip_vs_conn_sync_open,
84005 .read = seq_read,
84006 .llseek = seq_lseek,
84007 - .release = seq_release,
84008 + .release = seq_release_net,
84009 };
84010
84011 #endif
84012 @@ -961,7 +961,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
84013
84014 /* Don't drop the entry if its number of incoming packets is not
84015 located in [0, 8] */
84016 - i = atomic_read(&cp->in_pkts);
84017 + i = atomic_read_unchecked(&cp->in_pkts);
84018 if (i > 8 || i < 0) return 0;
84019
84020 if (!todrop_rate[i]) return 0;
84021 diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
84022 index b95699f..5fee919 100644
84023 --- a/net/netfilter/ipvs/ip_vs_core.c
84024 +++ b/net/netfilter/ipvs/ip_vs_core.c
84025 @@ -485,7 +485,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
84026 ret = cp->packet_xmit(skb, cp, pp);
84027 /* do not touch skb anymore */
84028
84029 - atomic_inc(&cp->in_pkts);
84030 + atomic_inc_unchecked(&cp->in_pkts);
84031 ip_vs_conn_put(cp);
84032 return ret;
84033 }
84034 @@ -1357,7 +1357,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb,
84035 * Sync connection if it is about to close to
84036 * encorage the standby servers to update the connections timeout
84037 */
84038 - pkts = atomic_add_return(1, &cp->in_pkts);
84039 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
84040 if (af == AF_INET &&
84041 (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
84042 (((cp->protocol != IPPROTO_TCP ||
84043 diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
84044 index 02b2610..2d89424 100644
84045 --- a/net/netfilter/ipvs/ip_vs_ctl.c
84046 +++ b/net/netfilter/ipvs/ip_vs_ctl.c
84047 @@ -792,7 +792,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc,
84048 ip_vs_rs_hash(dest);
84049 write_unlock_bh(&__ip_vs_rs_lock);
84050 }
84051 - atomic_set(&dest->conn_flags, conn_flags);
84052 + atomic_set_unchecked(&dest->conn_flags, conn_flags);
84053
84054 /* bind the service */
84055 if (!dest->svc) {
84056 @@ -1888,7 +1888,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
84057 " %-7s %-6d %-10d %-10d\n",
84058 &dest->addr.in6,
84059 ntohs(dest->port),
84060 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
84061 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
84062 atomic_read(&dest->weight),
84063 atomic_read(&dest->activeconns),
84064 atomic_read(&dest->inactconns));
84065 @@ -1899,7 +1899,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
84066 "%-7s %-6d %-10d %-10d\n",
84067 ntohl(dest->addr.ip),
84068 ntohs(dest->port),
84069 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
84070 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
84071 atomic_read(&dest->weight),
84072 atomic_read(&dest->activeconns),
84073 atomic_read(&dest->inactconns));
84074 @@ -1927,7 +1927,7 @@ static const struct file_operations ip_vs_info_fops = {
84075 .open = ip_vs_info_open,
84076 .read = seq_read,
84077 .llseek = seq_lseek,
84078 - .release = seq_release_private,
84079 + .release = seq_release_net,
84080 };
84081
84082 #endif
84083 @@ -1976,7 +1976,7 @@ static const struct file_operations ip_vs_stats_fops = {
84084 .open = ip_vs_stats_seq_open,
84085 .read = seq_read,
84086 .llseek = seq_lseek,
84087 - .release = single_release,
84088 + .release = single_release_net,
84089 };
84090
84091 #endif
84092 @@ -2292,7 +2292,7 @@ __ip_vs_get_dest_entries(const struct ip_vs_get_dests *get,
84093
84094 entry.addr = dest->addr.ip;
84095 entry.port = dest->port;
84096 - entry.conn_flags = atomic_read(&dest->conn_flags);
84097 + entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
84098 entry.weight = atomic_read(&dest->weight);
84099 entry.u_threshold = dest->u_threshold;
84100 entry.l_threshold = dest->l_threshold;
84101 @@ -2353,6 +2353,8 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
84102 unsigned char arg[128];
84103 int ret = 0;
84104
84105 + pax_track_stack();
84106 +
84107 if (!capable(CAP_NET_ADMIN))
84108 return -EPERM;
84109
84110 @@ -2802,7 +2804,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
84111 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
84112
84113 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
84114 - atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
84115 + atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
84116 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
84117 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
84118 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
84119 diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
84120 index e177f0d..55e8581 100644
84121 --- a/net/netfilter/ipvs/ip_vs_sync.c
84122 +++ b/net/netfilter/ipvs/ip_vs_sync.c
84123 @@ -438,7 +438,7 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen)
84124
84125 if (opt)
84126 memcpy(&cp->in_seq, opt, sizeof(*opt));
84127 - atomic_set(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
84128 + atomic_set_unchecked(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
84129 cp->state = state;
84130 cp->old_state = cp->state;
84131 /*
84132 diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
84133 index 30b3189..e2e4b55 100644
84134 --- a/net/netfilter/ipvs/ip_vs_xmit.c
84135 +++ b/net/netfilter/ipvs/ip_vs_xmit.c
84136 @@ -875,7 +875,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
84137 else
84138 rc = NF_ACCEPT;
84139 /* do not touch skb anymore */
84140 - atomic_inc(&cp->in_pkts);
84141 + atomic_inc_unchecked(&cp->in_pkts);
84142 goto out;
84143 }
84144
84145 @@ -949,7 +949,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
84146 else
84147 rc = NF_ACCEPT;
84148 /* do not touch skb anymore */
84149 - atomic_inc(&cp->in_pkts);
84150 + atomic_inc_unchecked(&cp->in_pkts);
84151 goto out;
84152 }
84153
84154 diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
84155 index d521718..d0fd7a1 100644
84156 --- a/net/netfilter/nf_conntrack_netlink.c
84157 +++ b/net/netfilter/nf_conntrack_netlink.c
84158 @@ -706,7 +706,7 @@ ctnetlink_parse_tuple_proto(struct nlattr *attr,
84159 static int
84160 ctnetlink_parse_tuple(const struct nlattr * const cda[],
84161 struct nf_conntrack_tuple *tuple,
84162 - enum ctattr_tuple type, u_int8_t l3num)
84163 + enum ctattr_type type, u_int8_t l3num)
84164 {
84165 struct nlattr *tb[CTA_TUPLE_MAX+1];
84166 int err;
84167 diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
84168 index f900dc3..5e45346 100644
84169 --- a/net/netfilter/nfnetlink_log.c
84170 +++ b/net/netfilter/nfnetlink_log.c
84171 @@ -68,7 +68,7 @@ struct nfulnl_instance {
84172 };
84173
84174 static DEFINE_RWLOCK(instances_lock);
84175 -static atomic_t global_seq;
84176 +static atomic_unchecked_t global_seq;
84177
84178 #define INSTANCE_BUCKETS 16
84179 static struct hlist_head instance_table[INSTANCE_BUCKETS];
84180 @@ -493,7 +493,7 @@ __build_packet_message(struct nfulnl_instance *inst,
84181 /* global sequence number */
84182 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
84183 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
84184 - htonl(atomic_inc_return(&global_seq)));
84185 + htonl(atomic_inc_return_unchecked(&global_seq)));
84186
84187 if (data_len) {
84188 struct nlattr *nla;
84189 diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
84190 new file mode 100644
84191 index 0000000..b1bac76
84192 --- /dev/null
84193 +++ b/net/netfilter/xt_gradm.c
84194 @@ -0,0 +1,51 @@
84195 +/*
84196 + * gradm match for netfilter
84197 + * Copyright © Zbigniew Krzystolik, 2010
84198 + *
84199 + * This program is free software; you can redistribute it and/or modify
84200 + * it under the terms of the GNU General Public License; either version
84201 + * 2 or 3 as published by the Free Software Foundation.
84202 + */
84203 +#include <linux/module.h>
84204 +#include <linux/moduleparam.h>
84205 +#include <linux/skbuff.h>
84206 +#include <linux/netfilter/x_tables.h>
84207 +#include <linux/grsecurity.h>
84208 +#include <linux/netfilter/xt_gradm.h>
84209 +
84210 +static bool
84211 +gradm_mt(const struct sk_buff *skb, const struct xt_match_param *par)
84212 +{
84213 + const struct xt_gradm_mtinfo *info = par->matchinfo;
84214 + bool retval = false;
84215 + if (gr_acl_is_enabled())
84216 + retval = true;
84217 + return retval ^ info->invflags;
84218 +}
84219 +
84220 +static struct xt_match gradm_mt_reg __read_mostly = {
84221 + .name = "gradm",
84222 + .revision = 0,
84223 + .family = NFPROTO_UNSPEC,
84224 + .match = gradm_mt,
84225 + .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
84226 + .me = THIS_MODULE,
84227 +};
84228 +
84229 +static int __init gradm_mt_init(void)
84230 +{
84231 + return xt_register_match(&gradm_mt_reg);
84232 +}
84233 +
84234 +static void __exit gradm_mt_exit(void)
84235 +{
84236 + xt_unregister_match(&gradm_mt_reg);
84237 +}
84238 +
84239 +module_init(gradm_mt_init);
84240 +module_exit(gradm_mt_exit);
84241 +MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
84242 +MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
84243 +MODULE_LICENSE("GPL");
84244 +MODULE_ALIAS("ipt_gradm");
84245 +MODULE_ALIAS("ip6t_gradm");
84246 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
84247 index 5a7dcdf..24a3578 100644
84248 --- a/net/netlink/af_netlink.c
84249 +++ b/net/netlink/af_netlink.c
84250 @@ -733,7 +733,7 @@ static void netlink_overrun(struct sock *sk)
84251 sk->sk_error_report(sk);
84252 }
84253 }
84254 - atomic_inc(&sk->sk_drops);
84255 + atomic_inc_unchecked(&sk->sk_drops);
84256 }
84257
84258 static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
84259 @@ -1964,15 +1964,23 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
84260 struct netlink_sock *nlk = nlk_sk(s);
84261
84262 seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %-8d %-8d\n",
84263 +#ifdef CONFIG_GRKERNSEC_HIDESYM
84264 + NULL,
84265 +#else
84266 s,
84267 +#endif
84268 s->sk_protocol,
84269 nlk->pid,
84270 nlk->groups ? (u32)nlk->groups[0] : 0,
84271 sk_rmem_alloc_get(s),
84272 sk_wmem_alloc_get(s),
84273 +#ifdef CONFIG_GRKERNSEC_HIDESYM
84274 + NULL,
84275 +#else
84276 nlk->cb,
84277 +#endif
84278 atomic_read(&s->sk_refcnt),
84279 - atomic_read(&s->sk_drops)
84280 + atomic_read_unchecked(&s->sk_drops)
84281 );
84282
84283 }
84284 diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
84285 index 7a83495..ab0062f 100644
84286 --- a/net/netrom/af_netrom.c
84287 +++ b/net/netrom/af_netrom.c
84288 @@ -838,6 +838,7 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
84289 struct sock *sk = sock->sk;
84290 struct nr_sock *nr = nr_sk(sk);
84291
84292 + memset(sax, 0, sizeof(*sax));
84293 lock_sock(sk);
84294 if (peer != 0) {
84295 if (sk->sk_state != TCP_ESTABLISHED) {
84296 @@ -852,7 +853,6 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
84297 *uaddr_len = sizeof(struct full_sockaddr_ax25);
84298 } else {
84299 sax->fsa_ax25.sax25_family = AF_NETROM;
84300 - sax->fsa_ax25.sax25_ndigis = 0;
84301 sax->fsa_ax25.sax25_call = nr->source_addr;
84302 *uaddr_len = sizeof(struct sockaddr_ax25);
84303 }
84304 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
84305 index 35cfa79..4e78ff7 100644
84306 --- a/net/packet/af_packet.c
84307 +++ b/net/packet/af_packet.c
84308 @@ -2429,7 +2429,11 @@ static int packet_seq_show(struct seq_file *seq, void *v)
84309
84310 seq_printf(seq,
84311 "%p %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
84312 +#ifdef CONFIG_GRKERNSEC_HIDESYM
84313 + NULL,
84314 +#else
84315 s,
84316 +#endif
84317 atomic_read(&s->sk_refcnt),
84318 s->sk_type,
84319 ntohs(po->num),
84320 diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
84321 index 519ff9d..a422a90 100644
84322 --- a/net/phonet/af_phonet.c
84323 +++ b/net/phonet/af_phonet.c
84324 @@ -41,7 +41,7 @@ static struct phonet_protocol *phonet_proto_get(int protocol)
84325 {
84326 struct phonet_protocol *pp;
84327
84328 - if (protocol >= PHONET_NPROTO)
84329 + if (protocol < 0 || protocol >= PHONET_NPROTO)
84330 return NULL;
84331
84332 spin_lock(&proto_tab_lock);
84333 @@ -402,7 +402,7 @@ int __init_or_module phonet_proto_register(int protocol,
84334 {
84335 int err = 0;
84336
84337 - if (protocol >= PHONET_NPROTO)
84338 + if (protocol < 0 || protocol >= PHONET_NPROTO)
84339 return -EINVAL;
84340
84341 err = proto_register(pp->prot, 1);
84342 diff --git a/net/phonet/datagram.c b/net/phonet/datagram.c
84343 index ef5c75c..2b6c2fa 100644
84344 --- a/net/phonet/datagram.c
84345 +++ b/net/phonet/datagram.c
84346 @@ -162,7 +162,7 @@ static int pn_backlog_rcv(struct sock *sk, struct sk_buff *skb)
84347 if (err < 0) {
84348 kfree_skb(skb);
84349 if (err == -ENOMEM)
84350 - atomic_inc(&sk->sk_drops);
84351 + atomic_inc_unchecked(&sk->sk_drops);
84352 }
84353 return err ? NET_RX_DROP : NET_RX_SUCCESS;
84354 }
84355 diff --git a/net/phonet/pep.c b/net/phonet/pep.c
84356 index 9cdd35e..16cd850 100644
84357 --- a/net/phonet/pep.c
84358 +++ b/net/phonet/pep.c
84359 @@ -348,7 +348,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
84360
84361 case PNS_PEP_CTRL_REQ:
84362 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
84363 - atomic_inc(&sk->sk_drops);
84364 + atomic_inc_unchecked(&sk->sk_drops);
84365 break;
84366 }
84367 __skb_pull(skb, 4);
84368 @@ -362,12 +362,12 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
84369 if (!err)
84370 return 0;
84371 if (err == -ENOMEM)
84372 - atomic_inc(&sk->sk_drops);
84373 + atomic_inc_unchecked(&sk->sk_drops);
84374 break;
84375 }
84376
84377 if (pn->rx_credits == 0) {
84378 - atomic_inc(&sk->sk_drops);
84379 + atomic_inc_unchecked(&sk->sk_drops);
84380 err = -ENOBUFS;
84381 break;
84382 }
84383 diff --git a/net/phonet/socket.c b/net/phonet/socket.c
84384 index aa5b5a9..c09b4f8 100644
84385 --- a/net/phonet/socket.c
84386 +++ b/net/phonet/socket.c
84387 @@ -482,8 +482,13 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v)
84388 sk->sk_state,
84389 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
84390 sock_i_uid(sk), sock_i_ino(sk),
84391 - atomic_read(&sk->sk_refcnt), sk,
84392 - atomic_read(&sk->sk_drops), &len);
84393 + atomic_read(&sk->sk_refcnt),
84394 +#ifdef CONFIG_GRKERNSEC_HIDESYM
84395 + NULL,
84396 +#else
84397 + sk,
84398 +#endif
84399 + atomic_read_unchecked(&sk->sk_drops), &len);
84400 }
84401 seq_printf(seq, "%*s\n", 127 - len, "");
84402 return 0;
84403 diff --git a/net/rds/Kconfig b/net/rds/Kconfig
84404 index ec753b3..821187c 100644
84405 --- a/net/rds/Kconfig
84406 +++ b/net/rds/Kconfig
84407 @@ -1,7 +1,7 @@
84408
84409 config RDS
84410 tristate "The RDS Protocol (EXPERIMENTAL)"
84411 - depends on INET && EXPERIMENTAL
84412 + depends on INET && EXPERIMENTAL && BROKEN
84413 ---help---
84414 The RDS (Reliable Datagram Sockets) protocol provides reliable,
84415 sequenced delivery of datagrams over Infiniband, iWARP,
84416 diff --git a/net/rds/cong.c b/net/rds/cong.c
84417 index dd2711d..1c7ed12 100644
84418 --- a/net/rds/cong.c
84419 +++ b/net/rds/cong.c
84420 @@ -77,7 +77,7 @@
84421 * finds that the saved generation number is smaller than the global generation
84422 * number, it wakes up the process.
84423 */
84424 -static atomic_t rds_cong_generation = ATOMIC_INIT(0);
84425 +static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
84426
84427 /*
84428 * Congestion monitoring
84429 @@ -232,7 +232,7 @@ void rds_cong_map_updated(struct rds_cong_map *map, uint64_t portmask)
84430 rdsdebug("waking map %p for %pI4\n",
84431 map, &map->m_addr);
84432 rds_stats_inc(s_cong_update_received);
84433 - atomic_inc(&rds_cong_generation);
84434 + atomic_inc_unchecked(&rds_cong_generation);
84435 if (waitqueue_active(&map->m_waitq))
84436 wake_up(&map->m_waitq);
84437 if (waitqueue_active(&rds_poll_waitq))
84438 @@ -258,7 +258,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
84439
84440 int rds_cong_updated_since(unsigned long *recent)
84441 {
84442 - unsigned long gen = atomic_read(&rds_cong_generation);
84443 + unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
84444
84445 if (likely(*recent == gen))
84446 return 0;
84447 diff --git a/net/rds/iw_rdma.c b/net/rds/iw_rdma.c
84448 index de4a1b1..94ec861 100644
84449 --- a/net/rds/iw_rdma.c
84450 +++ b/net/rds/iw_rdma.c
84451 @@ -181,6 +181,8 @@ int rds_iw_update_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_i
84452 struct rdma_cm_id *pcm_id;
84453 int rc;
84454
84455 + pax_track_stack();
84456 +
84457 src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr;
84458 dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr;
84459
84460 diff --git a/net/rds/tcp.c b/net/rds/tcp.c
84461 index b5198ae..8b9fb90 100644
84462 --- a/net/rds/tcp.c
84463 +++ b/net/rds/tcp.c
84464 @@ -57,7 +57,7 @@ void rds_tcp_nonagle(struct socket *sock)
84465 int val = 1;
84466
84467 set_fs(KERNEL_DS);
84468 - sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __user *)&val,
84469 + sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __force_user *)&val,
84470 sizeof(val));
84471 set_fs(oldfs);
84472 }
84473 diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
84474 index ab545e0..4079b3b 100644
84475 --- a/net/rds/tcp_send.c
84476 +++ b/net/rds/tcp_send.c
84477 @@ -43,7 +43,7 @@ static void rds_tcp_cork(struct socket *sock, int val)
84478
84479 oldfs = get_fs();
84480 set_fs(KERNEL_DS);
84481 - sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __user *)&val,
84482 + sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __force_user *)&val,
84483 sizeof(val));
84484 set_fs(oldfs);
84485 }
84486 diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
84487 index a86afce..8657bce 100644
84488 --- a/net/rxrpc/af_rxrpc.c
84489 +++ b/net/rxrpc/af_rxrpc.c
84490 @@ -38,7 +38,7 @@ static const struct proto_ops rxrpc_rpc_ops;
84491 __be32 rxrpc_epoch;
84492
84493 /* current debugging ID */
84494 -atomic_t rxrpc_debug_id;
84495 +atomic_unchecked_t rxrpc_debug_id;
84496
84497 /* count of skbs currently in use */
84498 atomic_t rxrpc_n_skbs;
84499 diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
84500 index b4a2209..539106c 100644
84501 --- a/net/rxrpc/ar-ack.c
84502 +++ b/net/rxrpc/ar-ack.c
84503 @@ -174,7 +174,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
84504
84505 _enter("{%d,%d,%d,%d},",
84506 call->acks_hard, call->acks_unacked,
84507 - atomic_read(&call->sequence),
84508 + atomic_read_unchecked(&call->sequence),
84509 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
84510
84511 stop = 0;
84512 @@ -198,7 +198,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
84513
84514 /* each Tx packet has a new serial number */
84515 sp->hdr.serial =
84516 - htonl(atomic_inc_return(&call->conn->serial));
84517 + htonl(atomic_inc_return_unchecked(&call->conn->serial));
84518
84519 hdr = (struct rxrpc_header *) txb->head;
84520 hdr->serial = sp->hdr.serial;
84521 @@ -401,7 +401,7 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard)
84522 */
84523 static void rxrpc_clear_tx_window(struct rxrpc_call *call)
84524 {
84525 - rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
84526 + rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
84527 }
84528
84529 /*
84530 @@ -627,7 +627,7 @@ process_further:
84531
84532 latest = ntohl(sp->hdr.serial);
84533 hard = ntohl(ack.firstPacket);
84534 - tx = atomic_read(&call->sequence);
84535 + tx = atomic_read_unchecked(&call->sequence);
84536
84537 _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
84538 latest,
84539 @@ -840,6 +840,8 @@ void rxrpc_process_call(struct work_struct *work)
84540 u32 abort_code = RX_PROTOCOL_ERROR;
84541 u8 *acks = NULL;
84542
84543 + pax_track_stack();
84544 +
84545 //printk("\n--------------------\n");
84546 _enter("{%d,%s,%lx} [%lu]",
84547 call->debug_id, rxrpc_call_states[call->state], call->events,
84548 @@ -1159,7 +1161,7 @@ void rxrpc_process_call(struct work_struct *work)
84549 goto maybe_reschedule;
84550
84551 send_ACK_with_skew:
84552 - ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
84553 + ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
84554 ntohl(ack.serial));
84555 send_ACK:
84556 mtu = call->conn->trans->peer->if_mtu;
84557 @@ -1171,7 +1173,7 @@ send_ACK:
84558 ackinfo.rxMTU = htonl(5692);
84559 ackinfo.jumbo_max = htonl(4);
84560
84561 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
84562 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
84563 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
84564 ntohl(hdr.serial),
84565 ntohs(ack.maxSkew),
84566 @@ -1189,7 +1191,7 @@ send_ACK:
84567 send_message:
84568 _debug("send message");
84569
84570 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
84571 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
84572 _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
84573 send_message_2:
84574
84575 diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c
84576 index bc0019f..e1b4b24 100644
84577 --- a/net/rxrpc/ar-call.c
84578 +++ b/net/rxrpc/ar-call.c
84579 @@ -82,7 +82,7 @@ static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
84580 spin_lock_init(&call->lock);
84581 rwlock_init(&call->state_lock);
84582 atomic_set(&call->usage, 1);
84583 - call->debug_id = atomic_inc_return(&rxrpc_debug_id);
84584 + call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
84585 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
84586
84587 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
84588 diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c
84589 index 9f1ce84..ff8d061 100644
84590 --- a/net/rxrpc/ar-connection.c
84591 +++ b/net/rxrpc/ar-connection.c
84592 @@ -205,7 +205,7 @@ static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
84593 rwlock_init(&conn->lock);
84594 spin_lock_init(&conn->state_lock);
84595 atomic_set(&conn->usage, 1);
84596 - conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
84597 + conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
84598 conn->avail_calls = RXRPC_MAXCALLS;
84599 conn->size_align = 4;
84600 conn->header_size = sizeof(struct rxrpc_header);
84601 diff --git a/net/rxrpc/ar-connevent.c b/net/rxrpc/ar-connevent.c
84602 index 0505cdc..f0748ce 100644
84603 --- a/net/rxrpc/ar-connevent.c
84604 +++ b/net/rxrpc/ar-connevent.c
84605 @@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
84606
84607 len = iov[0].iov_len + iov[1].iov_len;
84608
84609 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
84610 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
84611 _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
84612
84613 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
84614 diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
84615 index f98c802..9e8488e 100644
84616 --- a/net/rxrpc/ar-input.c
84617 +++ b/net/rxrpc/ar-input.c
84618 @@ -339,9 +339,9 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
84619 /* track the latest serial number on this connection for ACK packet
84620 * information */
84621 serial = ntohl(sp->hdr.serial);
84622 - hi_serial = atomic_read(&call->conn->hi_serial);
84623 + hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
84624 while (serial > hi_serial)
84625 - hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
84626 + hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
84627 serial);
84628
84629 /* request ACK generation for any ACK or DATA packet that requests
84630 diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
84631 index 7043b29..06edcdf 100644
84632 --- a/net/rxrpc/ar-internal.h
84633 +++ b/net/rxrpc/ar-internal.h
84634 @@ -272,8 +272,8 @@ struct rxrpc_connection {
84635 int error; /* error code for local abort */
84636 int debug_id; /* debug ID for printks */
84637 unsigned call_counter; /* call ID counter */
84638 - atomic_t serial; /* packet serial number counter */
84639 - atomic_t hi_serial; /* highest serial number received */
84640 + atomic_unchecked_t serial; /* packet serial number counter */
84641 + atomic_unchecked_t hi_serial; /* highest serial number received */
84642 u8 avail_calls; /* number of calls available */
84643 u8 size_align; /* data size alignment (for security) */
84644 u8 header_size; /* rxrpc + security header size */
84645 @@ -346,7 +346,7 @@ struct rxrpc_call {
84646 spinlock_t lock;
84647 rwlock_t state_lock; /* lock for state transition */
84648 atomic_t usage;
84649 - atomic_t sequence; /* Tx data packet sequence counter */
84650 + atomic_unchecked_t sequence; /* Tx data packet sequence counter */
84651 u32 abort_code; /* local/remote abort code */
84652 enum { /* current state of call */
84653 RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
84654 @@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(struct rxrpc_call *call, u32 abort_code)
84655 */
84656 extern atomic_t rxrpc_n_skbs;
84657 extern __be32 rxrpc_epoch;
84658 -extern atomic_t rxrpc_debug_id;
84659 +extern atomic_unchecked_t rxrpc_debug_id;
84660 extern struct workqueue_struct *rxrpc_workqueue;
84661
84662 /*
84663 diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c
84664 index 74697b2..10f9b77 100644
84665 --- a/net/rxrpc/ar-key.c
84666 +++ b/net/rxrpc/ar-key.c
84667 @@ -88,11 +88,11 @@ static int rxrpc_instantiate_xdr_rxkad(struct key *key, const __be32 *xdr,
84668 return ret;
84669
84670 plen -= sizeof(*token);
84671 - token = kmalloc(sizeof(*token), GFP_KERNEL);
84672 + token = kzalloc(sizeof(*token), GFP_KERNEL);
84673 if (!token)
84674 return -ENOMEM;
84675
84676 - token->kad = kmalloc(plen, GFP_KERNEL);
84677 + token->kad = kzalloc(plen, GFP_KERNEL);
84678 if (!token->kad) {
84679 kfree(token);
84680 return -ENOMEM;
84681 @@ -730,10 +730,10 @@ static int rxrpc_instantiate(struct key *key, const void *data, size_t datalen)
84682 goto error;
84683
84684 ret = -ENOMEM;
84685 - token = kmalloc(sizeof(*token), GFP_KERNEL);
84686 + token = kzalloc(sizeof(*token), GFP_KERNEL);
84687 if (!token)
84688 goto error;
84689 - token->kad = kmalloc(plen, GFP_KERNEL);
84690 + token->kad = kzalloc(plen, GFP_KERNEL);
84691 if (!token->kad)
84692 goto error_free;
84693
84694 diff --git a/net/rxrpc/ar-local.c b/net/rxrpc/ar-local.c
84695 index 807535f..5b7f19e 100644
84696 --- a/net/rxrpc/ar-local.c
84697 +++ b/net/rxrpc/ar-local.c
84698 @@ -44,7 +44,7 @@ struct rxrpc_local *rxrpc_alloc_local(struct sockaddr_rxrpc *srx)
84699 spin_lock_init(&local->lock);
84700 rwlock_init(&local->services_lock);
84701 atomic_set(&local->usage, 1);
84702 - local->debug_id = atomic_inc_return(&rxrpc_debug_id);
84703 + local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
84704 memcpy(&local->srx, srx, sizeof(*srx));
84705 }
84706
84707 diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
84708 index cc9102c..7d3888e 100644
84709 --- a/net/rxrpc/ar-output.c
84710 +++ b/net/rxrpc/ar-output.c
84711 @@ -680,9 +680,9 @@ static int rxrpc_send_data(struct kiocb *iocb,
84712 sp->hdr.cid = call->cid;
84713 sp->hdr.callNumber = call->call_id;
84714 sp->hdr.seq =
84715 - htonl(atomic_inc_return(&call->sequence));
84716 + htonl(atomic_inc_return_unchecked(&call->sequence));
84717 sp->hdr.serial =
84718 - htonl(atomic_inc_return(&conn->serial));
84719 + htonl(atomic_inc_return_unchecked(&conn->serial));
84720 sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
84721 sp->hdr.userStatus = 0;
84722 sp->hdr.securityIndex = conn->security_ix;
84723 diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c
84724 index edc026c..4bd4e2d 100644
84725 --- a/net/rxrpc/ar-peer.c
84726 +++ b/net/rxrpc/ar-peer.c
84727 @@ -86,7 +86,7 @@ static struct rxrpc_peer *rxrpc_alloc_peer(struct sockaddr_rxrpc *srx,
84728 INIT_LIST_HEAD(&peer->error_targets);
84729 spin_lock_init(&peer->lock);
84730 atomic_set(&peer->usage, 1);
84731 - peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
84732 + peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
84733 memcpy(&peer->srx, srx, sizeof(*srx));
84734
84735 rxrpc_assess_MTU_size(peer);
84736 diff --git a/net/rxrpc/ar-proc.c b/net/rxrpc/ar-proc.c
84737 index 38047f7..9f48511 100644
84738 --- a/net/rxrpc/ar-proc.c
84739 +++ b/net/rxrpc/ar-proc.c
84740 @@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
84741 atomic_read(&conn->usage),
84742 rxrpc_conn_states[conn->state],
84743 key_serial(conn->key),
84744 - atomic_read(&conn->serial),
84745 - atomic_read(&conn->hi_serial));
84746 + atomic_read_unchecked(&conn->serial),
84747 + atomic_read_unchecked(&conn->hi_serial));
84748
84749 return 0;
84750 }
84751 diff --git a/net/rxrpc/ar-transport.c b/net/rxrpc/ar-transport.c
84752 index 0936e1a..437c640 100644
84753 --- a/net/rxrpc/ar-transport.c
84754 +++ b/net/rxrpc/ar-transport.c
84755 @@ -46,7 +46,7 @@ static struct rxrpc_transport *rxrpc_alloc_transport(struct rxrpc_local *local,
84756 spin_lock_init(&trans->client_lock);
84757 rwlock_init(&trans->conn_lock);
84758 atomic_set(&trans->usage, 1);
84759 - trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
84760 + trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
84761
84762 if (peer->srx.transport.family == AF_INET) {
84763 switch (peer->srx.transport_type) {
84764 diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
84765 index 713ac59..306f6ae 100644
84766 --- a/net/rxrpc/rxkad.c
84767 +++ b/net/rxrpc/rxkad.c
84768 @@ -210,6 +210,8 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
84769 u16 check;
84770 int nsg;
84771
84772 + pax_track_stack();
84773 +
84774 sp = rxrpc_skb(skb);
84775
84776 _enter("");
84777 @@ -337,6 +339,8 @@ static int rxkad_verify_packet_auth(const struct rxrpc_call *call,
84778 u16 check;
84779 int nsg;
84780
84781 + pax_track_stack();
84782 +
84783 _enter("");
84784
84785 sp = rxrpc_skb(skb);
84786 @@ -609,7 +613,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
84787
84788 len = iov[0].iov_len + iov[1].iov_len;
84789
84790 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
84791 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
84792 _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
84793
84794 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
84795 @@ -659,7 +663,7 @@ static int rxkad_send_response(struct rxrpc_connection *conn,
84796
84797 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
84798
84799 - hdr->serial = htonl(atomic_inc_return(&conn->serial));
84800 + hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
84801 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
84802
84803 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
84804 diff --git a/net/sctp/auth.c b/net/sctp/auth.c
84805 index 914c419..7a16d2c 100644
84806 --- a/net/sctp/auth.c
84807 +++ b/net/sctp/auth.c
84808 @@ -81,7 +81,7 @@ static struct sctp_auth_bytes *sctp_auth_create_key(__u32 key_len, gfp_t gfp)
84809 struct sctp_auth_bytes *key;
84810
84811 /* Verify that we are not going to overflow INT_MAX */
84812 - if ((INT_MAX - key_len) < sizeof(struct sctp_auth_bytes))
84813 + if (key_len > (INT_MAX - sizeof(struct sctp_auth_bytes)))
84814 return NULL;
84815
84816 /* Allocate the shared key */
84817 diff --git a/net/sctp/proc.c b/net/sctp/proc.c
84818 index d093cbf..9fc36fc 100644
84819 --- a/net/sctp/proc.c
84820 +++ b/net/sctp/proc.c
84821 @@ -213,7 +213,12 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v)
84822 sctp_for_each_hentry(epb, node, &head->chain) {
84823 ep = sctp_ep(epb);
84824 sk = epb->sk;
84825 - seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk,
84826 + seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ",
84827 +#ifdef CONFIG_GRKERNSEC_HIDESYM
84828 + NULL, NULL,
84829 +#else
84830 + ep, sk,
84831 +#endif
84832 sctp_sk(sk)->type, sk->sk_state, hash,
84833 epb->bind_addr.port,
84834 sock_i_uid(sk), sock_i_ino(sk));
84835 @@ -320,7 +325,12 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
84836 seq_printf(seq,
84837 "%8p %8p %-3d %-3d %-2d %-4d "
84838 "%4d %8d %8d %7d %5lu %-5d %5d ",
84839 - assoc, sk, sctp_sk(sk)->type, sk->sk_state,
84840 +#ifdef CONFIG_GRKERNSEC_HIDESYM
84841 + NULL, NULL,
84842 +#else
84843 + assoc, sk,
84844 +#endif
84845 + sctp_sk(sk)->type, sk->sk_state,
84846 assoc->state, hash,
84847 assoc->assoc_id,
84848 assoc->sndbuf_used,
84849 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
84850 index 3a95fcb..c40fc1d 100644
84851 --- a/net/sctp/socket.c
84852 +++ b/net/sctp/socket.c
84853 @@ -5802,7 +5802,6 @@ pp_found:
84854 */
84855 int reuse = sk->sk_reuse;
84856 struct sock *sk2;
84857 - struct hlist_node *node;
84858
84859 SCTP_DEBUG_PRINTK("sctp_get_port() found a possible match\n");
84860 if (pp->fastreuse && sk->sk_reuse &&
84861 diff --git a/net/socket.c b/net/socket.c
84862 index d449812..4ac08d3c 100644
84863 --- a/net/socket.c
84864 +++ b/net/socket.c
84865 @@ -87,6 +87,7 @@
84866 #include <linux/wireless.h>
84867 #include <linux/nsproxy.h>
84868 #include <linux/magic.h>
84869 +#include <linux/in.h>
84870
84871 #include <asm/uaccess.h>
84872 #include <asm/unistd.h>
84873 @@ -97,6 +98,21 @@
84874 #include <net/sock.h>
84875 #include <linux/netfilter.h>
84876
84877 +extern void gr_attach_curr_ip(const struct sock *sk);
84878 +extern int gr_handle_sock_all(const int family, const int type,
84879 + const int protocol);
84880 +extern int gr_handle_sock_server(const struct sockaddr *sck);
84881 +extern int gr_handle_sock_server_other(const struct sock *sck);
84882 +extern int gr_handle_sock_client(const struct sockaddr *sck);
84883 +extern int gr_search_connect(struct socket * sock,
84884 + struct sockaddr_in * addr);
84885 +extern int gr_search_bind(struct socket * sock,
84886 + struct sockaddr_in * addr);
84887 +extern int gr_search_listen(struct socket * sock);
84888 +extern int gr_search_accept(struct socket * sock);
84889 +extern int gr_search_socket(const int domain, const int type,
84890 + const int protocol);
84891 +
84892 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
84893 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
84894 unsigned long nr_segs, loff_t pos);
84895 @@ -298,7 +314,7 @@ static int sockfs_get_sb(struct file_system_type *fs_type,
84896 mnt);
84897 }
84898
84899 -static struct vfsmount *sock_mnt __read_mostly;
84900 +struct vfsmount *sock_mnt __read_mostly;
84901
84902 static struct file_system_type sock_fs_type = {
84903 .name = "sockfs",
84904 @@ -1154,6 +1170,8 @@ static int __sock_create(struct net *net, int family, int type, int protocol,
84905 return -EAFNOSUPPORT;
84906 if (type < 0 || type >= SOCK_MAX)
84907 return -EINVAL;
84908 + if (protocol < 0)
84909 + return -EINVAL;
84910
84911 /* Compatibility.
84912
84913 @@ -1283,6 +1301,16 @@ SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
84914 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
84915 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
84916
84917 + if(!gr_search_socket(family, type, protocol)) {
84918 + retval = -EACCES;
84919 + goto out;
84920 + }
84921 +
84922 + if (gr_handle_sock_all(family, type, protocol)) {
84923 + retval = -EACCES;
84924 + goto out;
84925 + }
84926 +
84927 retval = sock_create(family, type, protocol, &sock);
84928 if (retval < 0)
84929 goto out;
84930 @@ -1415,6 +1443,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
84931 if (sock) {
84932 err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
84933 if (err >= 0) {
84934 + if (gr_handle_sock_server((struct sockaddr *)&address)) {
84935 + err = -EACCES;
84936 + goto error;
84937 + }
84938 + err = gr_search_bind(sock, (struct sockaddr_in *)&address);
84939 + if (err)
84940 + goto error;
84941 +
84942 err = security_socket_bind(sock,
84943 (struct sockaddr *)&address,
84944 addrlen);
84945 @@ -1423,6 +1459,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
84946 (struct sockaddr *)
84947 &address, addrlen);
84948 }
84949 +error:
84950 fput_light(sock->file, fput_needed);
84951 }
84952 return err;
84953 @@ -1446,10 +1483,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
84954 if ((unsigned)backlog > somaxconn)
84955 backlog = somaxconn;
84956
84957 + if (gr_handle_sock_server_other(sock->sk)) {
84958 + err = -EPERM;
84959 + goto error;
84960 + }
84961 +
84962 + err = gr_search_listen(sock);
84963 + if (err)
84964 + goto error;
84965 +
84966 err = security_socket_listen(sock, backlog);
84967 if (!err)
84968 err = sock->ops->listen(sock, backlog);
84969
84970 +error:
84971 fput_light(sock->file, fput_needed);
84972 }
84973 return err;
84974 @@ -1492,6 +1539,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
84975 newsock->type = sock->type;
84976 newsock->ops = sock->ops;
84977
84978 + if (gr_handle_sock_server_other(sock->sk)) {
84979 + err = -EPERM;
84980 + sock_release(newsock);
84981 + goto out_put;
84982 + }
84983 +
84984 + err = gr_search_accept(sock);
84985 + if (err) {
84986 + sock_release(newsock);
84987 + goto out_put;
84988 + }
84989 +
84990 /*
84991 * We don't need try_module_get here, as the listening socket (sock)
84992 * has the protocol module (sock->ops->owner) held.
84993 @@ -1534,6 +1593,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
84994 fd_install(newfd, newfile);
84995 err = newfd;
84996
84997 + gr_attach_curr_ip(newsock->sk);
84998 +
84999 out_put:
85000 fput_light(sock->file, fput_needed);
85001 out:
85002 @@ -1571,6 +1632,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
85003 int, addrlen)
85004 {
85005 struct socket *sock;
85006 + struct sockaddr *sck;
85007 struct sockaddr_storage address;
85008 int err, fput_needed;
85009
85010 @@ -1581,6 +1643,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
85011 if (err < 0)
85012 goto out_put;
85013
85014 + sck = (struct sockaddr *)&address;
85015 +
85016 + if (gr_handle_sock_client(sck)) {
85017 + err = -EACCES;
85018 + goto out_put;
85019 + }
85020 +
85021 + err = gr_search_connect(sock, (struct sockaddr_in *)sck);
85022 + if (err)
85023 + goto out_put;
85024 +
85025 err =
85026 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
85027 if (err)
85028 @@ -1882,6 +1955,8 @@ SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned, flags)
85029 int err, ctl_len, iov_size, total_len;
85030 int fput_needed;
85031
85032 + pax_track_stack();
85033 +
85034 err = -EFAULT;
85035 if (MSG_CMSG_COMPAT & flags) {
85036 if (get_compat_msghdr(&msg_sys, msg_compat))
85037 @@ -2022,7 +2097,7 @@ SYSCALL_DEFINE3(recvmsg, int, fd, struct msghdr __user *, msg,
85038 * kernel msghdr to use the kernel address space)
85039 */
85040
85041 - uaddr = (__force void __user *)msg_sys.msg_name;
85042 + uaddr = (void __force_user *)msg_sys.msg_name;
85043 uaddr_len = COMPAT_NAMELEN(msg);
85044 if (MSG_CMSG_COMPAT & flags) {
85045 err = verify_compat_iovec(&msg_sys, iov,
85046 diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
85047 index ac94477..8afe5c3 100644
85048 --- a/net/sunrpc/sched.c
85049 +++ b/net/sunrpc/sched.c
85050 @@ -234,10 +234,10 @@ static int rpc_wait_bit_killable(void *word)
85051 #ifdef RPC_DEBUG
85052 static void rpc_task_set_debuginfo(struct rpc_task *task)
85053 {
85054 - static atomic_t rpc_pid;
85055 + static atomic_unchecked_t rpc_pid;
85056
85057 task->tk_magic = RPC_TASK_MAGIC_ID;
85058 - task->tk_pid = atomic_inc_return(&rpc_pid);
85059 + task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
85060 }
85061 #else
85062 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
85063 diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c
85064 index 35fb68b..236a8bf 100644
85065 --- a/net/sunrpc/xprtrdma/svc_rdma.c
85066 +++ b/net/sunrpc/xprtrdma/svc_rdma.c
85067 @@ -59,15 +59,15 @@ unsigned int svcrdma_max_req_size = RPCRDMA_MAX_REQ_SIZE;
85068 static unsigned int min_max_inline = 4096;
85069 static unsigned int max_max_inline = 65536;
85070
85071 -atomic_t rdma_stat_recv;
85072 -atomic_t rdma_stat_read;
85073 -atomic_t rdma_stat_write;
85074 -atomic_t rdma_stat_sq_starve;
85075 -atomic_t rdma_stat_rq_starve;
85076 -atomic_t rdma_stat_rq_poll;
85077 -atomic_t rdma_stat_rq_prod;
85078 -atomic_t rdma_stat_sq_poll;
85079 -atomic_t rdma_stat_sq_prod;
85080 +atomic_unchecked_t rdma_stat_recv;
85081 +atomic_unchecked_t rdma_stat_read;
85082 +atomic_unchecked_t rdma_stat_write;
85083 +atomic_unchecked_t rdma_stat_sq_starve;
85084 +atomic_unchecked_t rdma_stat_rq_starve;
85085 +atomic_unchecked_t rdma_stat_rq_poll;
85086 +atomic_unchecked_t rdma_stat_rq_prod;
85087 +atomic_unchecked_t rdma_stat_sq_poll;
85088 +atomic_unchecked_t rdma_stat_sq_prod;
85089
85090 /* Temporary NFS request map and context caches */
85091 struct kmem_cache *svc_rdma_map_cachep;
85092 @@ -105,7 +105,7 @@ static int read_reset_stat(ctl_table *table, int write,
85093 len -= *ppos;
85094 if (len > *lenp)
85095 len = *lenp;
85096 - if (len && copy_to_user(buffer, str_buf, len))
85097 + if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
85098 return -EFAULT;
85099 *lenp = len;
85100 *ppos += len;
85101 @@ -149,63 +149,63 @@ static ctl_table svcrdma_parm_table[] = {
85102 {
85103 .procname = "rdma_stat_read",
85104 .data = &rdma_stat_read,
85105 - .maxlen = sizeof(atomic_t),
85106 + .maxlen = sizeof(atomic_unchecked_t),
85107 .mode = 0644,
85108 .proc_handler = &read_reset_stat,
85109 },
85110 {
85111 .procname = "rdma_stat_recv",
85112 .data = &rdma_stat_recv,
85113 - .maxlen = sizeof(atomic_t),
85114 + .maxlen = sizeof(atomic_unchecked_t),
85115 .mode = 0644,
85116 .proc_handler = &read_reset_stat,
85117 },
85118 {
85119 .procname = "rdma_stat_write",
85120 .data = &rdma_stat_write,
85121 - .maxlen = sizeof(atomic_t),
85122 + .maxlen = sizeof(atomic_unchecked_t),
85123 .mode = 0644,
85124 .proc_handler = &read_reset_stat,
85125 },
85126 {
85127 .procname = "rdma_stat_sq_starve",
85128 .data = &rdma_stat_sq_starve,
85129 - .maxlen = sizeof(atomic_t),
85130 + .maxlen = sizeof(atomic_unchecked_t),
85131 .mode = 0644,
85132 .proc_handler = &read_reset_stat,
85133 },
85134 {
85135 .procname = "rdma_stat_rq_starve",
85136 .data = &rdma_stat_rq_starve,
85137 - .maxlen = sizeof(atomic_t),
85138 + .maxlen = sizeof(atomic_unchecked_t),
85139 .mode = 0644,
85140 .proc_handler = &read_reset_stat,
85141 },
85142 {
85143 .procname = "rdma_stat_rq_poll",
85144 .data = &rdma_stat_rq_poll,
85145 - .maxlen = sizeof(atomic_t),
85146 + .maxlen = sizeof(atomic_unchecked_t),
85147 .mode = 0644,
85148 .proc_handler = &read_reset_stat,
85149 },
85150 {
85151 .procname = "rdma_stat_rq_prod",
85152 .data = &rdma_stat_rq_prod,
85153 - .maxlen = sizeof(atomic_t),
85154 + .maxlen = sizeof(atomic_unchecked_t),
85155 .mode = 0644,
85156 .proc_handler = &read_reset_stat,
85157 },
85158 {
85159 .procname = "rdma_stat_sq_poll",
85160 .data = &rdma_stat_sq_poll,
85161 - .maxlen = sizeof(atomic_t),
85162 + .maxlen = sizeof(atomic_unchecked_t),
85163 .mode = 0644,
85164 .proc_handler = &read_reset_stat,
85165 },
85166 {
85167 .procname = "rdma_stat_sq_prod",
85168 .data = &rdma_stat_sq_prod,
85169 - .maxlen = sizeof(atomic_t),
85170 + .maxlen = sizeof(atomic_unchecked_t),
85171 .mode = 0644,
85172 .proc_handler = &read_reset_stat,
85173 },
85174 diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
85175 index 9e88438..8ed5cf0 100644
85176 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
85177 +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
85178 @@ -495,7 +495,7 @@ next_sge:
85179 svc_rdma_put_context(ctxt, 0);
85180 goto out;
85181 }
85182 - atomic_inc(&rdma_stat_read);
85183 + atomic_inc_unchecked(&rdma_stat_read);
85184
85185 if (read_wr.num_sge < chl_map->ch[ch_no].count) {
85186 chl_map->ch[ch_no].count -= read_wr.num_sge;
85187 @@ -606,7 +606,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
85188 dto_q);
85189 list_del_init(&ctxt->dto_q);
85190 } else {
85191 - atomic_inc(&rdma_stat_rq_starve);
85192 + atomic_inc_unchecked(&rdma_stat_rq_starve);
85193 clear_bit(XPT_DATA, &xprt->xpt_flags);
85194 ctxt = NULL;
85195 }
85196 @@ -626,7 +626,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
85197 dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
85198 ctxt, rdma_xprt, rqstp, ctxt->wc_status);
85199 BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
85200 - atomic_inc(&rdma_stat_recv);
85201 + atomic_inc_unchecked(&rdma_stat_recv);
85202
85203 /* Build up the XDR from the receive buffers. */
85204 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
85205 diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
85206 index f11be72..7aad4e8 100644
85207 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
85208 +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
85209 @@ -328,7 +328,7 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
85210 write_wr.wr.rdma.remote_addr = to;
85211
85212 /* Post It */
85213 - atomic_inc(&rdma_stat_write);
85214 + atomic_inc_unchecked(&rdma_stat_write);
85215 if (svc_rdma_send(xprt, &write_wr))
85216 goto err;
85217 return 0;
85218 diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
85219 index 3fa5751..030ba89 100644
85220 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
85221 +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
85222 @@ -292,7 +292,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
85223 return;
85224
85225 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
85226 - atomic_inc(&rdma_stat_rq_poll);
85227 + atomic_inc_unchecked(&rdma_stat_rq_poll);
85228
85229 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
85230 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
85231 @@ -314,7 +314,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
85232 }
85233
85234 if (ctxt)
85235 - atomic_inc(&rdma_stat_rq_prod);
85236 + atomic_inc_unchecked(&rdma_stat_rq_prod);
85237
85238 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
85239 /*
85240 @@ -386,7 +386,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
85241 return;
85242
85243 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
85244 - atomic_inc(&rdma_stat_sq_poll);
85245 + atomic_inc_unchecked(&rdma_stat_sq_poll);
85246 while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
85247 if (wc.status != IB_WC_SUCCESS)
85248 /* Close the transport */
85249 @@ -404,7 +404,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
85250 }
85251
85252 if (ctxt)
85253 - atomic_inc(&rdma_stat_sq_prod);
85254 + atomic_inc_unchecked(&rdma_stat_sq_prod);
85255 }
85256
85257 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
85258 @@ -1260,7 +1260,7 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
85259 spin_lock_bh(&xprt->sc_lock);
85260 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
85261 spin_unlock_bh(&xprt->sc_lock);
85262 - atomic_inc(&rdma_stat_sq_starve);
85263 + atomic_inc_unchecked(&rdma_stat_sq_starve);
85264
85265 /* See if we can opportunistically reap SQ WR to make room */
85266 sq_cq_reap(xprt);
85267 diff --git a/net/sysctl_net.c b/net/sysctl_net.c
85268 index 0b15d72..7934fbb 100644
85269 --- a/net/sysctl_net.c
85270 +++ b/net/sysctl_net.c
85271 @@ -46,7 +46,7 @@ static int net_ctl_permissions(struct ctl_table_root *root,
85272 struct ctl_table *table)
85273 {
85274 /* Allow network administrator to have same access as root. */
85275 - if (capable(CAP_NET_ADMIN)) {
85276 + if (capable_nolog(CAP_NET_ADMIN)) {
85277 int mode = (table->mode >> 6) & 7;
85278 return (mode << 6) | (mode << 3) | mode;
85279 }
85280 diff --git a/net/tipc/link.c b/net/tipc/link.c
85281 index dd4c18b..f40d38d 100644
85282 --- a/net/tipc/link.c
85283 +++ b/net/tipc/link.c
85284 @@ -1418,7 +1418,7 @@ again:
85285
85286 if (!sect_rest) {
85287 sect_rest = msg_sect[++curr_sect].iov_len;
85288 - sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
85289 + sect_crs = (const unchar __user *)msg_sect[curr_sect].iov_base;
85290 }
85291
85292 if (sect_rest < fragm_rest)
85293 @@ -1437,7 +1437,7 @@ error:
85294 }
85295 } else
85296 skb_copy_to_linear_data_offset(buf, fragm_crs,
85297 - sect_crs, sz);
85298 + (const void __force_kernel *)sect_crs, sz);
85299 sect_crs += sz;
85300 sect_rest -= sz;
85301 fragm_crs += sz;
85302 diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
85303 index 0747d8a..e8bf3f3 100644
85304 --- a/net/tipc/subscr.c
85305 +++ b/net/tipc/subscr.c
85306 @@ -104,7 +104,7 @@ static void subscr_send_event(struct subscription *sub,
85307 {
85308 struct iovec msg_sect;
85309
85310 - msg_sect.iov_base = (void *)&sub->evt;
85311 + msg_sect.iov_base = (void __force_user *)&sub->evt;
85312 msg_sect.iov_len = sizeof(struct tipc_event);
85313
85314 sub->evt.event = htohl(event, sub->swap);
85315 diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
85316 index db8d51a..608692d 100644
85317 --- a/net/unix/af_unix.c
85318 +++ b/net/unix/af_unix.c
85319 @@ -745,6 +745,12 @@ static struct sock *unix_find_other(struct net *net,
85320 err = -ECONNREFUSED;
85321 if (!S_ISSOCK(inode->i_mode))
85322 goto put_fail;
85323 +
85324 + if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
85325 + err = -EACCES;
85326 + goto put_fail;
85327 + }
85328 +
85329 u = unix_find_socket_byinode(net, inode);
85330 if (!u)
85331 goto put_fail;
85332 @@ -765,6 +771,13 @@ static struct sock *unix_find_other(struct net *net,
85333 if (u) {
85334 struct dentry *dentry;
85335 dentry = unix_sk(u)->dentry;
85336 +
85337 + if (!gr_handle_chroot_unix(u->sk_peercred.pid)) {
85338 + err = -EPERM;
85339 + sock_put(u);
85340 + goto fail;
85341 + }
85342 +
85343 if (dentry)
85344 touch_atime(unix_sk(u)->mnt, dentry);
85345 } else
85346 @@ -850,11 +863,18 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
85347 err = security_path_mknod(&nd.path, dentry, mode, 0);
85348 if (err)
85349 goto out_mknod_drop_write;
85350 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
85351 + err = -EACCES;
85352 + goto out_mknod_drop_write;
85353 + }
85354 err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0);
85355 out_mknod_drop_write:
85356 mnt_drop_write(nd.path.mnt);
85357 if (err)
85358 goto out_mknod_dput;
85359 +
85360 + gr_handle_create(dentry, nd.path.mnt);
85361 +
85362 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
85363 dput(nd.path.dentry);
85364 nd.path.dentry = dentry;
85365 @@ -2211,7 +2231,11 @@ static int unix_seq_show(struct seq_file *seq, void *v)
85366 unix_state_lock(s);
85367
85368 seq_printf(seq, "%p: %08X %08X %08X %04X %02X %5lu",
85369 +#ifdef CONFIG_GRKERNSEC_HIDESYM
85370 + NULL,
85371 +#else
85372 s,
85373 +#endif
85374 atomic_read(&s->sk_refcnt),
85375 0,
85376 s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
85377 diff --git a/net/wireless/core.h b/net/wireless/core.h
85378 index 376798f..109a61f 100644
85379 --- a/net/wireless/core.h
85380 +++ b/net/wireless/core.h
85381 @@ -27,7 +27,7 @@ struct cfg80211_registered_device {
85382 struct mutex mtx;
85383
85384 /* rfkill support */
85385 - struct rfkill_ops rfkill_ops;
85386 + rfkill_ops_no_const rfkill_ops;
85387 struct rfkill *rfkill;
85388 struct work_struct rfkill_sync;
85389
85390 diff --git a/net/wireless/wext.c b/net/wireless/wext.c
85391 index a2e4c60..0979cbe 100644
85392 --- a/net/wireless/wext.c
85393 +++ b/net/wireless/wext.c
85394 @@ -816,8 +816,7 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
85395 */
85396
85397 /* Support for very large requests */
85398 - if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
85399 - (user_length > descr->max_tokens)) {
85400 + if (user_length > descr->max_tokens) {
85401 /* Allow userspace to GET more than max so
85402 * we can support any size GET requests.
85403 * There is still a limit : -ENOMEM.
85404 @@ -854,22 +853,6 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
85405 }
85406 }
85407
85408 - if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
85409 - /*
85410 - * If this is a GET, but not NOMAX, it means that the extra
85411 - * data is not bounded by userspace, but by max_tokens. Thus
85412 - * set the length to max_tokens. This matches the extra data
85413 - * allocation.
85414 - * The driver should fill it with the number of tokens it
85415 - * provided, and it may check iwp->length rather than having
85416 - * knowledge of max_tokens. If the driver doesn't change the
85417 - * iwp->length, this ioctl just copies back max_token tokens
85418 - * filled with zeroes. Hopefully the driver isn't claiming
85419 - * them to be valid data.
85420 - */
85421 - iwp->length = descr->max_tokens;
85422 - }
85423 -
85424 err = handler(dev, info, (union iwreq_data *) iwp, extra);
85425
85426 iwp->length += essid_compat;
85427 diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
85428 index cb81ca3..e15d49a 100644
85429 --- a/net/xfrm/xfrm_policy.c
85430 +++ b/net/xfrm/xfrm_policy.c
85431 @@ -586,7 +586,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
85432 hlist_add_head(&policy->bydst, chain);
85433 xfrm_pol_hold(policy);
85434 net->xfrm.policy_count[dir]++;
85435 - atomic_inc(&flow_cache_genid);
85436 + atomic_inc_unchecked(&flow_cache_genid);
85437 if (delpol)
85438 __xfrm_policy_unlink(delpol, dir);
85439 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
85440 @@ -669,7 +669,7 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u8 type, int dir,
85441 write_unlock_bh(&xfrm_policy_lock);
85442
85443 if (ret && delete) {
85444 - atomic_inc(&flow_cache_genid);
85445 + atomic_inc_unchecked(&flow_cache_genid);
85446 xfrm_policy_kill(ret);
85447 }
85448 return ret;
85449 @@ -710,7 +710,7 @@ struct xfrm_policy *xfrm_policy_byid(struct net *net, u8 type, int dir, u32 id,
85450 write_unlock_bh(&xfrm_policy_lock);
85451
85452 if (ret && delete) {
85453 - atomic_inc(&flow_cache_genid);
85454 + atomic_inc_unchecked(&flow_cache_genid);
85455 xfrm_policy_kill(ret);
85456 }
85457 return ret;
85458 @@ -824,7 +824,7 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
85459 }
85460
85461 }
85462 - atomic_inc(&flow_cache_genid);
85463 + atomic_inc_unchecked(&flow_cache_genid);
85464 out:
85465 write_unlock_bh(&xfrm_policy_lock);
85466 return err;
85467 @@ -1088,7 +1088,7 @@ int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
85468 write_unlock_bh(&xfrm_policy_lock);
85469 if (pol) {
85470 if (dir < XFRM_POLICY_MAX)
85471 - atomic_inc(&flow_cache_genid);
85472 + atomic_inc_unchecked(&flow_cache_genid);
85473 xfrm_policy_kill(pol);
85474 return 0;
85475 }
85476 @@ -1477,7 +1477,7 @@ free_dst:
85477 goto out;
85478 }
85479
85480 -static int inline
85481 +static inline int
85482 xfrm_dst_alloc_copy(void **target, void *src, int size)
85483 {
85484 if (!*target) {
85485 @@ -1489,7 +1489,7 @@ xfrm_dst_alloc_copy(void **target, void *src, int size)
85486 return 0;
85487 }
85488
85489 -static int inline
85490 +static inline int
85491 xfrm_dst_update_parent(struct dst_entry *dst, struct xfrm_selector *sel)
85492 {
85493 #ifdef CONFIG_XFRM_SUB_POLICY
85494 @@ -1501,7 +1501,7 @@ xfrm_dst_update_parent(struct dst_entry *dst, struct xfrm_selector *sel)
85495 #endif
85496 }
85497
85498 -static int inline
85499 +static inline int
85500 xfrm_dst_update_origin(struct dst_entry *dst, struct flowi *fl)
85501 {
85502 #ifdef CONFIG_XFRM_SUB_POLICY
85503 @@ -1537,7 +1537,7 @@ int __xfrm_lookup(struct net *net, struct dst_entry **dst_p, struct flowi *fl,
85504 u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
85505
85506 restart:
85507 - genid = atomic_read(&flow_cache_genid);
85508 + genid = atomic_read_unchecked(&flow_cache_genid);
85509 policy = NULL;
85510 for (pi = 0; pi < ARRAY_SIZE(pols); pi++)
85511 pols[pi] = NULL;
85512 @@ -1680,7 +1680,7 @@ restart:
85513 goto error;
85514 }
85515 if (nx == -EAGAIN ||
85516 - genid != atomic_read(&flow_cache_genid)) {
85517 + genid != atomic_read_unchecked(&flow_cache_genid)) {
85518 xfrm_pols_put(pols, npols);
85519 goto restart;
85520 }
85521 diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
85522 index b95a2d6..85c4d78 100644
85523 --- a/net/xfrm/xfrm_user.c
85524 +++ b/net/xfrm/xfrm_user.c
85525 @@ -1169,6 +1169,8 @@ static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
85526 struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
85527 int i;
85528
85529 + pax_track_stack();
85530 +
85531 if (xp->xfrm_nr == 0)
85532 return 0;
85533
85534 @@ -1784,6 +1786,8 @@ static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
85535 int err;
85536 int n = 0;
85537
85538 + pax_track_stack();
85539 +
85540 if (attrs[XFRMA_MIGRATE] == NULL)
85541 return -EINVAL;
85542
85543 diff --git a/samples/kobject/kset-example.c b/samples/kobject/kset-example.c
85544 index 45b7d56..19e828c 100644
85545 --- a/samples/kobject/kset-example.c
85546 +++ b/samples/kobject/kset-example.c
85547 @@ -87,7 +87,7 @@ static ssize_t foo_attr_store(struct kobject *kobj,
85548 }
85549
85550 /* Our custom sysfs_ops that we will associate with our ktype later on */
85551 -static struct sysfs_ops foo_sysfs_ops = {
85552 +static const struct sysfs_ops foo_sysfs_ops = {
85553 .show = foo_attr_show,
85554 .store = foo_attr_store,
85555 };
85556 diff --git a/scripts/Makefile.build b/scripts/Makefile.build
85557 index 341b589..405aed3 100644
85558 --- a/scripts/Makefile.build
85559 +++ b/scripts/Makefile.build
85560 @@ -59,7 +59,7 @@ endif
85561 endif
85562
85563 # Do not include host rules unless needed
85564 -ifneq ($(hostprogs-y)$(hostprogs-m),)
85565 +ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m),)
85566 include scripts/Makefile.host
85567 endif
85568
85569 diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean
85570 index 6f89fbb..53adc9c 100644
85571 --- a/scripts/Makefile.clean
85572 +++ b/scripts/Makefile.clean
85573 @@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subdir-ymn))
85574 __clean-files := $(extra-y) $(always) \
85575 $(targets) $(clean-files) \
85576 $(host-progs) \
85577 - $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
85578 + $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
85579 + $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
85580
85581 # as clean-files is given relative to the current directory, this adds
85582 # a $(obj) prefix, except for absolute paths
85583 diff --git a/scripts/Makefile.host b/scripts/Makefile.host
85584 index 1ac414f..a1c1451 100644
85585 --- a/scripts/Makefile.host
85586 +++ b/scripts/Makefile.host
85587 @@ -31,6 +31,7 @@
85588 # Note: Shared libraries consisting of C++ files are not supported
85589
85590 __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
85591 +__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
85592
85593 # C code
85594 # Executables compiled from a single .c file
85595 @@ -54,6 +55,7 @@ host-cxxobjs := $(sort $(foreach m,$(host-cxxmulti),$($(m)-cxxobjs)))
85596 # Shared libaries (only .c supported)
85597 # Shared libraries (.so) - all .so files referenced in "xxx-objs"
85598 host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
85599 +host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
85600 # Remove .so files from "xxx-objs"
85601 host-cobjs := $(filter-out %.so,$(host-cobjs))
85602
85603 diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c
85604 index 6bf21f8..c0546b3 100644
85605 --- a/scripts/basic/fixdep.c
85606 +++ b/scripts/basic/fixdep.c
85607 @@ -162,7 +162,7 @@ static void grow_config(int len)
85608 /*
85609 * Lookup a value in the configuration string.
85610 */
85611 -static int is_defined_config(const char * name, int len)
85612 +static int is_defined_config(const char * name, unsigned int len)
85613 {
85614 const char * pconfig;
85615 const char * plast = str_config + len_config - len;
85616 @@ -199,7 +199,7 @@ static void clear_config(void)
85617 /*
85618 * Record the use of a CONFIG_* word.
85619 */
85620 -static void use_config(char *m, int slen)
85621 +static void use_config(char *m, unsigned int slen)
85622 {
85623 char s[PATH_MAX];
85624 char *p;
85625 @@ -222,9 +222,9 @@ static void use_config(char *m, int slen)
85626
85627 static void parse_config_file(char *map, size_t len)
85628 {
85629 - int *end = (int *) (map + len);
85630 + unsigned int *end = (unsigned int *) (map + len);
85631 /* start at +1, so that p can never be < map */
85632 - int *m = (int *) map + 1;
85633 + unsigned int *m = (unsigned int *) map + 1;
85634 char *p, *q;
85635
85636 for (; m < end; m++) {
85637 @@ -371,7 +371,7 @@ static void print_deps(void)
85638 static void traps(void)
85639 {
85640 static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
85641 - int *p = (int *)test;
85642 + unsigned int *p = (unsigned int *)test;
85643
85644 if (*p != INT_CONF) {
85645 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
85646 diff --git a/scripts/gcc-plugin.sh b/scripts/gcc-plugin.sh
85647 new file mode 100644
85648 index 0000000..8729101
85649 --- /dev/null
85650 +++ b/scripts/gcc-plugin.sh
85651 @@ -0,0 +1,2 @@
85652 +#!/bin/sh
85653 +echo -e "#include \"gcc-plugin.h\"\n#include \"tree.h\"\n#include \"tm.h\"\n#include \"rtl.h\"" | $1 -x c -shared - -o /dev/null -I`$2 -print-file-name=plugin`/include >/dev/null 2>&1 && echo "y"
85654 diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
85655 index 62a9025..65b82ad 100644
85656 --- a/scripts/mod/file2alias.c
85657 +++ b/scripts/mod/file2alias.c
85658 @@ -72,7 +72,7 @@ static void device_id_check(const char *modname, const char *device_id,
85659 unsigned long size, unsigned long id_size,
85660 void *symval)
85661 {
85662 - int i;
85663 + unsigned int i;
85664
85665 if (size % id_size || size < id_size) {
85666 if (cross_build != 0)
85667 @@ -102,7 +102,7 @@ static void device_id_check(const char *modname, const char *device_id,
85668 /* USB is special because the bcdDevice can be matched against a numeric range */
85669 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
85670 static void do_usb_entry(struct usb_device_id *id,
85671 - unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
85672 + unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
85673 unsigned char range_lo, unsigned char range_hi,
85674 struct module *mod)
85675 {
85676 @@ -151,7 +151,7 @@ static void do_usb_entry_multi(struct usb_device_id *id, struct module *mod)
85677 {
85678 unsigned int devlo, devhi;
85679 unsigned char chi, clo;
85680 - int ndigits;
85681 + unsigned int ndigits;
85682
85683 id->match_flags = TO_NATIVE(id->match_flags);
85684 id->idVendor = TO_NATIVE(id->idVendor);
85685 @@ -368,7 +368,7 @@ static void do_pnp_device_entry(void *symval, unsigned long size,
85686 for (i = 0; i < count; i++) {
85687 const char *id = (char *)devs[i].id;
85688 char acpi_id[sizeof(devs[0].id)];
85689 - int j;
85690 + unsigned int j;
85691
85692 buf_printf(&mod->dev_table_buf,
85693 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
85694 @@ -398,7 +398,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
85695
85696 for (j = 0; j < PNP_MAX_DEVICES; j++) {
85697 const char *id = (char *)card->devs[j].id;
85698 - int i2, j2;
85699 + unsigned int i2, j2;
85700 int dup = 0;
85701
85702 if (!id[0])
85703 @@ -424,7 +424,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
85704 /* add an individual alias for every device entry */
85705 if (!dup) {
85706 char acpi_id[sizeof(card->devs[0].id)];
85707 - int k;
85708 + unsigned int k;
85709
85710 buf_printf(&mod->dev_table_buf,
85711 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
85712 @@ -699,7 +699,7 @@ static void dmi_ascii_filter(char *d, const char *s)
85713 static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
85714 char *alias)
85715 {
85716 - int i, j;
85717 + unsigned int i, j;
85718
85719 sprintf(alias, "dmi*");
85720
85721 diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
85722 index 03efeab..0888989 100644
85723 --- a/scripts/mod/modpost.c
85724 +++ b/scripts/mod/modpost.c
85725 @@ -835,6 +835,7 @@ enum mismatch {
85726 INIT_TO_EXIT,
85727 EXIT_TO_INIT,
85728 EXPORT_TO_INIT_EXIT,
85729 + DATA_TO_TEXT
85730 };
85731
85732 struct sectioncheck {
85733 @@ -920,6 +921,12 @@ const struct sectioncheck sectioncheck[] = {
85734 .fromsec = { "__ksymtab*", NULL },
85735 .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
85736 .mismatch = EXPORT_TO_INIT_EXIT
85737 +},
85738 +/* Do not reference code from writable data */
85739 +{
85740 + .fromsec = { DATA_SECTIONS, NULL },
85741 + .tosec = { TEXT_SECTIONS, NULL },
85742 + .mismatch = DATA_TO_TEXT
85743 }
85744 };
85745
85746 @@ -1024,10 +1031,10 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
85747 continue;
85748 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
85749 continue;
85750 - if (sym->st_value == addr)
85751 - return sym;
85752 /* Find a symbol nearby - addr are maybe negative */
85753 d = sym->st_value - addr;
85754 + if (d == 0)
85755 + return sym;
85756 if (d < 0)
85757 d = addr - sym->st_value;
85758 if (d < distance) {
85759 @@ -1268,6 +1275,14 @@ static void report_sec_mismatch(const char *modname, enum mismatch mismatch,
85760 "Fix this by removing the %sannotation of %s "
85761 "or drop the export.\n",
85762 tosym, sec2annotation(tosec), sec2annotation(tosec), tosym);
85763 + case DATA_TO_TEXT:
85764 +/*
85765 + fprintf(stderr,
85766 + "The variable %s references\n"
85767 + "the %s %s%s%s\n",
85768 + fromsym, to, sec2annotation(tosec), tosym, to_p);
85769 +*/
85770 + break;
85771 case NO_MISMATCH:
85772 /* To get warnings on missing members */
85773 break;
85774 @@ -1495,7 +1510,7 @@ static void section_rel(const char *modname, struct elf_info *elf,
85775 static void check_sec_ref(struct module *mod, const char *modname,
85776 struct elf_info *elf)
85777 {
85778 - int i;
85779 + unsigned int i;
85780 Elf_Shdr *sechdrs = elf->sechdrs;
85781
85782 /* Walk through all sections */
85783 @@ -1651,7 +1666,7 @@ void __attribute__((format(printf, 2, 3))) buf_printf(struct buffer *buf,
85784 va_end(ap);
85785 }
85786
85787 -void buf_write(struct buffer *buf, const char *s, int len)
85788 +void buf_write(struct buffer *buf, const char *s, unsigned int len)
85789 {
85790 if (buf->size - buf->pos < len) {
85791 buf->size += len + SZ;
85792 @@ -1863,7 +1878,7 @@ static void write_if_changed(struct buffer *b, const char *fname)
85793 if (fstat(fileno(file), &st) < 0)
85794 goto close_write;
85795
85796 - if (st.st_size != b->pos)
85797 + if (st.st_size != (off_t)b->pos)
85798 goto close_write;
85799
85800 tmp = NOFAIL(malloc(b->pos));
85801 diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
85802 index 09f58e3..4b66092 100644
85803 --- a/scripts/mod/modpost.h
85804 +++ b/scripts/mod/modpost.h
85805 @@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *expr);
85806
85807 struct buffer {
85808 char *p;
85809 - int pos;
85810 - int size;
85811 + unsigned int pos;
85812 + unsigned int size;
85813 };
85814
85815 void __attribute__((format(printf, 2, 3)))
85816 buf_printf(struct buffer *buf, const char *fmt, ...);
85817
85818 void
85819 -buf_write(struct buffer *buf, const char *s, int len);
85820 +buf_write(struct buffer *buf, const char *s, unsigned int len);
85821
85822 struct module {
85823 struct module *next;
85824 diff --git a/scripts/mod/sumversion.c b/scripts/mod/sumversion.c
85825 index ecf9c7d..d52b38e 100644
85826 --- a/scripts/mod/sumversion.c
85827 +++ b/scripts/mod/sumversion.c
85828 @@ -455,7 +455,7 @@ static void write_version(const char *filename, const char *sum,
85829 goto out;
85830 }
85831
85832 - if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
85833 + if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
85834 warn("writing sum in %s failed: %s\n",
85835 filename, strerror(errno));
85836 goto out;
85837 diff --git a/scripts/package/mkspec b/scripts/package/mkspec
85838 index 47bdd2f..d4d4e93 100755
85839 --- a/scripts/package/mkspec
85840 +++ b/scripts/package/mkspec
85841 @@ -70,7 +70,7 @@ echo 'mkdir -p $RPM_BUILD_ROOT/boot $RPM_BUILD_ROOT/lib/modules'
85842 echo 'mkdir -p $RPM_BUILD_ROOT/lib/firmware'
85843 echo "%endif"
85844
85845 -echo 'INSTALL_MOD_PATH=$RPM_BUILD_ROOT make %{_smp_mflags} KBUILD_SRC= modules_install'
85846 +echo 'INSTALL_MOD_PATH=$RPM_BUILD_ROOT make %{?_smp_mflags} KBUILD_SRC= modules_install'
85847 echo "%ifarch ia64"
85848 echo 'cp $KBUILD_IMAGE $RPM_BUILD_ROOT'"/boot/efi/vmlinuz-$KERNELRELEASE"
85849 echo 'ln -s '"efi/vmlinuz-$KERNELRELEASE" '$RPM_BUILD_ROOT'"/boot/"
85850 diff --git a/scripts/pnmtologo.c b/scripts/pnmtologo.c
85851 index 5c11312..72742b5 100644
85852 --- a/scripts/pnmtologo.c
85853 +++ b/scripts/pnmtologo.c
85854 @@ -237,14 +237,14 @@ static void write_header(void)
85855 fprintf(out, " * Linux logo %s\n", logoname);
85856 fputs(" */\n\n", out);
85857 fputs("#include <linux/linux_logo.h>\n\n", out);
85858 - fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
85859 + fprintf(out, "static unsigned char %s_data[] = {\n",
85860 logoname);
85861 }
85862
85863 static void write_footer(void)
85864 {
85865 fputs("\n};\n\n", out);
85866 - fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
85867 + fprintf(out, "const struct linux_logo %s = {\n", logoname);
85868 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
85869 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
85870 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
85871 @@ -374,7 +374,7 @@ static void write_logo_clut224(void)
85872 fputs("\n};\n\n", out);
85873
85874 /* write logo clut */
85875 - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
85876 + fprintf(out, "static unsigned char %s_clut[] = {\n",
85877 logoname);
85878 write_hex_cnt = 0;
85879 for (i = 0; i < logo_clutsize; i++) {
85880 diff --git a/scripts/tags.sh b/scripts/tags.sh
85881 index d52f7a0..269eb1b 100755
85882 --- a/scripts/tags.sh
85883 +++ b/scripts/tags.sh
85884 @@ -93,6 +93,11 @@ docscope()
85885 cscope -b -f cscope.out
85886 }
85887
85888 +dogtags()
85889 +{
85890 + all_sources | gtags -f -
85891 +}
85892 +
85893 exuberant()
85894 {
85895 all_sources | xargs $1 -a \
85896 @@ -164,6 +169,10 @@ case "$1" in
85897 docscope
85898 ;;
85899
85900 + "gtags")
85901 + dogtags
85902 + ;;
85903 +
85904 "tags")
85905 rm -f tags
85906 xtags ctags
85907 diff --git a/security/Kconfig b/security/Kconfig
85908 index fb363cd..886ace4 100644
85909 --- a/security/Kconfig
85910 +++ b/security/Kconfig
85911 @@ -4,6 +4,626 @@
85912
85913 menu "Security options"
85914
85915 +source grsecurity/Kconfig
85916 +
85917 +menu "PaX"
85918 +
85919 + config ARCH_TRACK_EXEC_LIMIT
85920 + bool
85921 +
85922 + config PAX_KERNEXEC_PLUGIN
85923 + bool
85924 +
85925 + config PAX_PER_CPU_PGD
85926 + bool
85927 +
85928 + config TASK_SIZE_MAX_SHIFT
85929 + int
85930 + depends on X86_64
85931 + default 47 if !PAX_PER_CPU_PGD
85932 + default 42 if PAX_PER_CPU_PGD
85933 +
85934 + config PAX_ENABLE_PAE
85935 + bool
85936 + default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
85937 +
85938 +config PAX
85939 + bool "Enable various PaX features"
85940 + depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
85941 + help
85942 + This allows you to enable various PaX features. PaX adds
85943 + intrusion prevention mechanisms to the kernel that reduce
85944 + the risks posed by exploitable memory corruption bugs.
85945 +
85946 +menu "PaX Control"
85947 + depends on PAX
85948 +
85949 +config PAX_SOFTMODE
85950 + bool 'Support soft mode'
85951 + help
85952 + Enabling this option will allow you to run PaX in soft mode, that
85953 + is, PaX features will not be enforced by default, only on executables
85954 + marked explicitly. You must also enable PT_PAX_FLAGS or XATTR_PAX_FLAGS
85955 + support as they are the only way to mark executables for soft mode use.
85956 +
85957 + Soft mode can be activated by using the "pax_softmode=1" kernel command
85958 + line option on boot. Furthermore you can control various PaX features
85959 + at runtime via the entries in /proc/sys/kernel/pax.
85960 +
85961 +config PAX_EI_PAX
85962 + bool 'Use legacy ELF header marking'
85963 + help
85964 + Enabling this option will allow you to control PaX features on
85965 + a per executable basis via the 'chpax' utility available at
85966 + http://pax.grsecurity.net/. The control flags will be read from
85967 + an otherwise reserved part of the ELF header. This marking has
85968 + numerous drawbacks (no support for soft-mode, toolchain does not
85969 + know about the non-standard use of the ELF header) therefore it
85970 + has been deprecated in favour of PT_PAX_FLAGS and XATTR_PAX_FLAGS
85971 + support.
85972 +
85973 + If you have applications not marked by the PT_PAX_FLAGS ELF program
85974 + header and you cannot use XATTR_PAX_FLAGS then you MUST enable this
85975 + option otherwise they will not get any protection.
85976 +
85977 + Note that if you enable PT_PAX_FLAGS or XATTR_PAX_FLAGS marking
85978 + support as well, they will override the legacy EI_PAX marks.
85979 +
85980 +config PAX_PT_PAX_FLAGS
85981 + bool 'Use ELF program header marking'
85982 + help
85983 + Enabling this option will allow you to control PaX features on
85984 + a per executable basis via the 'paxctl' utility available at
85985 + http://pax.grsecurity.net/. The control flags will be read from
85986 + a PaX specific ELF program header (PT_PAX_FLAGS). This marking
85987 + has the benefits of supporting both soft mode and being fully
85988 + integrated into the toolchain (the binutils patch is available
85989 + from http://pax.grsecurity.net).
85990 +
85991 + If you have applications not marked by the PT_PAX_FLAGS ELF program
85992 + header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking
85993 + support otherwise they will not get any protection.
85994 +
85995 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
85996 + must make sure that the marks are the same if a binary has both marks.
85997 +
85998 + Note that if you enable the legacy EI_PAX marking support as well,
85999 + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
86000 +
86001 +config PAX_XATTR_PAX_FLAGS
86002 + bool 'Use filesystem extended attributes marking'
86003 + depends on EXPERT
86004 + select CIFS_XATTR if CIFS
86005 + select EXT2_FS_XATTR if EXT2_FS
86006 + select EXT3_FS_XATTR if EXT3_FS
86007 + select EXT4_FS_XATTR if EXT4_FS
86008 + select JFFS2_FS_XATTR if JFFS2_FS
86009 + select REISERFS_FS_XATTR if REISERFS_FS
86010 + select UBIFS_FS_XATTR if UBIFS_FS
86011 + help
86012 + Enabling this option will allow you to control PaX features on
86013 + a per executable basis via the 'setfattr' utility. The control
86014 + flags will be read from the user.pax.flags extended attribute of
86015 + the file. This marking has the benefit of supporting binary-only
86016 + applications that self-check themselves (e.g., skype) and would
86017 + not tolerate chpax/paxctl changes. The main drawback is that
86018 + extended attributes are not supported by some filesystems (e.g.,
86019 + isofs, squashfs, tmpfs, udf, vfat) so copying files through such
86020 + filesystems will lose the extended attributes and these PaX markings.
86021 +
86022 + If you have applications not marked by the PT_PAX_FLAGS ELF program
86023 + header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking
86024 + support otherwise they will not get any protection.
86025 +
86026 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
86027 + must make sure that the marks are the same if a binary has both marks.
86028 +
86029 + Note that if you enable the legacy EI_PAX marking support as well,
86030 + the EI_PAX marks will be overridden by the XATTR_PAX_FLAGS marks.
86031 +
86032 +choice
86033 + prompt 'MAC system integration'
86034 + default PAX_HAVE_ACL_FLAGS
86035 + help
86036 + Mandatory Access Control systems have the option of controlling
86037 + PaX flags on a per executable basis, choose the method supported
86038 + by your particular system.
86039 +
86040 + - "none": if your MAC system does not interact with PaX,
86041 + - "direct": if your MAC system defines pax_set_initial_flags() itself,
86042 + - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
86043 +
86044 + NOTE: this option is for developers/integrators only.
86045 +
86046 + config PAX_NO_ACL_FLAGS
86047 + bool 'none'
86048 +
86049 + config PAX_HAVE_ACL_FLAGS
86050 + bool 'direct'
86051 +
86052 + config PAX_HOOK_ACL_FLAGS
86053 + bool 'hook'
86054 +endchoice
86055 +
86056 +endmenu
86057 +
86058 +menu "Non-executable pages"
86059 + depends on PAX
86060 +
86061 +config PAX_NOEXEC
86062 + bool "Enforce non-executable pages"
86063 + depends on ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86
86064 + help
86065 + By design some architectures do not allow for protecting memory
86066 + pages against execution or even if they do, Linux does not make
86067 + use of this feature. In practice this means that if a page is
86068 + readable (such as the stack or heap) it is also executable.
86069 +
86070 + There is a well known exploit technique that makes use of this
86071 + fact and a common programming mistake where an attacker can
86072 + introduce code of his choice somewhere in the attacked program's
86073 + memory (typically the stack or the heap) and then execute it.
86074 +
86075 + If the attacked program was running with different (typically
86076 + higher) privileges than that of the attacker, then he can elevate
86077 + his own privilege level (e.g. get a root shell, write to files for
86078 + which he does not have write access to, etc).
86079 +
86080 + Enabling this option will let you choose from various features
86081 + that prevent the injection and execution of 'foreign' code in
86082 + a program.
86083 +
86084 + This will also break programs that rely on the old behaviour and
86085 + expect that dynamically allocated memory via the malloc() family
86086 + of functions is executable (which it is not). Notable examples
86087 + are the XFree86 4.x server, the java runtime and wine.
86088 +
86089 +config PAX_PAGEEXEC
86090 + bool "Paging based non-executable pages"
86091 + depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
86092 + select S390_SWITCH_AMODE if S390
86093 + select S390_EXEC_PROTECT if S390
86094 + select ARCH_TRACK_EXEC_LIMIT if X86_32
86095 + help
86096 + This implementation is based on the paging feature of the CPU.
86097 + On i386 without hardware non-executable bit support there is a
86098 + variable but usually low performance impact, however on Intel's
86099 + P4 core based CPUs it is very high so you should not enable this
86100 + for kernels meant to be used on such CPUs.
86101 +
86102 + On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
86103 + with hardware non-executable bit support there is no performance
86104 + impact, on ppc the impact is negligible.
86105 +
86106 + Note that several architectures require various emulations due to
86107 + badly designed userland ABIs, this will cause a performance impact
86108 + but will disappear as soon as userland is fixed. For example, ppc
86109 + userland MUST have been built with secure-plt by a recent toolchain.
86110 +
86111 +config PAX_SEGMEXEC
86112 + bool "Segmentation based non-executable pages"
86113 + depends on PAX_NOEXEC && X86_32
86114 + help
86115 + This implementation is based on the segmentation feature of the
86116 + CPU and has a very small performance impact, however applications
86117 + will be limited to a 1.5 GB address space instead of the normal
86118 + 3 GB.
86119 +
86120 +config PAX_EMUTRAMP
86121 + bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
86122 + default y if PARISC
86123 + help
86124 + There are some programs and libraries that for one reason or
86125 + another attempt to execute special small code snippets from
86126 + non-executable memory pages. Most notable examples are the
86127 + signal handler return code generated by the kernel itself and
86128 + the GCC trampolines.
86129 +
86130 + If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
86131 + such programs will no longer work under your kernel.
86132 +
86133 + As a remedy you can say Y here and use the 'chpax' or 'paxctl'
86134 + utilities to enable trampoline emulation for the affected programs
86135 + yet still have the protection provided by the non-executable pages.
86136 +
86137 + On parisc you MUST enable this option and EMUSIGRT as well, otherwise
86138 + your system will not even boot.
86139 +
86140 + Alternatively you can say N here and use the 'chpax' or 'paxctl'
86141 + utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
86142 + for the affected files.
86143 +
86144 + NOTE: enabling this feature *may* open up a loophole in the
86145 + protection provided by non-executable pages that an attacker
86146 + could abuse. Therefore the best solution is to not have any
86147 + files on your system that would require this option. This can
86148 + be achieved by not using libc5 (which relies on the kernel
86149 + signal handler return code) and not using or rewriting programs
86150 + that make use of the nested function implementation of GCC.
86151 + Skilled users can just fix GCC itself so that it implements
86152 + nested function calls in a way that does not interfere with PaX.
86153 +
86154 +config PAX_EMUSIGRT
86155 + bool "Automatically emulate sigreturn trampolines"
86156 + depends on PAX_EMUTRAMP && PARISC
86157 + default y
86158 + help
86159 + Enabling this option will have the kernel automatically detect
86160 + and emulate signal return trampolines executing on the stack
86161 + that would otherwise lead to task termination.
86162 +
86163 + This solution is intended as a temporary one for users with
86164 + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
86165 + Modula-3 runtime, etc) or executables linked to such, basically
86166 + everything that does not specify its own SA_RESTORER function in
86167 + normal executable memory like glibc 2.1+ does.
86168 +
86169 + On parisc you MUST enable this option, otherwise your system will
86170 + not even boot.
86171 +
86172 + NOTE: this feature cannot be disabled on a per executable basis
86173 + and since it *does* open up a loophole in the protection provided
86174 + by non-executable pages, the best solution is to not have any
86175 + files on your system that would require this option.
86176 +
86177 +config PAX_MPROTECT
86178 + bool "Restrict mprotect()"
86179 + depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
86180 + help
86181 + Enabling this option will prevent programs from
86182 + - changing the executable status of memory pages that were
86183 + not originally created as executable,
86184 + - making read-only executable pages writable again,
86185 + - creating executable pages from anonymous memory,
86186 + - making read-only-after-relocations (RELRO) data pages writable again.
86187 +
86188 + You should say Y here to complete the protection provided by
86189 + the enforcement of non-executable pages.
86190 +
86191 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
86192 + this feature on a per file basis.
86193 +
86194 +config PAX_MPROTECT_COMPAT
86195 + bool "Use legacy/compat protection demoting (read help)"
86196 + depends on PAX_MPROTECT
86197 + default n
86198 + help
86199 + The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
86200 + by sending the proper error code to the application. For some broken
86201 + userland, this can cause problems with Python or other applications. The
86202 + current implementation however allows for applications like clamav to
86203 + detect if JIT compilation/execution is allowed and to fall back gracefully
86204 + to an interpreter-based mode if it does not. While we encourage everyone
86205 + to use the current implementation as-is and push upstream to fix broken
86206 + userland (note that the RWX logging option can assist with this), in some
86207 + environments this may not be possible. Having to disable MPROTECT
86208 + completely on certain binaries reduces the security benefit of PaX,
86209 + so this option is provided for those environments to revert to the old
86210 + behavior.
86211 +
86212 +config PAX_ELFRELOCS
86213 + bool "Allow ELF text relocations (read help)"
86214 + depends on PAX_MPROTECT
86215 + default n
86216 + help
86217 + Non-executable pages and mprotect() restrictions are effective
86218 + in preventing the introduction of new executable code into an
86219 + attacked task's address space. There remain only two venues
86220 + for this kind of attack: if the attacker can execute already
86221 + existing code in the attacked task then he can either have it
86222 + create and mmap() a file containing his code or have it mmap()
86223 + an already existing ELF library that does not have position
86224 + independent code in it and use mprotect() on it to make it
86225 + writable and copy his code there. While protecting against
86226 + the former approach is beyond PaX, the latter can be prevented
86227 + by having only PIC ELF libraries on one's system (which do not
86228 + need to relocate their code). If you are sure this is your case,
86229 + as is the case with all modern Linux distributions, then leave
86230 + this option disabled. You should say 'n' here.
86231 +
86232 +config PAX_ETEXECRELOCS
86233 + bool "Allow ELF ET_EXEC text relocations"
86234 + depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
86235 + select PAX_ELFRELOCS
86236 + default y
86237 + help
86238 + On some architectures there are incorrectly created applications
86239 + that require text relocations and would not work without enabling
86240 + this option. If you are an alpha, ia64 or parisc user, you should
86241 + enable this option and disable it once you have made sure that
86242 + none of your applications need it.
86243 +
86244 +config PAX_EMUPLT
86245 + bool "Automatically emulate ELF PLT"
86246 + depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
86247 + default y
86248 + help
86249 + Enabling this option will have the kernel automatically detect
86250 + and emulate the Procedure Linkage Table entries in ELF files.
86251 + On some architectures such entries are in writable memory, and
86252 + become non-executable leading to task termination. Therefore
86253 + it is mandatory that you enable this option on alpha, parisc,
86254 + sparc and sparc64, otherwise your system would not even boot.
86255 +
86256 + NOTE: this feature *does* open up a loophole in the protection
86257 + provided by the non-executable pages, therefore the proper
86258 + solution is to modify the toolchain to produce a PLT that does
86259 + not need to be writable.
86260 +
86261 +config PAX_DLRESOLVE
86262 + bool 'Emulate old glibc resolver stub'
86263 + depends on PAX_EMUPLT && SPARC
86264 + default n
86265 + help
86266 + This option is needed if userland has an old glibc (before 2.4)
86267 + that puts a 'save' instruction into the runtime generated resolver
86268 + stub that needs special emulation.
86269 +
86270 +config PAX_KERNEXEC
86271 + bool "Enforce non-executable kernel pages"
86272 + depends on (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN
86273 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
86274 + select PAX_KERNEXEC_PLUGIN if X86_64
86275 + help
86276 + This is the kernel land equivalent of PAGEEXEC and MPROTECT,
86277 + that is, enabling this option will make it harder to inject
86278 + and execute 'foreign' code in kernel memory itself.
86279 +
86280 + Note that on x86_64 kernels there is a known regression when
86281 + this feature and KVM/VMX are both enabled in the host kernel.
86282 +
86283 +choice
86284 + prompt "Return Address Instrumentation Method"
86285 + default PAX_KERNEXEC_PLUGIN_METHOD_BTS
86286 + depends on PAX_KERNEXEC_PLUGIN
86287 + help
86288 + Select the method used to instrument function pointer dereferences.
86289 + Note that binary modules cannot be instrumented by this approach.
86290 +
86291 + config PAX_KERNEXEC_PLUGIN_METHOD_BTS
86292 + bool "bts"
86293 + help
86294 + This method is compatible with binary only modules but has
86295 + a higher runtime overhead.
86296 +
86297 + config PAX_KERNEXEC_PLUGIN_METHOD_OR
86298 + bool "or"
86299 + depends on !PARAVIRT
86300 + help
86301 + This method is incompatible with binary only modules but has
86302 + a lower runtime overhead.
86303 +endchoice
86304 +
86305 +config PAX_KERNEXEC_PLUGIN_METHOD
86306 + string
86307 + default "bts" if PAX_KERNEXEC_PLUGIN_METHOD_BTS
86308 + default "or" if PAX_KERNEXEC_PLUGIN_METHOD_OR
86309 + default ""
86310 +
86311 +config PAX_KERNEXEC_MODULE_TEXT
86312 + int "Minimum amount of memory reserved for module code"
86313 + default "4"
86314 + depends on PAX_KERNEXEC && X86_32 && MODULES
86315 + help
86316 + Due to implementation details the kernel must reserve a fixed
86317 + amount of memory for module code at compile time that cannot be
86318 + changed at runtime. Here you can specify the minimum amount
86319 + in MB that will be reserved. Due to the same implementation
86320 + details this size will always be rounded up to the next 2/4 MB
86321 + boundary (depends on PAE) so the actually available memory for
86322 + module code will usually be more than this minimum.
86323 +
86324 + The default 4 MB should be enough for most users but if you have
86325 + an excessive number of modules (e.g., most distribution configs
86326 + compile many drivers as modules) or use huge modules such as
86327 + nvidia's kernel driver, you will need to adjust this amount.
86328 + A good rule of thumb is to look at your currently loaded kernel
86329 + modules and add up their sizes.
86330 +
86331 +endmenu
86332 +
86333 +menu "Address Space Layout Randomization"
86334 + depends on PAX
86335 +
86336 +config PAX_ASLR
86337 + bool "Address Space Layout Randomization"
86338 + help
86339 + Many if not most exploit techniques rely on the knowledge of
86340 + certain addresses in the attacked program. The following options
86341 + will allow the kernel to apply a certain amount of randomization
86342 + to specific parts of the program thereby forcing an attacker to
86343 + guess them in most cases. Any failed guess will most likely crash
86344 + the attacked program which allows the kernel to detect such attempts
86345 + and react on them. PaX itself provides no reaction mechanisms,
86346 + instead it is strongly encouraged that you make use of Nergal's
86347 + segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
86348 + (http://www.grsecurity.net/) built-in crash detection features or
86349 + develop one yourself.
86350 +
86351 + By saying Y here you can choose to randomize the following areas:
86352 + - top of the task's kernel stack
86353 + - top of the task's userland stack
86354 + - base address for mmap() requests that do not specify one
86355 + (this includes all libraries)
86356 + - base address of the main executable
86357 +
86358 + It is strongly recommended to say Y here as address space layout
86359 + randomization has negligible impact on performance yet it provides
86360 + a very effective protection.
86361 +
86362 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
86363 + this feature on a per file basis.
86364 +
86365 +config PAX_RANDKSTACK
86366 + bool "Randomize kernel stack base"
86367 + depends on X86_TSC && X86
86368 + help
86369 + By saying Y here the kernel will randomize every task's kernel
86370 + stack on every system call. This will not only force an attacker
86371 + to guess it but also prevent him from making use of possible
86372 + leaked information about it.
86373 +
86374 + Since the kernel stack is a rather scarce resource, randomization
86375 + may cause unexpected stack overflows, therefore you should very
86376 + carefully test your system. Note that once enabled in the kernel
86377 + configuration, this feature cannot be disabled on a per file basis.
86378 +
86379 +config PAX_RANDUSTACK
86380 + bool "Randomize user stack base"
86381 + depends on PAX_ASLR
86382 + help
86383 + By saying Y here the kernel will randomize every task's userland
86384 + stack. The randomization is done in two steps where the second
86385 + one may apply a big amount of shift to the top of the stack and
86386 + cause problems for programs that want to use lots of memory (more
86387 + than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
86388 + For this reason the second step can be controlled by 'chpax' or
86389 + 'paxctl' on a per file basis.
86390 +
86391 +config PAX_RANDMMAP
86392 + bool "Randomize mmap() base"
86393 + depends on PAX_ASLR
86394 + help
86395 + By saying Y here the kernel will use a randomized base address for
86396 + mmap() requests that do not specify one themselves. As a result
86397 + all dynamically loaded libraries will appear at random addresses
86398 + and therefore be harder to exploit by a technique where an attacker
86399 + attempts to execute library code for his purposes (e.g. spawn a
86400 + shell from an exploited program that is running at an elevated
86401 + privilege level).
86402 +
86403 + Furthermore, if a program is relinked as a dynamic ELF file, its
86404 + base address will be randomized as well, completing the full
86405 + randomization of the address space layout. Attacking such programs
86406 + becomes a guess game. You can find an example of doing this at
86407 + http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
86408 + http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
86409 +
86410 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
86411 + feature on a per file basis.
86412 +
86413 +endmenu
86414 +
86415 +menu "Miscellaneous hardening features"
86416 +
86417 +config PAX_MEMORY_SANITIZE
86418 + bool "Sanitize all freed memory"
86419 + depends on !HIBERNATION
86420 + help
86421 + By saying Y here the kernel will erase memory pages as soon as they
86422 + are freed. This in turn reduces the lifetime of data stored in the
86423 + pages, making it less likely that sensitive information such as
86424 + passwords, cryptographic secrets, etc stay in memory for too long.
86425 +
86426 + This is especially useful for programs whose runtime is short, long
86427 + lived processes and the kernel itself benefit from this as long as
86428 + they operate on whole memory pages and ensure timely freeing of pages
86429 + that may hold sensitive information.
86430 +
86431 + The tradeoff is performance impact, on a single CPU system kernel
86432 + compilation sees a 3% slowdown, other systems and workloads may vary
86433 + and you are advised to test this feature on your expected workload
86434 + before deploying it.
86435 +
86436 + Note that this feature does not protect data stored in live pages,
86437 + e.g., process memory swapped to disk may stay there for a long time.
86438 +
86439 +config PAX_MEMORY_STACKLEAK
86440 + bool "Sanitize kernel stack"
86441 + depends on X86
86442 + help
86443 + By saying Y here the kernel will erase the kernel stack before it
86444 + returns from a system call. This in turn reduces the information
86445 + that a kernel stack leak bug can reveal.
86446 +
86447 + Note that such a bug can still leak information that was put on
86448 + the stack by the current system call (the one eventually triggering
86449 + the bug) but traces of earlier system calls on the kernel stack
86450 + cannot leak anymore.
86451 +
86452 + The tradeoff is performance impact, on a single CPU system kernel
86453 + compilation sees a 1% slowdown, other systems and workloads may vary
86454 + and you are advised to test this feature on your expected workload
86455 + before deploying it.
86456 +
86457 + Note: full support for this feature requires gcc with plugin support
86458 + so make sure your compiler is at least gcc 4.5.0. Using older gcc
86459 + versions means that functions with large enough stack frames may
86460 + leave uninitialized memory behind that may be exposed to a later
86461 + syscall leaking the stack.
86462 +
86463 +config PAX_MEMORY_UDEREF
86464 + bool "Prevent invalid userland pointer dereference"
86465 + depends on X86 && !UML_X86 && !XEN
86466 + select PAX_PER_CPU_PGD if X86_64
86467 + help
86468 + By saying Y here the kernel will be prevented from dereferencing
86469 + userland pointers in contexts where the kernel expects only kernel
86470 + pointers. This is both a useful runtime debugging feature and a
86471 + security measure that prevents exploiting a class of kernel bugs.
86472 +
86473 + The tradeoff is that some virtualization solutions may experience
86474 + a huge slowdown and therefore you should not enable this feature
86475 + for kernels meant to run in such environments. Whether a given VM
86476 + solution is affected or not is best determined by simply trying it
86477 + out, the performance impact will be obvious right on boot as this
86478 + mechanism engages from very early on. A good rule of thumb is that
86479 + VMs running on CPUs without hardware virtualization support (i.e.,
86480 + the majority of IA-32 CPUs) will likely experience the slowdown.
86481 +
86482 +config PAX_REFCOUNT
86483 + bool "Prevent various kernel object reference counter overflows"
86484 + depends on GRKERNSEC && (X86 || SPARC64)
86485 + help
86486 + By saying Y here the kernel will detect and prevent overflowing
86487 + various (but not all) kinds of object reference counters. Such
86488 + overflows can normally occur due to bugs only and are often, if
86489 + not always, exploitable.
86490 +
86491 + The tradeoff is that data structures protected by an overflowed
86492 + refcount will never be freed and therefore will leak memory. Note
86493 + that this leak also happens even without this protection but in
86494 + that case the overflow can eventually trigger the freeing of the
86495 + data structure while it is still being used elsewhere, resulting
86496 + in the exploitable situation that this feature prevents.
86497 +
86498 + Since this has a negligible performance impact, you should enable
86499 + this feature.
86500 +
86501 +config PAX_USERCOPY
86502 + bool "Harden heap object copies between kernel and userland"
86503 + depends on X86 || PPC || SPARC || ARM
86504 + depends on GRKERNSEC && (SLAB || SLUB || SLOB)
86505 + help
86506 + By saying Y here the kernel will enforce the size of heap objects
86507 + when they are copied in either direction between the kernel and
86508 + userland, even if only a part of the heap object is copied.
86509 +
86510 + Specifically, this checking prevents information leaking from the
86511 + kernel heap during kernel to userland copies (if the kernel heap
86512 + object is otherwise fully initialized) and prevents kernel heap
86513 + overflows during userland to kernel copies.
86514 +
86515 + Note that the current implementation provides the strictest bounds
86516 + checks for the SLUB allocator.
86517 +
86518 + Enabling this option also enables per-slab cache protection against
86519 + data in a given cache being copied into/out of via userland
86520 + accessors. Though the whitelist of regions will be reduced over
86521 + time, it notably protects important data structures like task structs.
86522 +
86523 +
86524 + If frame pointers are enabled on x86, this option will also
86525 + restrict copies into and out of the kernel stack to local variables
86526 + within a single frame.
86527 +
86528 + Since this has a negligible performance impact, you should enable
86529 + this feature.
86530 +
86531 +endmenu
86532 +
86533 +endmenu
86534 +
86535 config KEYS
86536 bool "Enable access key retention support"
86537 help
86538 @@ -146,7 +766,7 @@ config INTEL_TXT
86539 config LSM_MMAP_MIN_ADDR
86540 int "Low address space for LSM to protect from user allocation"
86541 depends on SECURITY && SECURITY_SELINUX
86542 - default 65536
86543 + default 32768
86544 help
86545 This is the portion of low virtual memory which should be protected
86546 from userspace allocation. Keeping a user from writing to low pages
86547 diff --git a/security/capability.c b/security/capability.c
86548 index fce07a7..5f12858 100644
86549 --- a/security/capability.c
86550 +++ b/security/capability.c
86551 @@ -890,7 +890,7 @@ static void cap_audit_rule_free(void *lsmrule)
86552 }
86553 #endif /* CONFIG_AUDIT */
86554
86555 -struct security_operations default_security_ops = {
86556 +struct security_operations default_security_ops __read_only = {
86557 .name = "default",
86558 };
86559
86560 diff --git a/security/commoncap.c b/security/commoncap.c
86561 index fe30751..aaba312 100644
86562 --- a/security/commoncap.c
86563 +++ b/security/commoncap.c
86564 @@ -27,6 +27,8 @@
86565 #include <linux/sched.h>
86566 #include <linux/prctl.h>
86567 #include <linux/securebits.h>
86568 +#include <linux/syslog.h>
86569 +#include <net/sock.h>
86570
86571 /*
86572 * If a non-root user executes a setuid-root binary in
86573 @@ -50,9 +52,18 @@ static void warn_setuid_and_fcaps_mixed(char *fname)
86574 }
86575 }
86576
86577 +#ifdef CONFIG_NET
86578 +extern kernel_cap_t gr_cap_rtnetlink(struct sock *sk);
86579 +#endif
86580 +
86581 int cap_netlink_send(struct sock *sk, struct sk_buff *skb)
86582 {
86583 +#ifdef CONFIG_NET
86584 + NETLINK_CB(skb).eff_cap = gr_cap_rtnetlink(sk);
86585 +#else
86586 NETLINK_CB(skb).eff_cap = current_cap();
86587 +#endif
86588 +
86589 return 0;
86590 }
86591
86592 @@ -582,6 +593,9 @@ int cap_bprm_secureexec(struct linux_binprm *bprm)
86593 {
86594 const struct cred *cred = current_cred();
86595
86596 + if (gr_acl_enable_at_secure())
86597 + return 1;
86598 +
86599 if (cred->uid != 0) {
86600 if (bprm->cap_effective)
86601 return 1;
86602 @@ -956,13 +970,18 @@ error:
86603 /**
86604 * cap_syslog - Determine whether syslog function is permitted
86605 * @type: Function requested
86606 + * @from_file: Whether this request came from an open file (i.e. /proc)
86607 *
86608 * Determine whether the current process is permitted to use a particular
86609 * syslog function, returning 0 if permission is granted, -ve if not.
86610 */
86611 -int cap_syslog(int type)
86612 +int cap_syslog(int type, bool from_file)
86613 {
86614 - if ((type != 3 && type != 10) && !capable(CAP_SYS_ADMIN))
86615 + /* /proc/kmsg can open be opened by CAP_SYS_ADMIN */
86616 + if (type != SYSLOG_ACTION_OPEN && from_file)
86617 + return 0;
86618 + if ((type != SYSLOG_ACTION_READ_ALL &&
86619 + type != SYSLOG_ACTION_SIZE_BUFFER) && !capable(CAP_SYS_ADMIN))
86620 return -EPERM;
86621 return 0;
86622 }
86623 diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
86624 index 165eb53..b1db4eb 100644
86625 --- a/security/integrity/ima/ima.h
86626 +++ b/security/integrity/ima/ima.h
86627 @@ -84,8 +84,8 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
86628 extern spinlock_t ima_queue_lock;
86629
86630 struct ima_h_table {
86631 - atomic_long_t len; /* number of stored measurements in the list */
86632 - atomic_long_t violations;
86633 + atomic_long_unchecked_t len; /* number of stored measurements in the list */
86634 + atomic_long_unchecked_t violations;
86635 struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
86636 };
86637 extern struct ima_h_table ima_htable;
86638 diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
86639 index 852bf85..35d6df3 100644
86640 --- a/security/integrity/ima/ima_api.c
86641 +++ b/security/integrity/ima/ima_api.c
86642 @@ -74,7 +74,7 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
86643 int result;
86644
86645 /* can overflow, only indicator */
86646 - atomic_long_inc(&ima_htable.violations);
86647 + atomic_long_inc_unchecked(&ima_htable.violations);
86648
86649 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
86650 if (!entry) {
86651 diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
86652 index 0c72c9c..433e29b 100644
86653 --- a/security/integrity/ima/ima_fs.c
86654 +++ b/security/integrity/ima/ima_fs.c
86655 @@ -27,12 +27,12 @@
86656 static int valid_policy = 1;
86657 #define TMPBUFLEN 12
86658 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
86659 - loff_t *ppos, atomic_long_t *val)
86660 + loff_t *ppos, atomic_long_unchecked_t *val)
86661 {
86662 char tmpbuf[TMPBUFLEN];
86663 ssize_t len;
86664
86665 - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
86666 + len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
86667 return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
86668 }
86669
86670 diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
86671 index e19316d..339f7ae 100644
86672 --- a/security/integrity/ima/ima_queue.c
86673 +++ b/security/integrity/ima/ima_queue.c
86674 @@ -78,7 +78,7 @@ static int ima_add_digest_entry(struct ima_template_entry *entry)
86675 INIT_LIST_HEAD(&qe->later);
86676 list_add_tail_rcu(&qe->later, &ima_measurements);
86677
86678 - atomic_long_inc(&ima_htable.len);
86679 + atomic_long_inc_unchecked(&ima_htable.len);
86680 key = ima_hash_key(entry->digest);
86681 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
86682 return 0;
86683 diff --git a/security/keys/keyring.c b/security/keys/keyring.c
86684 index e031952..c9a535d 100644
86685 --- a/security/keys/keyring.c
86686 +++ b/security/keys/keyring.c
86687 @@ -214,15 +214,15 @@ static long keyring_read(const struct key *keyring,
86688 ret = -EFAULT;
86689
86690 for (loop = 0; loop < klist->nkeys; loop++) {
86691 + key_serial_t serial;
86692 key = klist->keys[loop];
86693 + serial = key->serial;
86694
86695 tmp = sizeof(key_serial_t);
86696 if (tmp > buflen)
86697 tmp = buflen;
86698
86699 - if (copy_to_user(buffer,
86700 - &key->serial,
86701 - tmp) != 0)
86702 + if (copy_to_user(buffer, &serial, tmp))
86703 goto error;
86704
86705 buflen -= tmp;
86706 diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
86707 index 931cfda..e71808a 100644
86708 --- a/security/keys/process_keys.c
86709 +++ b/security/keys/process_keys.c
86710 @@ -208,7 +208,7 @@ static int install_process_keyring(void)
86711 ret = install_process_keyring_to_cred(new);
86712 if (ret < 0) {
86713 abort_creds(new);
86714 - return ret != -EEXIST ?: 0;
86715 + return ret != -EEXIST ? ret : 0;
86716 }
86717
86718 return commit_creds(new);
86719 diff --git a/security/min_addr.c b/security/min_addr.c
86720 index d9f9425..c28cef4 100644
86721 --- a/security/min_addr.c
86722 +++ b/security/min_addr.c
86723 @@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
86724 */
86725 static void update_mmap_min_addr(void)
86726 {
86727 +#ifndef SPARC
86728 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
86729 if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
86730 mmap_min_addr = dac_mmap_min_addr;
86731 @@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
86732 #else
86733 mmap_min_addr = dac_mmap_min_addr;
86734 #endif
86735 +#endif
86736 }
86737
86738 /*
86739 diff --git a/security/root_plug.c b/security/root_plug.c
86740 index 2f7ffa6..0455400 100644
86741 --- a/security/root_plug.c
86742 +++ b/security/root_plug.c
86743 @@ -70,7 +70,7 @@ static int rootplug_bprm_check_security (struct linux_binprm *bprm)
86744 return 0;
86745 }
86746
86747 -static struct security_operations rootplug_security_ops = {
86748 +static struct security_operations rootplug_security_ops __read_only = {
86749 .bprm_check_security = rootplug_bprm_check_security,
86750 };
86751
86752 diff --git a/security/security.c b/security/security.c
86753 index c4c6732..7abf13b 100644
86754 --- a/security/security.c
86755 +++ b/security/security.c
86756 @@ -24,7 +24,7 @@ static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1];
86757 extern struct security_operations default_security_ops;
86758 extern void security_fixup_ops(struct security_operations *ops);
86759
86760 -struct security_operations *security_ops; /* Initialized to NULL */
86761 +struct security_operations *security_ops __read_only; /* Initialized to NULL */
86762
86763 static inline int verify(struct security_operations *ops)
86764 {
86765 @@ -106,7 +106,7 @@ int __init security_module_enable(struct security_operations *ops)
86766 * If there is already a security module registered with the kernel,
86767 * an error will be returned. Otherwise %0 is returned on success.
86768 */
86769 -int register_security(struct security_operations *ops)
86770 +int __init register_security(struct security_operations *ops)
86771 {
86772 if (verify(ops)) {
86773 printk(KERN_DEBUG "%s could not verify "
86774 @@ -199,9 +199,9 @@ int security_quota_on(struct dentry *dentry)
86775 return security_ops->quota_on(dentry);
86776 }
86777
86778 -int security_syslog(int type)
86779 +int security_syslog(int type, bool from_file)
86780 {
86781 - return security_ops->syslog(type);
86782 + return security_ops->syslog(type, from_file);
86783 }
86784
86785 int security_settime(struct timespec *ts, struct timezone *tz)
86786 diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
86787 index a106754..ca3a589 100644
86788 --- a/security/selinux/hooks.c
86789 +++ b/security/selinux/hooks.c
86790 @@ -76,6 +76,7 @@
86791 #include <linux/selinux.h>
86792 #include <linux/mutex.h>
86793 #include <linux/posix-timers.h>
86794 +#include <linux/syslog.h>
86795
86796 #include "avc.h"
86797 #include "objsec.h"
86798 @@ -131,7 +132,7 @@ int selinux_enabled = 1;
86799 * Minimal support for a secondary security module,
86800 * just to allow the use of the capability module.
86801 */
86802 -static struct security_operations *secondary_ops;
86803 +static struct security_operations *secondary_ops __read_only;
86804
86805 /* Lists of inode and superblock security structures initialized
86806 before the policy was loaded. */
86807 @@ -2050,29 +2051,30 @@ static int selinux_quota_on(struct dentry *dentry)
86808 return dentry_has_perm(cred, NULL, dentry, FILE__QUOTAON);
86809 }
86810
86811 -static int selinux_syslog(int type)
86812 +static int selinux_syslog(int type, bool from_file)
86813 {
86814 int rc;
86815
86816 - rc = cap_syslog(type);
86817 + rc = cap_syslog(type, from_file);
86818 if (rc)
86819 return rc;
86820
86821 switch (type) {
86822 - case 3: /* Read last kernel messages */
86823 - case 10: /* Return size of the log buffer */
86824 + case SYSLOG_ACTION_READ_ALL: /* Read last kernel messages */
86825 + case SYSLOG_ACTION_SIZE_BUFFER: /* Return size of the log buffer */
86826 rc = task_has_system(current, SYSTEM__SYSLOG_READ);
86827 break;
86828 - case 6: /* Disable logging to console */
86829 - case 7: /* Enable logging to console */
86830 - case 8: /* Set level of messages printed to console */
86831 + case SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging to console */
86832 + case SYSLOG_ACTION_CONSOLE_ON: /* Enable logging to console */
86833 + /* Set level of messages printed to console */
86834 + case SYSLOG_ACTION_CONSOLE_LEVEL:
86835 rc = task_has_system(current, SYSTEM__SYSLOG_CONSOLE);
86836 break;
86837 - case 0: /* Close log */
86838 - case 1: /* Open log */
86839 - case 2: /* Read from log */
86840 - case 4: /* Read/clear last kernel messages */
86841 - case 5: /* Clear ring buffer */
86842 + case SYSLOG_ACTION_CLOSE: /* Close log */
86843 + case SYSLOG_ACTION_OPEN: /* Open log */
86844 + case SYSLOG_ACTION_READ: /* Read from log */
86845 + case SYSLOG_ACTION_READ_CLEAR: /* Read/clear last kernel messages */
86846 + case SYSLOG_ACTION_CLEAR: /* Clear ring buffer */
86847 default:
86848 rc = task_has_system(current, SYSTEM__SYSLOG_MOD);
86849 break;
86850 @@ -5457,7 +5459,7 @@ static int selinux_key_getsecurity(struct key *key, char **_buffer)
86851
86852 #endif
86853
86854 -static struct security_operations selinux_ops = {
86855 +static struct security_operations selinux_ops __read_only = {
86856 .name = "selinux",
86857
86858 .ptrace_access_check = selinux_ptrace_access_check,
86859 @@ -5841,7 +5843,9 @@ int selinux_disable(void)
86860 avc_disable();
86861
86862 /* Reset security_ops to the secondary module, dummy or capability. */
86863 + pax_open_kernel();
86864 security_ops = secondary_ops;
86865 + pax_close_kernel();
86866
86867 /* Unregister netfilter hooks. */
86868 selinux_nf_ip_exit();
86869 diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
86870 index 13128f9..c23c736 100644
86871 --- a/security/selinux/include/xfrm.h
86872 +++ b/security/selinux/include/xfrm.h
86873 @@ -48,7 +48,7 @@ int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall);
86874
86875 static inline void selinux_xfrm_notify_policyload(void)
86876 {
86877 - atomic_inc(&flow_cache_genid);
86878 + atomic_inc_unchecked(&flow_cache_genid);
86879 }
86880 #else
86881 static inline int selinux_xfrm_enabled(void)
86882 diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
86883 index ff17820..d68084c 100644
86884 --- a/security/selinux/ss/services.c
86885 +++ b/security/selinux/ss/services.c
86886 @@ -1715,6 +1715,8 @@ int security_load_policy(void *data, size_t len)
86887 int rc = 0;
86888 struct policy_file file = { data, len }, *fp = &file;
86889
86890 + pax_track_stack();
86891 +
86892 if (!ss_initialized) {
86893 avtab_cache_init();
86894 if (policydb_read(&policydb, fp)) {
86895 diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
86896 index c33b6bb..b51f19e 100644
86897 --- a/security/smack/smack_lsm.c
86898 +++ b/security/smack/smack_lsm.c
86899 @@ -157,12 +157,12 @@ static int smack_ptrace_traceme(struct task_struct *ptp)
86900 *
86901 * Returns 0 on success, error code otherwise.
86902 */
86903 -static int smack_syslog(int type)
86904 +static int smack_syslog(int type, bool from_file)
86905 {
86906 int rc;
86907 char *sp = current_security();
86908
86909 - rc = cap_syslog(type);
86910 + rc = cap_syslog(type, from_file);
86911 if (rc != 0)
86912 return rc;
86913
86914 @@ -3073,7 +3073,7 @@ static int smack_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
86915 return 0;
86916 }
86917
86918 -struct security_operations smack_ops = {
86919 +struct security_operations smack_ops __read_only = {
86920 .name = "smack",
86921
86922 .ptrace_access_check = smack_ptrace_access_check,
86923 diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
86924 index 9548a09..9a5f384 100644
86925 --- a/security/tomoyo/tomoyo.c
86926 +++ b/security/tomoyo/tomoyo.c
86927 @@ -275,7 +275,7 @@ static int tomoyo_dentry_open(struct file *f, const struct cred *cred)
86928 * tomoyo_security_ops is a "struct security_operations" which is used for
86929 * registering TOMOYO.
86930 */
86931 -static struct security_operations tomoyo_security_ops = {
86932 +static struct security_operations tomoyo_security_ops __read_only = {
86933 .name = "tomoyo",
86934 .cred_alloc_blank = tomoyo_cred_alloc_blank,
86935 .cred_prepare = tomoyo_cred_prepare,
86936 diff --git a/sound/aoa/codecs/onyx.c b/sound/aoa/codecs/onyx.c
86937 index 84bb07d..c2ab6b6 100644
86938 --- a/sound/aoa/codecs/onyx.c
86939 +++ b/sound/aoa/codecs/onyx.c
86940 @@ -53,7 +53,7 @@ struct onyx {
86941 spdif_locked:1,
86942 analog_locked:1,
86943 original_mute:2;
86944 - int open_count;
86945 + local_t open_count;
86946 struct codec_info *codec_info;
86947
86948 /* mutex serializes concurrent access to the device
86949 @@ -752,7 +752,7 @@ static int onyx_open(struct codec_info_item *cii,
86950 struct onyx *onyx = cii->codec_data;
86951
86952 mutex_lock(&onyx->mutex);
86953 - onyx->open_count++;
86954 + local_inc(&onyx->open_count);
86955 mutex_unlock(&onyx->mutex);
86956
86957 return 0;
86958 @@ -764,8 +764,7 @@ static int onyx_close(struct codec_info_item *cii,
86959 struct onyx *onyx = cii->codec_data;
86960
86961 mutex_lock(&onyx->mutex);
86962 - onyx->open_count--;
86963 - if (!onyx->open_count)
86964 + if (local_dec_and_test(&onyx->open_count))
86965 onyx->spdif_locked = onyx->analog_locked = 0;
86966 mutex_unlock(&onyx->mutex);
86967
86968 diff --git a/sound/aoa/codecs/onyx.h b/sound/aoa/codecs/onyx.h
86969 index ffd2025..df062c9 100644
86970 --- a/sound/aoa/codecs/onyx.h
86971 +++ b/sound/aoa/codecs/onyx.h
86972 @@ -11,6 +11,7 @@
86973 #include <linux/i2c.h>
86974 #include <asm/pmac_low_i2c.h>
86975 #include <asm/prom.h>
86976 +#include <asm/local.h>
86977
86978 /* PCM3052 register definitions */
86979
86980 diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
86981 index d9c9635..bc0a5a2 100644
86982 --- a/sound/core/oss/pcm_oss.c
86983 +++ b/sound/core/oss/pcm_oss.c
86984 @@ -1395,7 +1395,7 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
86985 }
86986 } else {
86987 tmp = snd_pcm_oss_write2(substream,
86988 - (const char __force *)buf,
86989 + (const char __force_kernel *)buf,
86990 runtime->oss.period_bytes, 0);
86991 if (tmp <= 0)
86992 goto err;
86993 @@ -1483,7 +1483,7 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
86994 xfer += tmp;
86995 runtime->oss.buffer_used -= tmp;
86996 } else {
86997 - tmp = snd_pcm_oss_read2(substream, (char __force *)buf,
86998 + tmp = snd_pcm_oss_read2(substream, (char __force_kernel *)buf,
86999 runtime->oss.period_bytes, 0);
87000 if (tmp <= 0)
87001 goto err;
87002 diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
87003 index 038232d..7dd9e5c 100644
87004 --- a/sound/core/pcm_compat.c
87005 +++ b/sound/core/pcm_compat.c
87006 @@ -30,7 +30,7 @@ static int snd_pcm_ioctl_delay_compat(struct snd_pcm_substream *substream,
87007 int err;
87008
87009 fs = snd_enter_user();
87010 - err = snd_pcm_delay(substream, &delay);
87011 + err = snd_pcm_delay(substream, (snd_pcm_sframes_t __force_user *)&delay);
87012 snd_leave_user(fs);
87013 if (err < 0)
87014 return err;
87015 diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
87016 index e6d2d97..4843949 100644
87017 --- a/sound/core/pcm_native.c
87018 +++ b/sound/core/pcm_native.c
87019 @@ -2747,11 +2747,11 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
87020 switch (substream->stream) {
87021 case SNDRV_PCM_STREAM_PLAYBACK:
87022 result = snd_pcm_playback_ioctl1(NULL, substream, cmd,
87023 - (void __user *)arg);
87024 + (void __force_user *)arg);
87025 break;
87026 case SNDRV_PCM_STREAM_CAPTURE:
87027 result = snd_pcm_capture_ioctl1(NULL, substream, cmd,
87028 - (void __user *)arg);
87029 + (void __force_user *)arg);
87030 break;
87031 default:
87032 result = -EINVAL;
87033 diff --git a/sound/core/seq/seq_device.c b/sound/core/seq/seq_device.c
87034 index 1f99767..14636533 100644
87035 --- a/sound/core/seq/seq_device.c
87036 +++ b/sound/core/seq/seq_device.c
87037 @@ -63,7 +63,7 @@ struct ops_list {
87038 int argsize; /* argument size */
87039
87040 /* operators */
87041 - struct snd_seq_dev_ops ops;
87042 + struct snd_seq_dev_ops *ops;
87043
87044 /* registred devices */
87045 struct list_head dev_list; /* list of devices */
87046 @@ -332,7 +332,7 @@ int snd_seq_device_register_driver(char *id, struct snd_seq_dev_ops *entry,
87047
87048 mutex_lock(&ops->reg_mutex);
87049 /* copy driver operators */
87050 - ops->ops = *entry;
87051 + ops->ops = entry;
87052 ops->driver |= DRIVER_LOADED;
87053 ops->argsize = argsize;
87054
87055 @@ -462,7 +462,7 @@ static int init_device(struct snd_seq_device *dev, struct ops_list *ops)
87056 dev->name, ops->id, ops->argsize, dev->argsize);
87057 return -EINVAL;
87058 }
87059 - if (ops->ops.init_device(dev) >= 0) {
87060 + if (ops->ops->init_device(dev) >= 0) {
87061 dev->status = SNDRV_SEQ_DEVICE_REGISTERED;
87062 ops->num_init_devices++;
87063 } else {
87064 @@ -489,7 +489,7 @@ static int free_device(struct snd_seq_device *dev, struct ops_list *ops)
87065 dev->name, ops->id, ops->argsize, dev->argsize);
87066 return -EINVAL;
87067 }
87068 - if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) {
87069 + if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) {
87070 dev->status = SNDRV_SEQ_DEVICE_FREE;
87071 dev->driver_data = NULL;
87072 ops->num_init_devices--;
87073 diff --git a/sound/drivers/mts64.c b/sound/drivers/mts64.c
87074 index 9284829..ac8e8b2 100644
87075 --- a/sound/drivers/mts64.c
87076 +++ b/sound/drivers/mts64.c
87077 @@ -27,6 +27,7 @@
87078 #include <sound/initval.h>
87079 #include <sound/rawmidi.h>
87080 #include <sound/control.h>
87081 +#include <asm/local.h>
87082
87083 #define CARD_NAME "Miditerminal 4140"
87084 #define DRIVER_NAME "MTS64"
87085 @@ -65,7 +66,7 @@ struct mts64 {
87086 struct pardevice *pardev;
87087 int pardev_claimed;
87088
87089 - int open_count;
87090 + local_t open_count;
87091 int current_midi_output_port;
87092 int current_midi_input_port;
87093 u8 mode[MTS64_NUM_INPUT_PORTS];
87094 @@ -695,7 +696,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
87095 {
87096 struct mts64 *mts = substream->rmidi->private_data;
87097
87098 - if (mts->open_count == 0) {
87099 + if (local_read(&mts->open_count) == 0) {
87100 /* We don't need a spinlock here, because this is just called
87101 if the device has not been opened before.
87102 So there aren't any IRQs from the device */
87103 @@ -703,7 +704,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
87104
87105 msleep(50);
87106 }
87107 - ++(mts->open_count);
87108 + local_inc(&mts->open_count);
87109
87110 return 0;
87111 }
87112 @@ -713,8 +714,7 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
87113 struct mts64 *mts = substream->rmidi->private_data;
87114 unsigned long flags;
87115
87116 - --(mts->open_count);
87117 - if (mts->open_count == 0) {
87118 + if (local_dec_return(&mts->open_count) == 0) {
87119 /* We need the spinlock_irqsave here because we can still
87120 have IRQs at this point */
87121 spin_lock_irqsave(&mts->lock, flags);
87122 @@ -723,8 +723,8 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
87123
87124 msleep(500);
87125
87126 - } else if (mts->open_count < 0)
87127 - mts->open_count = 0;
87128 + } else if (local_read(&mts->open_count) < 0)
87129 + local_set(&mts->open_count, 0);
87130
87131 return 0;
87132 }
87133 diff --git a/sound/drivers/opl4/opl4_lib.c b/sound/drivers/opl4/opl4_lib.c
87134 index 01997f2..cbc1195 100644
87135 --- a/sound/drivers/opl4/opl4_lib.c
87136 +++ b/sound/drivers/opl4/opl4_lib.c
87137 @@ -27,7 +27,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
87138 MODULE_DESCRIPTION("OPL4 driver");
87139 MODULE_LICENSE("GPL");
87140
87141 -static void inline snd_opl4_wait(struct snd_opl4 *opl4)
87142 +static inline void snd_opl4_wait(struct snd_opl4 *opl4)
87143 {
87144 int timeout = 10;
87145 while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0)
87146 diff --git a/sound/drivers/portman2x4.c b/sound/drivers/portman2x4.c
87147 index 60158e2..0a0cc1a 100644
87148 --- a/sound/drivers/portman2x4.c
87149 +++ b/sound/drivers/portman2x4.c
87150 @@ -46,6 +46,7 @@
87151 #include <sound/initval.h>
87152 #include <sound/rawmidi.h>
87153 #include <sound/control.h>
87154 +#include <asm/local.h>
87155
87156 #define CARD_NAME "Portman 2x4"
87157 #define DRIVER_NAME "portman"
87158 @@ -83,7 +84,7 @@ struct portman {
87159 struct pardevice *pardev;
87160 int pardev_claimed;
87161
87162 - int open_count;
87163 + local_t open_count;
87164 int mode[PORTMAN_NUM_INPUT_PORTS];
87165 struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
87166 };
87167 diff --git a/sound/isa/cmi8330.c b/sound/isa/cmi8330.c
87168 index 02f79d2..8691d43 100644
87169 --- a/sound/isa/cmi8330.c
87170 +++ b/sound/isa/cmi8330.c
87171 @@ -173,7 +173,7 @@ struct snd_cmi8330 {
87172
87173 struct snd_pcm *pcm;
87174 struct snd_cmi8330_stream {
87175 - struct snd_pcm_ops ops;
87176 + snd_pcm_ops_no_const ops;
87177 snd_pcm_open_callback_t open;
87178 void *private_data; /* sb or wss */
87179 } streams[2];
87180 diff --git a/sound/oss/sb_audio.c b/sound/oss/sb_audio.c
87181 index 733b014..56ce96f 100644
87182 --- a/sound/oss/sb_audio.c
87183 +++ b/sound/oss/sb_audio.c
87184 @@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
87185 buf16 = (signed short *)(localbuf + localoffs);
87186 while (c)
87187 {
87188 - locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
87189 + locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
87190 if (copy_from_user(lbuf8,
87191 userbuf+useroffs + p,
87192 locallen))
87193 diff --git a/sound/oss/swarm_cs4297a.c b/sound/oss/swarm_cs4297a.c
87194 index 3136c88..28ad950 100644
87195 --- a/sound/oss/swarm_cs4297a.c
87196 +++ b/sound/oss/swarm_cs4297a.c
87197 @@ -2577,7 +2577,6 @@ static int __init cs4297a_init(void)
87198 {
87199 struct cs4297a_state *s;
87200 u32 pwr, id;
87201 - mm_segment_t fs;
87202 int rval;
87203 #ifndef CONFIG_BCM_CS4297A_CSWARM
87204 u64 cfg;
87205 @@ -2667,22 +2666,23 @@ static int __init cs4297a_init(void)
87206 if (!rval) {
87207 char *sb1250_duart_present;
87208
87209 +#if 0
87210 + mm_segment_t fs;
87211 fs = get_fs();
87212 set_fs(KERNEL_DS);
87213 -#if 0
87214 val = SOUND_MASK_LINE;
87215 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
87216 for (i = 0; i < ARRAY_SIZE(initvol); i++) {
87217 val = initvol[i].vol;
87218 mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
87219 }
87220 + set_fs(fs);
87221 // cs4297a_write_ac97(s, 0x18, 0x0808);
87222 #else
87223 // cs4297a_write_ac97(s, 0x5e, 0x180);
87224 cs4297a_write_ac97(s, 0x02, 0x0808);
87225 cs4297a_write_ac97(s, 0x18, 0x0808);
87226 #endif
87227 - set_fs(fs);
87228
87229 list_add(&s->list, &cs4297a_devs);
87230
87231 diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c
87232 index 78288db..0406809 100644
87233 --- a/sound/pci/ac97/ac97_codec.c
87234 +++ b/sound/pci/ac97/ac97_codec.c
87235 @@ -1952,7 +1952,7 @@ static int snd_ac97_dev_disconnect(struct snd_device *device)
87236 }
87237
87238 /* build_ops to do nothing */
87239 -static struct snd_ac97_build_ops null_build_ops;
87240 +static const struct snd_ac97_build_ops null_build_ops;
87241
87242 #ifdef CONFIG_SND_AC97_POWER_SAVE
87243 static void do_update_power(struct work_struct *work)
87244 diff --git a/sound/pci/ac97/ac97_patch.c b/sound/pci/ac97/ac97_patch.c
87245 index eeb2e23..82bf625 100644
87246 --- a/sound/pci/ac97/ac97_patch.c
87247 +++ b/sound/pci/ac97/ac97_patch.c
87248 @@ -371,7 +371,7 @@ static int patch_yamaha_ymf743_build_spdif(struct snd_ac97 *ac97)
87249 return 0;
87250 }
87251
87252 -static struct snd_ac97_build_ops patch_yamaha_ymf743_ops = {
87253 +static const struct snd_ac97_build_ops patch_yamaha_ymf743_ops = {
87254 .build_spdif = patch_yamaha_ymf743_build_spdif,
87255 .build_3d = patch_yamaha_ymf7x3_3d,
87256 };
87257 @@ -455,7 +455,7 @@ static int patch_yamaha_ymf753_post_spdif(struct snd_ac97 * ac97)
87258 return 0;
87259 }
87260
87261 -static struct snd_ac97_build_ops patch_yamaha_ymf753_ops = {
87262 +static const struct snd_ac97_build_ops patch_yamaha_ymf753_ops = {
87263 .build_3d = patch_yamaha_ymf7x3_3d,
87264 .build_post_spdif = patch_yamaha_ymf753_post_spdif
87265 };
87266 @@ -502,7 +502,7 @@ static int patch_wolfson_wm9703_specific(struct snd_ac97 * ac97)
87267 return 0;
87268 }
87269
87270 -static struct snd_ac97_build_ops patch_wolfson_wm9703_ops = {
87271 +static const struct snd_ac97_build_ops patch_wolfson_wm9703_ops = {
87272 .build_specific = patch_wolfson_wm9703_specific,
87273 };
87274
87275 @@ -533,7 +533,7 @@ static int patch_wolfson_wm9704_specific(struct snd_ac97 * ac97)
87276 return 0;
87277 }
87278
87279 -static struct snd_ac97_build_ops patch_wolfson_wm9704_ops = {
87280 +static const struct snd_ac97_build_ops patch_wolfson_wm9704_ops = {
87281 .build_specific = patch_wolfson_wm9704_specific,
87282 };
87283
87284 @@ -555,7 +555,7 @@ static int patch_wolfson_wm9705_specific(struct snd_ac97 * ac97)
87285 return 0;
87286 }
87287
87288 -static struct snd_ac97_build_ops patch_wolfson_wm9705_ops = {
87289 +static const struct snd_ac97_build_ops patch_wolfson_wm9705_ops = {
87290 .build_specific = patch_wolfson_wm9705_specific,
87291 };
87292
87293 @@ -692,7 +692,7 @@ static int patch_wolfson_wm9711_specific(struct snd_ac97 * ac97)
87294 return 0;
87295 }
87296
87297 -static struct snd_ac97_build_ops patch_wolfson_wm9711_ops = {
87298 +static const struct snd_ac97_build_ops patch_wolfson_wm9711_ops = {
87299 .build_specific = patch_wolfson_wm9711_specific,
87300 };
87301
87302 @@ -886,7 +886,7 @@ static void patch_wolfson_wm9713_resume (struct snd_ac97 * ac97)
87303 }
87304 #endif
87305
87306 -static struct snd_ac97_build_ops patch_wolfson_wm9713_ops = {
87307 +static const struct snd_ac97_build_ops patch_wolfson_wm9713_ops = {
87308 .build_specific = patch_wolfson_wm9713_specific,
87309 .build_3d = patch_wolfson_wm9713_3d,
87310 #ifdef CONFIG_PM
87311 @@ -991,7 +991,7 @@ static int patch_sigmatel_stac97xx_specific(struct snd_ac97 * ac97)
87312 return 0;
87313 }
87314
87315 -static struct snd_ac97_build_ops patch_sigmatel_stac9700_ops = {
87316 +static const struct snd_ac97_build_ops patch_sigmatel_stac9700_ops = {
87317 .build_3d = patch_sigmatel_stac9700_3d,
87318 .build_specific = patch_sigmatel_stac97xx_specific
87319 };
87320 @@ -1038,7 +1038,7 @@ static int patch_sigmatel_stac9708_specific(struct snd_ac97 *ac97)
87321 return patch_sigmatel_stac97xx_specific(ac97);
87322 }
87323
87324 -static struct snd_ac97_build_ops patch_sigmatel_stac9708_ops = {
87325 +static const struct snd_ac97_build_ops patch_sigmatel_stac9708_ops = {
87326 .build_3d = patch_sigmatel_stac9708_3d,
87327 .build_specific = patch_sigmatel_stac9708_specific
87328 };
87329 @@ -1267,7 +1267,7 @@ static int patch_sigmatel_stac9758_specific(struct snd_ac97 *ac97)
87330 return 0;
87331 }
87332
87333 -static struct snd_ac97_build_ops patch_sigmatel_stac9758_ops = {
87334 +static const struct snd_ac97_build_ops patch_sigmatel_stac9758_ops = {
87335 .build_3d = patch_sigmatel_stac9700_3d,
87336 .build_specific = patch_sigmatel_stac9758_specific
87337 };
87338 @@ -1342,7 +1342,7 @@ static int patch_cirrus_build_spdif(struct snd_ac97 * ac97)
87339 return 0;
87340 }
87341
87342 -static struct snd_ac97_build_ops patch_cirrus_ops = {
87343 +static const struct snd_ac97_build_ops patch_cirrus_ops = {
87344 .build_spdif = patch_cirrus_build_spdif
87345 };
87346
87347 @@ -1399,7 +1399,7 @@ static int patch_conexant_build_spdif(struct snd_ac97 * ac97)
87348 return 0;
87349 }
87350
87351 -static struct snd_ac97_build_ops patch_conexant_ops = {
87352 +static const struct snd_ac97_build_ops patch_conexant_ops = {
87353 .build_spdif = patch_conexant_build_spdif
87354 };
87355
87356 @@ -1575,7 +1575,7 @@ static void patch_ad1881_chained(struct snd_ac97 * ac97, int unchained_idx, int
87357 }
87358 }
87359
87360 -static struct snd_ac97_build_ops patch_ad1881_build_ops = {
87361 +static const struct snd_ac97_build_ops patch_ad1881_build_ops = {
87362 #ifdef CONFIG_PM
87363 .resume = ad18xx_resume
87364 #endif
87365 @@ -1662,7 +1662,7 @@ static int patch_ad1885_specific(struct snd_ac97 * ac97)
87366 return 0;
87367 }
87368
87369 -static struct snd_ac97_build_ops patch_ad1885_build_ops = {
87370 +static const struct snd_ac97_build_ops patch_ad1885_build_ops = {
87371 .build_specific = &patch_ad1885_specific,
87372 #ifdef CONFIG_PM
87373 .resume = ad18xx_resume
87374 @@ -1689,7 +1689,7 @@ static int patch_ad1886_specific(struct snd_ac97 * ac97)
87375 return 0;
87376 }
87377
87378 -static struct snd_ac97_build_ops patch_ad1886_build_ops = {
87379 +static const struct snd_ac97_build_ops patch_ad1886_build_ops = {
87380 .build_specific = &patch_ad1886_specific,
87381 #ifdef CONFIG_PM
87382 .resume = ad18xx_resume
87383 @@ -1896,7 +1896,7 @@ static int patch_ad1981a_specific(struct snd_ac97 * ac97)
87384 ARRAY_SIZE(snd_ac97_ad1981x_jack_sense));
87385 }
87386
87387 -static struct snd_ac97_build_ops patch_ad1981a_build_ops = {
87388 +static const struct snd_ac97_build_ops patch_ad1981a_build_ops = {
87389 .build_post_spdif = patch_ad198x_post_spdif,
87390 .build_specific = patch_ad1981a_specific,
87391 #ifdef CONFIG_PM
87392 @@ -1952,7 +1952,7 @@ static int patch_ad1981b_specific(struct snd_ac97 *ac97)
87393 ARRAY_SIZE(snd_ac97_ad1981x_jack_sense));
87394 }
87395
87396 -static struct snd_ac97_build_ops patch_ad1981b_build_ops = {
87397 +static const struct snd_ac97_build_ops patch_ad1981b_build_ops = {
87398 .build_post_spdif = patch_ad198x_post_spdif,
87399 .build_specific = patch_ad1981b_specific,
87400 #ifdef CONFIG_PM
87401 @@ -2091,7 +2091,7 @@ static int patch_ad1888_specific(struct snd_ac97 *ac97)
87402 return patch_build_controls(ac97, snd_ac97_ad1888_controls, ARRAY_SIZE(snd_ac97_ad1888_controls));
87403 }
87404
87405 -static struct snd_ac97_build_ops patch_ad1888_build_ops = {
87406 +static const struct snd_ac97_build_ops patch_ad1888_build_ops = {
87407 .build_post_spdif = patch_ad198x_post_spdif,
87408 .build_specific = patch_ad1888_specific,
87409 #ifdef CONFIG_PM
87410 @@ -2140,7 +2140,7 @@ static int patch_ad1980_specific(struct snd_ac97 *ac97)
87411 return patch_build_controls(ac97, &snd_ac97_ad198x_2cmic, 1);
87412 }
87413
87414 -static struct snd_ac97_build_ops patch_ad1980_build_ops = {
87415 +static const struct snd_ac97_build_ops patch_ad1980_build_ops = {
87416 .build_post_spdif = patch_ad198x_post_spdif,
87417 .build_specific = patch_ad1980_specific,
87418 #ifdef CONFIG_PM
87419 @@ -2255,7 +2255,7 @@ static int patch_ad1985_specific(struct snd_ac97 *ac97)
87420 ARRAY_SIZE(snd_ac97_ad1985_controls));
87421 }
87422
87423 -static struct snd_ac97_build_ops patch_ad1985_build_ops = {
87424 +static const struct snd_ac97_build_ops patch_ad1985_build_ops = {
87425 .build_post_spdif = patch_ad198x_post_spdif,
87426 .build_specific = patch_ad1985_specific,
87427 #ifdef CONFIG_PM
87428 @@ -2547,7 +2547,7 @@ static int patch_ad1986_specific(struct snd_ac97 *ac97)
87429 ARRAY_SIZE(snd_ac97_ad1985_controls));
87430 }
87431
87432 -static struct snd_ac97_build_ops patch_ad1986_build_ops = {
87433 +static const struct snd_ac97_build_ops patch_ad1986_build_ops = {
87434 .build_post_spdif = patch_ad198x_post_spdif,
87435 .build_specific = patch_ad1986_specific,
87436 #ifdef CONFIG_PM
87437 @@ -2652,7 +2652,7 @@ static int patch_alc650_specific(struct snd_ac97 * ac97)
87438 return 0;
87439 }
87440
87441 -static struct snd_ac97_build_ops patch_alc650_ops = {
87442 +static const struct snd_ac97_build_ops patch_alc650_ops = {
87443 .build_specific = patch_alc650_specific,
87444 .update_jacks = alc650_update_jacks
87445 };
87446 @@ -2804,7 +2804,7 @@ static int patch_alc655_specific(struct snd_ac97 * ac97)
87447 return 0;
87448 }
87449
87450 -static struct snd_ac97_build_ops patch_alc655_ops = {
87451 +static const struct snd_ac97_build_ops patch_alc655_ops = {
87452 .build_specific = patch_alc655_specific,
87453 .update_jacks = alc655_update_jacks
87454 };
87455 @@ -2916,7 +2916,7 @@ static int patch_alc850_specific(struct snd_ac97 *ac97)
87456 return 0;
87457 }
87458
87459 -static struct snd_ac97_build_ops patch_alc850_ops = {
87460 +static const struct snd_ac97_build_ops patch_alc850_ops = {
87461 .build_specific = patch_alc850_specific,
87462 .update_jacks = alc850_update_jacks
87463 };
87464 @@ -2978,7 +2978,7 @@ static int patch_cm9738_specific(struct snd_ac97 * ac97)
87465 return patch_build_controls(ac97, snd_ac97_cm9738_controls, ARRAY_SIZE(snd_ac97_cm9738_controls));
87466 }
87467
87468 -static struct snd_ac97_build_ops patch_cm9738_ops = {
87469 +static const struct snd_ac97_build_ops patch_cm9738_ops = {
87470 .build_specific = patch_cm9738_specific,
87471 .update_jacks = cm9738_update_jacks
87472 };
87473 @@ -3069,7 +3069,7 @@ static int patch_cm9739_post_spdif(struct snd_ac97 * ac97)
87474 return patch_build_controls(ac97, snd_ac97_cm9739_controls_spdif, ARRAY_SIZE(snd_ac97_cm9739_controls_spdif));
87475 }
87476
87477 -static struct snd_ac97_build_ops patch_cm9739_ops = {
87478 +static const struct snd_ac97_build_ops patch_cm9739_ops = {
87479 .build_specific = patch_cm9739_specific,
87480 .build_post_spdif = patch_cm9739_post_spdif,
87481 .update_jacks = cm9739_update_jacks
87482 @@ -3243,7 +3243,7 @@ static int patch_cm9761_specific(struct snd_ac97 * ac97)
87483 return patch_build_controls(ac97, snd_ac97_cm9761_controls, ARRAY_SIZE(snd_ac97_cm9761_controls));
87484 }
87485
87486 -static struct snd_ac97_build_ops patch_cm9761_ops = {
87487 +static const struct snd_ac97_build_ops patch_cm9761_ops = {
87488 .build_specific = patch_cm9761_specific,
87489 .build_post_spdif = patch_cm9761_post_spdif,
87490 .update_jacks = cm9761_update_jacks
87491 @@ -3339,7 +3339,7 @@ static int patch_cm9780_specific(struct snd_ac97 *ac97)
87492 return patch_build_controls(ac97, cm9780_controls, ARRAY_SIZE(cm9780_controls));
87493 }
87494
87495 -static struct snd_ac97_build_ops patch_cm9780_ops = {
87496 +static const struct snd_ac97_build_ops patch_cm9780_ops = {
87497 .build_specific = patch_cm9780_specific,
87498 .build_post_spdif = patch_cm9761_post_spdif /* identical with CM9761 */
87499 };
87500 @@ -3459,7 +3459,7 @@ static int patch_vt1616_specific(struct snd_ac97 * ac97)
87501 return 0;
87502 }
87503
87504 -static struct snd_ac97_build_ops patch_vt1616_ops = {
87505 +static const struct snd_ac97_build_ops patch_vt1616_ops = {
87506 .build_specific = patch_vt1616_specific
87507 };
87508
87509 @@ -3813,7 +3813,7 @@ static int patch_it2646_specific(struct snd_ac97 * ac97)
87510 return 0;
87511 }
87512
87513 -static struct snd_ac97_build_ops patch_it2646_ops = {
87514 +static const struct snd_ac97_build_ops patch_it2646_ops = {
87515 .build_specific = patch_it2646_specific,
87516 .update_jacks = it2646_update_jacks
87517 };
87518 @@ -3847,7 +3847,7 @@ static int patch_si3036_specific(struct snd_ac97 * ac97)
87519 return 0;
87520 }
87521
87522 -static struct snd_ac97_build_ops patch_si3036_ops = {
87523 +static const struct snd_ac97_build_ops patch_si3036_ops = {
87524 .build_specific = patch_si3036_specific,
87525 };
87526
87527 @@ -3914,7 +3914,7 @@ static int patch_ucb1400_specific(struct snd_ac97 * ac97)
87528 return 0;
87529 }
87530
87531 -static struct snd_ac97_build_ops patch_ucb1400_ops = {
87532 +static const struct snd_ac97_build_ops patch_ucb1400_ops = {
87533 .build_specific = patch_ucb1400_specific,
87534 };
87535
87536 diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
87537 index 99552fb..4dcc2c5 100644
87538 --- a/sound/pci/hda/hda_codec.h
87539 +++ b/sound/pci/hda/hda_codec.h
87540 @@ -580,7 +580,7 @@ struct hda_bus_ops {
87541 /* notify power-up/down from codec to controller */
87542 void (*pm_notify)(struct hda_bus *bus);
87543 #endif
87544 -};
87545 +} __no_const;
87546
87547 /* template to pass to the bus constructor */
87548 struct hda_bus_template {
87549 @@ -675,6 +675,7 @@ struct hda_codec_ops {
87550 int (*check_power_status)(struct hda_codec *codec, hda_nid_t nid);
87551 #endif
87552 };
87553 +typedef struct hda_codec_ops __no_const hda_codec_ops_no_const;
87554
87555 /* record for amp information cache */
87556 struct hda_cache_head {
87557 @@ -705,7 +706,7 @@ struct hda_pcm_ops {
87558 struct snd_pcm_substream *substream);
87559 int (*cleanup)(struct hda_pcm_stream *info, struct hda_codec *codec,
87560 struct snd_pcm_substream *substream);
87561 -};
87562 +} __no_const;
87563
87564 /* PCM information for each substream */
87565 struct hda_pcm_stream {
87566 @@ -760,7 +761,7 @@ struct hda_codec {
87567 const char *modelname; /* model name for preset */
87568
87569 /* set by patch */
87570 - struct hda_codec_ops patch_ops;
87571 + hda_codec_ops_no_const patch_ops;
87572
87573 /* PCM to create, set by patch_ops.build_pcms callback */
87574 unsigned int num_pcms;
87575 diff --git a/sound/pci/hda/patch_atihdmi.c b/sound/pci/hda/patch_atihdmi.c
87576 index fb684f0..2b11cea 100644
87577 --- a/sound/pci/hda/patch_atihdmi.c
87578 +++ b/sound/pci/hda/patch_atihdmi.c
87579 @@ -177,7 +177,7 @@ static int patch_atihdmi(struct hda_codec *codec)
87580 */
87581 spec->multiout.dig_out_nid = CVT_NID;
87582
87583 - codec->patch_ops = atihdmi_patch_ops;
87584 + memcpy((void *)&codec->patch_ops, &atihdmi_patch_ops, sizeof(atihdmi_patch_ops));
87585
87586 return 0;
87587 }
87588 diff --git a/sound/pci/hda/patch_intelhdmi.c b/sound/pci/hda/patch_intelhdmi.c
87589 index 7c23016..c5bfdd7 100644
87590 --- a/sound/pci/hda/patch_intelhdmi.c
87591 +++ b/sound/pci/hda/patch_intelhdmi.c
87592 @@ -511,10 +511,10 @@ static void hdmi_non_intrinsic_event(struct hda_codec *codec, unsigned int res)
87593 cp_ready);
87594
87595 /* TODO */
87596 - if (cp_state)
87597 - ;
87598 - if (cp_ready)
87599 - ;
87600 + if (cp_state) {
87601 + }
87602 + if (cp_ready) {
87603 + }
87604 }
87605
87606
87607 @@ -656,7 +656,7 @@ static int do_patch_intel_hdmi(struct hda_codec *codec)
87608 spec->multiout.dig_out_nid = cvt_nid;
87609
87610 codec->spec = spec;
87611 - codec->patch_ops = intel_hdmi_patch_ops;
87612 + memcpy((void *)&codec->patch_ops, &intel_hdmi_patch_ops, sizeof(intel_hdmi_patch_ops));
87613
87614 snd_hda_eld_proc_new(codec, &spec->sink_eld);
87615
87616 diff --git a/sound/pci/hda/patch_nvhdmi.c b/sound/pci/hda/patch_nvhdmi.c
87617 index 6afdab0..68ed352 100644
87618 --- a/sound/pci/hda/patch_nvhdmi.c
87619 +++ b/sound/pci/hda/patch_nvhdmi.c
87620 @@ -367,7 +367,7 @@ static int patch_nvhdmi_8ch(struct hda_codec *codec)
87621 spec->multiout.max_channels = 8;
87622 spec->multiout.dig_out_nid = Nv_Master_Convert_nid;
87623
87624 - codec->patch_ops = nvhdmi_patch_ops_8ch;
87625 + memcpy((void *)&codec->patch_ops, &nvhdmi_patch_ops_8ch, sizeof(nvhdmi_patch_ops_8ch));
87626
87627 return 0;
87628 }
87629 @@ -386,7 +386,7 @@ static int patch_nvhdmi_2ch(struct hda_codec *codec)
87630 spec->multiout.max_channels = 2;
87631 spec->multiout.dig_out_nid = Nv_Master_Convert_nid;
87632
87633 - codec->patch_ops = nvhdmi_patch_ops_2ch;
87634 + memcpy((void *)&codec->patch_ops, &nvhdmi_patch_ops_2ch, sizeof(nvhdmi_patch_ops_2ch));
87635
87636 return 0;
87637 }
87638 diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
87639 index 2fcd70d..a143eaf 100644
87640 --- a/sound/pci/hda/patch_sigmatel.c
87641 +++ b/sound/pci/hda/patch_sigmatel.c
87642 @@ -5220,7 +5220,7 @@ again:
87643 snd_hda_codec_write_cache(codec, nid, 0,
87644 AC_VERB_SET_CONNECT_SEL, num_dacs);
87645
87646 - codec->patch_ops = stac92xx_patch_ops;
87647 + memcpy((void *)&codec->patch_ops, &stac92xx_patch_ops, sizeof(stac92xx_patch_ops));
87648
87649 codec->proc_widget_hook = stac92hd_proc_hook;
87650
87651 @@ -5294,7 +5294,7 @@ static int patch_stac92hd71bxx(struct hda_codec *codec)
87652 return -ENOMEM;
87653
87654 codec->spec = spec;
87655 - codec->patch_ops = stac92xx_patch_ops;
87656 + memcpy((void *)&codec->patch_ops, &stac92xx_patch_ops, sizeof(stac92xx_patch_ops));
87657 spec->num_pins = STAC92HD71BXX_NUM_PINS;
87658 switch (codec->vendor_id) {
87659 case 0x111d76b6:
87660 diff --git a/sound/pci/ice1712/ice1712.h b/sound/pci/ice1712/ice1712.h
87661 index d063149..01599a4 100644
87662 --- a/sound/pci/ice1712/ice1712.h
87663 +++ b/sound/pci/ice1712/ice1712.h
87664 @@ -269,7 +269,7 @@ struct snd_ak4xxx_private {
87665 unsigned int mask_flags; /* total mask bits */
87666 struct snd_akm4xxx_ops {
87667 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
87668 - } ops;
87669 + } __no_const ops;
87670 };
87671
87672 struct snd_ice1712_spdif {
87673 @@ -285,7 +285,7 @@ struct snd_ice1712_spdif {
87674 int (*default_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
87675 void (*stream_get)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
87676 int (*stream_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
87677 - } ops;
87678 + } __no_const ops;
87679 };
87680
87681
87682 diff --git a/sound/pci/intel8x0m.c b/sound/pci/intel8x0m.c
87683 index 9e7d12e..3e3bc64 100644
87684 --- a/sound/pci/intel8x0m.c
87685 +++ b/sound/pci/intel8x0m.c
87686 @@ -1264,7 +1264,7 @@ static struct shortname_table {
87687 { 0x5455, "ALi M5455" },
87688 { 0x746d, "AMD AMD8111" },
87689 #endif
87690 - { 0 },
87691 + { 0, },
87692 };
87693
87694 static int __devinit snd_intel8x0m_probe(struct pci_dev *pci,
87695 diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c
87696 index 5518371..45cf7ac 100644
87697 --- a/sound/pci/ymfpci/ymfpci_main.c
87698 +++ b/sound/pci/ymfpci/ymfpci_main.c
87699 @@ -202,8 +202,8 @@ static void snd_ymfpci_hw_stop(struct snd_ymfpci *chip)
87700 if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
87701 break;
87702 }
87703 - if (atomic_read(&chip->interrupt_sleep_count)) {
87704 - atomic_set(&chip->interrupt_sleep_count, 0);
87705 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
87706 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
87707 wake_up(&chip->interrupt_sleep);
87708 }
87709 __end:
87710 @@ -787,7 +787,7 @@ static void snd_ymfpci_irq_wait(struct snd_ymfpci *chip)
87711 continue;
87712 init_waitqueue_entry(&wait, current);
87713 add_wait_queue(&chip->interrupt_sleep, &wait);
87714 - atomic_inc(&chip->interrupt_sleep_count);
87715 + atomic_inc_unchecked(&chip->interrupt_sleep_count);
87716 schedule_timeout_uninterruptible(msecs_to_jiffies(50));
87717 remove_wait_queue(&chip->interrupt_sleep, &wait);
87718 }
87719 @@ -825,8 +825,8 @@ static irqreturn_t snd_ymfpci_interrupt(int irq, void *dev_id)
87720 snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
87721 spin_unlock(&chip->reg_lock);
87722
87723 - if (atomic_read(&chip->interrupt_sleep_count)) {
87724 - atomic_set(&chip->interrupt_sleep_count, 0);
87725 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
87726 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
87727 wake_up(&chip->interrupt_sleep);
87728 }
87729 }
87730 @@ -2369,7 +2369,7 @@ int __devinit snd_ymfpci_create(struct snd_card *card,
87731 spin_lock_init(&chip->reg_lock);
87732 spin_lock_init(&chip->voice_lock);
87733 init_waitqueue_head(&chip->interrupt_sleep);
87734 - atomic_set(&chip->interrupt_sleep_count, 0);
87735 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
87736 chip->card = card;
87737 chip->pci = pci;
87738 chip->irq = -1;
87739 diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
87740 index 0a1b2f6..776bb19 100644
87741 --- a/sound/soc/soc-core.c
87742 +++ b/sound/soc/soc-core.c
87743 @@ -609,7 +609,7 @@ static int soc_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
87744 }
87745
87746 /* ASoC PCM operations */
87747 -static struct snd_pcm_ops soc_pcm_ops = {
87748 +static snd_pcm_ops_no_const soc_pcm_ops = {
87749 .open = soc_pcm_open,
87750 .close = soc_codec_close,
87751 .hw_params = soc_pcm_hw_params,
87752 diff --git a/sound/usb/usbaudio.c b/sound/usb/usbaudio.c
87753 index 79633ea..9732e90 100644
87754 --- a/sound/usb/usbaudio.c
87755 +++ b/sound/usb/usbaudio.c
87756 @@ -963,12 +963,12 @@ static int snd_usb_pcm_playback_trigger(struct snd_pcm_substream *substream,
87757 switch (cmd) {
87758 case SNDRV_PCM_TRIGGER_START:
87759 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
87760 - subs->ops.prepare = prepare_playback_urb;
87761 + *(void **)&subs->ops.prepare = prepare_playback_urb;
87762 return 0;
87763 case SNDRV_PCM_TRIGGER_STOP:
87764 return deactivate_urbs(subs, 0, 0);
87765 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
87766 - subs->ops.prepare = prepare_nodata_playback_urb;
87767 + *(void **)&subs->ops.prepare = prepare_nodata_playback_urb;
87768 return 0;
87769 default:
87770 return -EINVAL;
87771 @@ -985,15 +985,15 @@ static int snd_usb_pcm_capture_trigger(struct snd_pcm_substream *substream,
87772
87773 switch (cmd) {
87774 case SNDRV_PCM_TRIGGER_START:
87775 - subs->ops.retire = retire_capture_urb;
87776 + *(void **)&subs->ops.retire = retire_capture_urb;
87777 return start_urbs(subs, substream->runtime);
87778 case SNDRV_PCM_TRIGGER_STOP:
87779 return deactivate_urbs(subs, 0, 0);
87780 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
87781 - subs->ops.retire = retire_paused_capture_urb;
87782 + *(void **)&subs->ops.retire = retire_paused_capture_urb;
87783 return 0;
87784 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
87785 - subs->ops.retire = retire_capture_urb;
87786 + *(void **)&subs->ops.retire = retire_capture_urb;
87787 return 0;
87788 default:
87789 return -EINVAL;
87790 @@ -1542,7 +1542,7 @@ static int snd_usb_pcm_prepare(struct snd_pcm_substream *substream)
87791 /* for playback, submit the URBs now; otherwise, the first hwptr_done
87792 * updates for all URBs would happen at the same time when starting */
87793 if (subs->direction == SNDRV_PCM_STREAM_PLAYBACK) {
87794 - subs->ops.prepare = prepare_nodata_playback_urb;
87795 + *(void **)&subs->ops.prepare = prepare_nodata_playback_urb;
87796 return start_urbs(subs, runtime);
87797 } else
87798 return 0;
87799 @@ -2228,14 +2228,14 @@ static void init_substream(struct snd_usb_stream *as, int stream, struct audiofo
87800 subs->direction = stream;
87801 subs->dev = as->chip->dev;
87802 if (snd_usb_get_speed(subs->dev) == USB_SPEED_FULL) {
87803 - subs->ops = audio_urb_ops[stream];
87804 + memcpy((void *)&subs->ops, &audio_urb_ops[stream], sizeof(subs->ops));
87805 } else {
87806 - subs->ops = audio_urb_ops_high_speed[stream];
87807 + memcpy((void *)&subs->ops, &audio_urb_ops_high_speed[stream], sizeof(subs->ops));
87808 switch (as->chip->usb_id) {
87809 case USB_ID(0x041e, 0x3f02): /* E-Mu 0202 USB */
87810 case USB_ID(0x041e, 0x3f04): /* E-Mu 0404 USB */
87811 case USB_ID(0x041e, 0x3f0a): /* E-Mu Tracker Pre */
87812 - subs->ops.retire_sync = retire_playback_sync_urb_hs_emu;
87813 + *(void **)&subs->ops.retire_sync = retire_playback_sync_urb_hs_emu;
87814 break;
87815 }
87816 }
87817 diff --git a/tools/gcc/Makefile b/tools/gcc/Makefile
87818 new file mode 100644
87819 index 0000000..469b06a
87820 --- /dev/null
87821 +++ b/tools/gcc/Makefile
87822 @@ -0,0 +1,21 @@
87823 +#CC := gcc
87824 +#PLUGIN_SOURCE_FILES := pax_plugin.c
87825 +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
87826 +GCCPLUGINS_DIR := $(shell $(CC) -print-file-name=plugin)
87827 +#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W -std=gnu99
87828 +
87829 +HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include -I$(GCCPLUGINS_DIR)/include/c-family -std=gnu99 -ggdb
87830 +
87831 +hostlibs-y := constify_plugin.so
87832 +hostlibs-$(CONFIG_PAX_MEMORY_STACKLEAK) += stackleak_plugin.so
87833 +hostlibs-$(CONFIG_KALLOCSTAT_PLUGIN) += kallocstat_plugin.so
87834 +hostlibs-$(CONFIG_PAX_KERNEXEC_PLUGIN) += kernexec_plugin.so
87835 +hostlibs-$(CONFIG_CHECKER_PLUGIN) += checker_plugin.so
87836 +
87837 +always := $(hostlibs-y)
87838 +
87839 +constify_plugin-objs := constify_plugin.o
87840 +stackleak_plugin-objs := stackleak_plugin.o
87841 +kallocstat_plugin-objs := kallocstat_plugin.o
87842 +kernexec_plugin-objs := kernexec_plugin.o
87843 +checker_plugin-objs := checker_plugin.o
87844 diff --git a/tools/gcc/checker_plugin.c b/tools/gcc/checker_plugin.c
87845 new file mode 100644
87846 index 0000000..d41b5af
87847 --- /dev/null
87848 +++ b/tools/gcc/checker_plugin.c
87849 @@ -0,0 +1,171 @@
87850 +/*
87851 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
87852 + * Licensed under the GPL v2
87853 + *
87854 + * Note: the choice of the license means that the compilation process is
87855 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
87856 + * but for the kernel it doesn't matter since it doesn't link against
87857 + * any of the gcc libraries
87858 + *
87859 + * gcc plugin to implement various sparse (source code checker) features
87860 + *
87861 + * TODO:
87862 + * - define separate __iomem, __percpu and __rcu address spaces (lots of code to patch)
87863 + *
87864 + * BUGS:
87865 + * - none known
87866 + */
87867 +#include "gcc-plugin.h"
87868 +#include "config.h"
87869 +#include "system.h"
87870 +#include "coretypes.h"
87871 +#include "tree.h"
87872 +#include "tree-pass.h"
87873 +#include "flags.h"
87874 +#include "intl.h"
87875 +#include "toplev.h"
87876 +#include "plugin.h"
87877 +//#include "expr.h" where are you...
87878 +#include "diagnostic.h"
87879 +#include "plugin-version.h"
87880 +#include "tm.h"
87881 +#include "function.h"
87882 +#include "basic-block.h"
87883 +#include "gimple.h"
87884 +#include "rtl.h"
87885 +#include "emit-rtl.h"
87886 +#include "tree-flow.h"
87887 +#include "target.h"
87888 +
87889 +extern void c_register_addr_space (const char *str, addr_space_t as);
87890 +extern enum machine_mode default_addr_space_pointer_mode (addr_space_t);
87891 +extern enum machine_mode default_addr_space_address_mode (addr_space_t);
87892 +extern bool default_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as);
87893 +extern bool default_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as);
87894 +extern rtx default_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as);
87895 +
87896 +extern void print_gimple_stmt(FILE *, gimple, int, int);
87897 +extern rtx emit_move_insn(rtx x, rtx y);
87898 +
87899 +int plugin_is_GPL_compatible;
87900 +
87901 +static struct plugin_info checker_plugin_info = {
87902 + .version = "201111150100",
87903 +};
87904 +
87905 +#define ADDR_SPACE_KERNEL 0
87906 +#define ADDR_SPACE_FORCE_KERNEL 1
87907 +#define ADDR_SPACE_USER 2
87908 +#define ADDR_SPACE_FORCE_USER 3
87909 +#define ADDR_SPACE_IOMEM 0
87910 +#define ADDR_SPACE_FORCE_IOMEM 0
87911 +#define ADDR_SPACE_PERCPU 0
87912 +#define ADDR_SPACE_FORCE_PERCPU 0
87913 +#define ADDR_SPACE_RCU 0
87914 +#define ADDR_SPACE_FORCE_RCU 0
87915 +
87916 +static enum machine_mode checker_addr_space_pointer_mode(addr_space_t addrspace)
87917 +{
87918 + return default_addr_space_pointer_mode(ADDR_SPACE_GENERIC);
87919 +}
87920 +
87921 +static enum machine_mode checker_addr_space_address_mode(addr_space_t addrspace)
87922 +{
87923 + return default_addr_space_address_mode(ADDR_SPACE_GENERIC);
87924 +}
87925 +
87926 +static bool checker_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as)
87927 +{
87928 + return default_addr_space_valid_pointer_mode(mode, as);
87929 +}
87930 +
87931 +static bool checker_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as)
87932 +{
87933 + return default_addr_space_legitimate_address_p(mode, mem, strict, ADDR_SPACE_GENERIC);
87934 +}
87935 +
87936 +static rtx checker_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as)
87937 +{
87938 + return default_addr_space_legitimize_address(x, oldx, mode, as);
87939 +}
87940 +
87941 +static bool checker_addr_space_subset_p(addr_space_t subset, addr_space_t superset)
87942 +{
87943 + if (subset == ADDR_SPACE_FORCE_KERNEL && superset == ADDR_SPACE_KERNEL)
87944 + return true;
87945 +
87946 + if (subset == ADDR_SPACE_FORCE_USER && superset == ADDR_SPACE_USER)
87947 + return true;
87948 +
87949 + if (subset == ADDR_SPACE_FORCE_IOMEM && superset == ADDR_SPACE_IOMEM)
87950 + return true;
87951 +
87952 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_USER)
87953 + return true;
87954 +
87955 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_IOMEM)
87956 + return true;
87957 +
87958 + if (subset == ADDR_SPACE_USER && superset == ADDR_SPACE_FORCE_KERNEL)
87959 + return true;
87960 +
87961 + if (subset == ADDR_SPACE_IOMEM && superset == ADDR_SPACE_FORCE_KERNEL)
87962 + return true;
87963 +
87964 + return subset == superset;
87965 +}
87966 +
87967 +static rtx checker_addr_space_convert(rtx op, tree from_type, tree to_type)
87968 +{
87969 +// addr_space_t from_as = TYPE_ADDR_SPACE(TREE_TYPE(from_type));
87970 +// addr_space_t to_as = TYPE_ADDR_SPACE(TREE_TYPE(to_type));
87971 +
87972 + return op;
87973 +}
87974 +
87975 +static void register_checker_address_spaces(void *event_data, void *data)
87976 +{
87977 + c_register_addr_space("__kernel", ADDR_SPACE_KERNEL);
87978 + c_register_addr_space("__force_kernel", ADDR_SPACE_FORCE_KERNEL);
87979 + c_register_addr_space("__user", ADDR_SPACE_USER);
87980 + c_register_addr_space("__force_user", ADDR_SPACE_FORCE_USER);
87981 +// c_register_addr_space("__iomem", ADDR_SPACE_IOMEM);
87982 +// c_register_addr_space("__force_iomem", ADDR_SPACE_FORCE_IOMEM);
87983 +// c_register_addr_space("__percpu", ADDR_SPACE_PERCPU);
87984 +// c_register_addr_space("__force_percpu", ADDR_SPACE_FORCE_PERCPU);
87985 +// c_register_addr_space("__rcu", ADDR_SPACE_RCU);
87986 +// c_register_addr_space("__force_rcu", ADDR_SPACE_FORCE_RCU);
87987 +
87988 + targetm.addr_space.pointer_mode = checker_addr_space_pointer_mode;
87989 + targetm.addr_space.address_mode = checker_addr_space_address_mode;
87990 + targetm.addr_space.valid_pointer_mode = checker_addr_space_valid_pointer_mode;
87991 + targetm.addr_space.legitimate_address_p = checker_addr_space_legitimate_address_p;
87992 +// targetm.addr_space.legitimize_address = checker_addr_space_legitimize_address;
87993 + targetm.addr_space.subset_p = checker_addr_space_subset_p;
87994 + targetm.addr_space.convert = checker_addr_space_convert;
87995 +}
87996 +
87997 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
87998 +{
87999 + const char * const plugin_name = plugin_info->base_name;
88000 + const int argc = plugin_info->argc;
88001 + const struct plugin_argument * const argv = plugin_info->argv;
88002 + int i;
88003 +
88004 + if (!plugin_default_version_check(version, &gcc_version)) {
88005 + error(G_("incompatible gcc/plugin versions"));
88006 + return 1;
88007 + }
88008 +
88009 + register_callback(plugin_name, PLUGIN_INFO, NULL, &checker_plugin_info);
88010 +
88011 + for (i = 0; i < argc; ++i)
88012 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
88013 +
88014 + if (TARGET_64BIT == 0)
88015 + return 0;
88016 +
88017 + register_callback(plugin_name, PLUGIN_PRAGMAS, register_checker_address_spaces, NULL);
88018 +
88019 + return 0;
88020 +}
88021 diff --git a/tools/gcc/constify_plugin.c b/tools/gcc/constify_plugin.c
88022 new file mode 100644
88023 index 0000000..704a564
88024 --- /dev/null
88025 +++ b/tools/gcc/constify_plugin.c
88026 @@ -0,0 +1,303 @@
88027 +/*
88028 + * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
88029 + * Copyright 2011 by PaX Team <pageexec@freemail.hu>
88030 + * Licensed under the GPL v2, or (at your option) v3
88031 + *
88032 + * This gcc plugin constifies all structures which contain only function pointers or are explicitly marked for constification.
88033 + *
88034 + * Homepage:
88035 + * http://www.grsecurity.net/~ephox/const_plugin/
88036 + *
88037 + * Usage:
88038 + * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c
88039 + * $ gcc -fplugin=constify_plugin.so test.c -O2
88040 + */
88041 +
88042 +#include "gcc-plugin.h"
88043 +#include "config.h"
88044 +#include "system.h"
88045 +#include "coretypes.h"
88046 +#include "tree.h"
88047 +#include "tree-pass.h"
88048 +#include "flags.h"
88049 +#include "intl.h"
88050 +#include "toplev.h"
88051 +#include "plugin.h"
88052 +#include "diagnostic.h"
88053 +#include "plugin-version.h"
88054 +#include "tm.h"
88055 +#include "function.h"
88056 +#include "basic-block.h"
88057 +#include "gimple.h"
88058 +#include "rtl.h"
88059 +#include "emit-rtl.h"
88060 +#include "tree-flow.h"
88061 +
88062 +#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1(TYPE)
88063 +
88064 +int plugin_is_GPL_compatible;
88065 +
88066 +static struct plugin_info const_plugin_info = {
88067 + .version = "201111150100",
88068 + .help = "no-constify\tturn off constification\n",
88069 +};
88070 +
88071 +static void constify_type(tree type);
88072 +static bool walk_struct(tree node);
88073 +
88074 +static tree deconstify_type(tree old_type)
88075 +{
88076 + tree new_type, field;
88077 +
88078 + new_type = build_qualified_type(old_type, TYPE_QUALS(old_type) & ~TYPE_QUAL_CONST);
88079 + TYPE_FIELDS(new_type) = copy_list(TYPE_FIELDS(new_type));
88080 + for (field = TYPE_FIELDS(new_type); field; field = TREE_CHAIN(field))
88081 + DECL_FIELD_CONTEXT(field) = new_type;
88082 + TYPE_READONLY(new_type) = 0;
88083 + C_TYPE_FIELDS_READONLY(new_type) = 0;
88084 + return new_type;
88085 +}
88086 +
88087 +static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
88088 +{
88089 + tree type;
88090 +
88091 + *no_add_attrs = true;
88092 + if (TREE_CODE(*node) == FUNCTION_DECL) {
88093 + error("%qE attribute does not apply to functions", name);
88094 + return NULL_TREE;
88095 + }
88096 +
88097 + if (TREE_CODE(*node) == VAR_DECL) {
88098 + error("%qE attribute does not apply to variables", name);
88099 + return NULL_TREE;
88100 + }
88101 +
88102 + if (TYPE_P(*node)) {
88103 + if (TREE_CODE(*node) == RECORD_TYPE || TREE_CODE(*node) == UNION_TYPE)
88104 + *no_add_attrs = false;
88105 + else
88106 + error("%qE attribute applies to struct and union types only", name);
88107 + return NULL_TREE;
88108 + }
88109 +
88110 + type = TREE_TYPE(*node);
88111 +
88112 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) {
88113 + error("%qE attribute applies to struct and union types only", name);
88114 + return NULL_TREE;
88115 + }
88116 +
88117 + if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(type))) {
88118 + error("%qE attribute is already applied to the type", name);
88119 + return NULL_TREE;
88120 + }
88121 +
88122 + if (TREE_CODE(*node) == TYPE_DECL && !TYPE_READONLY(type)) {
88123 + error("%qE attribute used on type that is not constified", name);
88124 + return NULL_TREE;
88125 + }
88126 +
88127 + if (TREE_CODE(*node) == TYPE_DECL) {
88128 + TREE_TYPE(*node) = deconstify_type(type);
88129 + TREE_READONLY(*node) = 0;
88130 + return NULL_TREE;
88131 + }
88132 +
88133 + return NULL_TREE;
88134 +}
88135 +
88136 +static tree handle_do_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
88137 +{
88138 + *no_add_attrs = true;
88139 + if (!TYPE_P(*node)) {
88140 + error("%qE attribute applies to types only", name);
88141 + return NULL_TREE;
88142 + }
88143 +
88144 + if (TREE_CODE(*node) != RECORD_TYPE && TREE_CODE(*node) != UNION_TYPE) {
88145 + error("%qE attribute applies to struct and union types only", name);
88146 + return NULL_TREE;
88147 + }
88148 +
88149 + *no_add_attrs = false;
88150 + constify_type(*node);
88151 + return NULL_TREE;
88152 +}
88153 +
88154 +static struct attribute_spec no_const_attr = {
88155 + .name = "no_const",
88156 + .min_length = 0,
88157 + .max_length = 0,
88158 + .decl_required = false,
88159 + .type_required = false,
88160 + .function_type_required = false,
88161 + .handler = handle_no_const_attribute,
88162 +#if BUILDING_GCC_VERSION >= 4007
88163 + .affects_type_identity = true
88164 +#endif
88165 +};
88166 +
88167 +static struct attribute_spec do_const_attr = {
88168 + .name = "do_const",
88169 + .min_length = 0,
88170 + .max_length = 0,
88171 + .decl_required = false,
88172 + .type_required = false,
88173 + .function_type_required = false,
88174 + .handler = handle_do_const_attribute,
88175 +#if BUILDING_GCC_VERSION >= 4007
88176 + .affects_type_identity = true
88177 +#endif
88178 +};
88179 +
88180 +static void register_attributes(void *event_data, void *data)
88181 +{
88182 + register_attribute(&no_const_attr);
88183 + register_attribute(&do_const_attr);
88184 +}
88185 +
88186 +static void constify_type(tree type)
88187 +{
88188 + TYPE_READONLY(type) = 1;
88189 + C_TYPE_FIELDS_READONLY(type) = 1;
88190 +}
88191 +
88192 +static bool is_fptr(tree field)
88193 +{
88194 + tree ptr = TREE_TYPE(field);
88195 +
88196 + if (TREE_CODE(ptr) != POINTER_TYPE)
88197 + return false;
88198 +
88199 + return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE;
88200 +}
88201 +
88202 +static bool walk_struct(tree node)
88203 +{
88204 + tree field;
88205 +
88206 + if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node)))
88207 + return false;
88208 +
88209 + if (TYPE_FIELDS(node) == NULL_TREE)
88210 + return false;
88211 +
88212 + for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
88213 + tree type = TREE_TYPE(field);
88214 + enum tree_code code = TREE_CODE(type);
88215 + if (code == RECORD_TYPE || code == UNION_TYPE) {
88216 + if (!(walk_struct(type)))
88217 + return false;
88218 + } else if (!is_fptr(field) && !TREE_READONLY(field))
88219 + return false;
88220 + }
88221 + return true;
88222 +}
88223 +
88224 +static void finish_type(void *event_data, void *data)
88225 +{
88226 + tree type = (tree)event_data;
88227 +
88228 + if (type == NULL_TREE)
88229 + return;
88230 +
88231 + if (TYPE_READONLY(type))
88232 + return;
88233 +
88234 + if (walk_struct(type))
88235 + constify_type(type);
88236 +}
88237 +
88238 +static unsigned int check_local_variables(void);
88239 +
88240 +struct gimple_opt_pass pass_local_variable = {
88241 + {
88242 + .type = GIMPLE_PASS,
88243 + .name = "check_local_variables",
88244 + .gate = NULL,
88245 + .execute = check_local_variables,
88246 + .sub = NULL,
88247 + .next = NULL,
88248 + .static_pass_number = 0,
88249 + .tv_id = TV_NONE,
88250 + .properties_required = 0,
88251 + .properties_provided = 0,
88252 + .properties_destroyed = 0,
88253 + .todo_flags_start = 0,
88254 + .todo_flags_finish = 0
88255 + }
88256 +};
88257 +
88258 +static unsigned int check_local_variables(void)
88259 +{
88260 + tree var;
88261 + referenced_var_iterator rvi;
88262 +
88263 +#if BUILDING_GCC_VERSION == 4005
88264 + FOR_EACH_REFERENCED_VAR(var, rvi) {
88265 +#else
88266 + FOR_EACH_REFERENCED_VAR(cfun, var, rvi) {
88267 +#endif
88268 + tree type = TREE_TYPE(var);
88269 +
88270 + if (!DECL_P(var) || TREE_STATIC(var) || DECL_EXTERNAL(var))
88271 + continue;
88272 +
88273 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
88274 + continue;
88275 +
88276 + if (!TYPE_READONLY(type))
88277 + continue;
88278 +
88279 +// if (lookup_attribute("no_const", DECL_ATTRIBUTES(var)))
88280 +// continue;
88281 +
88282 +// if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type)))
88283 +// continue;
88284 +
88285 + if (walk_struct(type)) {
88286 + error("constified variable %qE cannot be local", var);
88287 + return 1;
88288 + }
88289 + }
88290 + return 0;
88291 +}
88292 +
88293 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
88294 +{
88295 + const char * const plugin_name = plugin_info->base_name;
88296 + const int argc = plugin_info->argc;
88297 + const struct plugin_argument * const argv = plugin_info->argv;
88298 + int i;
88299 + bool constify = true;
88300 +
88301 + struct register_pass_info local_variable_pass_info = {
88302 + .pass = &pass_local_variable.pass,
88303 + .reference_pass_name = "*referenced_vars",
88304 + .ref_pass_instance_number = 0,
88305 + .pos_op = PASS_POS_INSERT_AFTER
88306 + };
88307 +
88308 + if (!plugin_default_version_check(version, &gcc_version)) {
88309 + error(G_("incompatible gcc/plugin versions"));
88310 + return 1;
88311 + }
88312 +
88313 + for (i = 0; i < argc; ++i) {
88314 + if (!(strcmp(argv[i].key, "no-constify"))) {
88315 + constify = false;
88316 + continue;
88317 + }
88318 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
88319 + }
88320 +
88321 + register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info);
88322 + if (constify) {
88323 + register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
88324 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &local_variable_pass_info);
88325 + }
88326 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
88327 +
88328 + return 0;
88329 +}
88330 diff --git a/tools/gcc/kallocstat_plugin.c b/tools/gcc/kallocstat_plugin.c
88331 new file mode 100644
88332 index 0000000..a5eabce
88333 --- /dev/null
88334 +++ b/tools/gcc/kallocstat_plugin.c
88335 @@ -0,0 +1,167 @@
88336 +/*
88337 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
88338 + * Licensed under the GPL v2
88339 + *
88340 + * Note: the choice of the license means that the compilation process is
88341 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
88342 + * but for the kernel it doesn't matter since it doesn't link against
88343 + * any of the gcc libraries
88344 + *
88345 + * gcc plugin to find the distribution of k*alloc sizes
88346 + *
88347 + * TODO:
88348 + *
88349 + * BUGS:
88350 + * - none known
88351 + */
88352 +#include "gcc-plugin.h"
88353 +#include "config.h"
88354 +#include "system.h"
88355 +#include "coretypes.h"
88356 +#include "tree.h"
88357 +#include "tree-pass.h"
88358 +#include "flags.h"
88359 +#include "intl.h"
88360 +#include "toplev.h"
88361 +#include "plugin.h"
88362 +//#include "expr.h" where are you...
88363 +#include "diagnostic.h"
88364 +#include "plugin-version.h"
88365 +#include "tm.h"
88366 +#include "function.h"
88367 +#include "basic-block.h"
88368 +#include "gimple.h"
88369 +#include "rtl.h"
88370 +#include "emit-rtl.h"
88371 +
88372 +extern void print_gimple_stmt(FILE *, gimple, int, int);
88373 +
88374 +int plugin_is_GPL_compatible;
88375 +
88376 +static const char * const kalloc_functions[] = {
88377 + "__kmalloc",
88378 + "kmalloc",
88379 + "kmalloc_large",
88380 + "kmalloc_node",
88381 + "kmalloc_order",
88382 + "kmalloc_order_trace",
88383 + "kmalloc_slab",
88384 + "kzalloc",
88385 + "kzalloc_node",
88386 +};
88387 +
88388 +static struct plugin_info kallocstat_plugin_info = {
88389 + .version = "201111150100",
88390 +};
88391 +
88392 +static unsigned int execute_kallocstat(void);
88393 +
88394 +static struct gimple_opt_pass kallocstat_pass = {
88395 + .pass = {
88396 + .type = GIMPLE_PASS,
88397 + .name = "kallocstat",
88398 + .gate = NULL,
88399 + .execute = execute_kallocstat,
88400 + .sub = NULL,
88401 + .next = NULL,
88402 + .static_pass_number = 0,
88403 + .tv_id = TV_NONE,
88404 + .properties_required = 0,
88405 + .properties_provided = 0,
88406 + .properties_destroyed = 0,
88407 + .todo_flags_start = 0,
88408 + .todo_flags_finish = 0
88409 + }
88410 +};
88411 +
88412 +static bool is_kalloc(const char *fnname)
88413 +{
88414 + size_t i;
88415 +
88416 + for (i = 0; i < ARRAY_SIZE(kalloc_functions); i++)
88417 + if (!strcmp(fnname, kalloc_functions[i]))
88418 + return true;
88419 + return false;
88420 +}
88421 +
88422 +static unsigned int execute_kallocstat(void)
88423 +{
88424 + basic_block bb;
88425 +
88426 + // 1. loop through BBs and GIMPLE statements
88427 + FOR_EACH_BB(bb) {
88428 + gimple_stmt_iterator gsi;
88429 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
88430 + // gimple match:
88431 + tree fndecl, size;
88432 + gimple call_stmt;
88433 + const char *fnname;
88434 +
88435 + // is it a call
88436 + call_stmt = gsi_stmt(gsi);
88437 + if (!is_gimple_call(call_stmt))
88438 + continue;
88439 + fndecl = gimple_call_fndecl(call_stmt);
88440 + if (fndecl == NULL_TREE)
88441 + continue;
88442 + if (TREE_CODE(fndecl) != FUNCTION_DECL)
88443 + continue;
88444 +
88445 + // is it a call to k*alloc
88446 + fnname = IDENTIFIER_POINTER(DECL_NAME(fndecl));
88447 + if (!is_kalloc(fnname))
88448 + continue;
88449 +
88450 + // is the size arg the result of a simple const assignment
88451 + size = gimple_call_arg(call_stmt, 0);
88452 + while (true) {
88453 + gimple def_stmt;
88454 + expanded_location xloc;
88455 + size_t size_val;
88456 +
88457 + if (TREE_CODE(size) != SSA_NAME)
88458 + break;
88459 + def_stmt = SSA_NAME_DEF_STMT(size);
88460 + if (!def_stmt || !is_gimple_assign(def_stmt))
88461 + break;
88462 + if (gimple_num_ops(def_stmt) != 2)
88463 + break;
88464 + size = gimple_assign_rhs1(def_stmt);
88465 + if (!TREE_CONSTANT(size))
88466 + continue;
88467 + xloc = expand_location(gimple_location(def_stmt));
88468 + if (!xloc.file)
88469 + xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
88470 + size_val = TREE_INT_CST_LOW(size);
88471 + fprintf(stderr, "kallocsize: %8zu %8zx %s %s:%u\n", size_val, size_val, fnname, xloc.file, xloc.line);
88472 + break;
88473 + }
88474 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
88475 +//debug_tree(gimple_call_fn(call_stmt));
88476 +//print_node(stderr, "pax", fndecl, 4);
88477 + }
88478 + }
88479 +
88480 + return 0;
88481 +}
88482 +
88483 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
88484 +{
88485 + const char * const plugin_name = plugin_info->base_name;
88486 + struct register_pass_info kallocstat_pass_info = {
88487 + .pass = &kallocstat_pass.pass,
88488 + .reference_pass_name = "ssa",
88489 + .ref_pass_instance_number = 0,
88490 + .pos_op = PASS_POS_INSERT_AFTER
88491 + };
88492 +
88493 + if (!plugin_default_version_check(version, &gcc_version)) {
88494 + error(G_("incompatible gcc/plugin versions"));
88495 + return 1;
88496 + }
88497 +
88498 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kallocstat_plugin_info);
88499 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kallocstat_pass_info);
88500 +
88501 + return 0;
88502 +}
88503 diff --git a/tools/gcc/kernexec_plugin.c b/tools/gcc/kernexec_plugin.c
88504 new file mode 100644
88505 index 0000000..008f159
88506 --- /dev/null
88507 +++ b/tools/gcc/kernexec_plugin.c
88508 @@ -0,0 +1,427 @@
88509 +/*
88510 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
88511 + * Licensed under the GPL v2
88512 + *
88513 + * Note: the choice of the license means that the compilation process is
88514 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
88515 + * but for the kernel it doesn't matter since it doesn't link against
88516 + * any of the gcc libraries
88517 + *
88518 + * gcc plugin to make KERNEXEC/amd64 almost as good as it is on i386
88519 + *
88520 + * TODO:
88521 + *
88522 + * BUGS:
88523 + * - none known
88524 + */
88525 +#include "gcc-plugin.h"
88526 +#include "config.h"
88527 +#include "system.h"
88528 +#include "coretypes.h"
88529 +#include "tree.h"
88530 +#include "tree-pass.h"
88531 +#include "flags.h"
88532 +#include "intl.h"
88533 +#include "toplev.h"
88534 +#include "plugin.h"
88535 +//#include "expr.h" where are you...
88536 +#include "diagnostic.h"
88537 +#include "plugin-version.h"
88538 +#include "tm.h"
88539 +#include "function.h"
88540 +#include "basic-block.h"
88541 +#include "gimple.h"
88542 +#include "rtl.h"
88543 +#include "emit-rtl.h"
88544 +#include "tree-flow.h"
88545 +
88546 +extern void print_gimple_stmt(FILE *, gimple, int, int);
88547 +extern rtx emit_move_insn(rtx x, rtx y);
88548 +
88549 +int plugin_is_GPL_compatible;
88550 +
88551 +static struct plugin_info kernexec_plugin_info = {
88552 + .version = "201111291120",
88553 + .help = "method=[bts|or]\tinstrumentation method\n"
88554 +};
88555 +
88556 +static unsigned int execute_kernexec_reload(void);
88557 +static unsigned int execute_kernexec_fptr(void);
88558 +static unsigned int execute_kernexec_retaddr(void);
88559 +static bool kernexec_cmodel_check(void);
88560 +
88561 +static void (*kernexec_instrument_fptr)(gimple_stmt_iterator *);
88562 +static void (*kernexec_instrument_retaddr)(rtx);
88563 +
88564 +static struct gimple_opt_pass kernexec_reload_pass = {
88565 + .pass = {
88566 + .type = GIMPLE_PASS,
88567 + .name = "kernexec_reload",
88568 + .gate = kernexec_cmodel_check,
88569 + .execute = execute_kernexec_reload,
88570 + .sub = NULL,
88571 + .next = NULL,
88572 + .static_pass_number = 0,
88573 + .tv_id = TV_NONE,
88574 + .properties_required = 0,
88575 + .properties_provided = 0,
88576 + .properties_destroyed = 0,
88577 + .todo_flags_start = 0,
88578 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
88579 + }
88580 +};
88581 +
88582 +static struct gimple_opt_pass kernexec_fptr_pass = {
88583 + .pass = {
88584 + .type = GIMPLE_PASS,
88585 + .name = "kernexec_fptr",
88586 + .gate = kernexec_cmodel_check,
88587 + .execute = execute_kernexec_fptr,
88588 + .sub = NULL,
88589 + .next = NULL,
88590 + .static_pass_number = 0,
88591 + .tv_id = TV_NONE,
88592 + .properties_required = 0,
88593 + .properties_provided = 0,
88594 + .properties_destroyed = 0,
88595 + .todo_flags_start = 0,
88596 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
88597 + }
88598 +};
88599 +
88600 +static struct rtl_opt_pass kernexec_retaddr_pass = {
88601 + .pass = {
88602 + .type = RTL_PASS,
88603 + .name = "kernexec_retaddr",
88604 + .gate = kernexec_cmodel_check,
88605 + .execute = execute_kernexec_retaddr,
88606 + .sub = NULL,
88607 + .next = NULL,
88608 + .static_pass_number = 0,
88609 + .tv_id = TV_NONE,
88610 + .properties_required = 0,
88611 + .properties_provided = 0,
88612 + .properties_destroyed = 0,
88613 + .todo_flags_start = 0,
88614 + .todo_flags_finish = TODO_dump_func | TODO_ggc_collect
88615 + }
88616 +};
88617 +
88618 +static bool kernexec_cmodel_check(void)
88619 +{
88620 + tree section;
88621 +
88622 + if (ix86_cmodel != CM_KERNEL)
88623 + return false;
88624 +
88625 + section = lookup_attribute("section", DECL_ATTRIBUTES(current_function_decl));
88626 + if (!section || !TREE_VALUE(section))
88627 + return true;
88628 +
88629 + section = TREE_VALUE(TREE_VALUE(section));
88630 + if (strncmp(TREE_STRING_POINTER(section), ".vsyscall_", 10))
88631 + return true;
88632 +
88633 + return false;
88634 +}
88635 +
88636 +/*
88637 + * add special KERNEXEC instrumentation: reload %r10 after it has been clobbered
88638 + */
88639 +static void kernexec_reload_fptr_mask(gimple_stmt_iterator *gsi)
88640 +{
88641 + gimple asm_movabs_stmt;
88642 +
88643 + // build asm volatile("movabs $0x8000000000000000, %%r10\n\t" : : : );
88644 + asm_movabs_stmt = gimple_build_asm_vec("movabs $0x8000000000000000, %%r10\n\t", NULL, NULL, NULL, NULL);
88645 + gimple_asm_set_volatile(asm_movabs_stmt, true);
88646 + gsi_insert_after(gsi, asm_movabs_stmt, GSI_CONTINUE_LINKING);
88647 + update_stmt(asm_movabs_stmt);
88648 +}
88649 +
88650 +/*
88651 + * find all asm() stmts that clobber r10 and add a reload of r10
88652 + */
88653 +static unsigned int execute_kernexec_reload(void)
88654 +{
88655 + basic_block bb;
88656 +
88657 + // 1. loop through BBs and GIMPLE statements
88658 + FOR_EACH_BB(bb) {
88659 + gimple_stmt_iterator gsi;
88660 +
88661 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
88662 + // gimple match: __asm__ ("" : : : "r10");
88663 + gimple asm_stmt;
88664 + size_t nclobbers;
88665 +
88666 + // is it an asm ...
88667 + asm_stmt = gsi_stmt(gsi);
88668 + if (gimple_code(asm_stmt) != GIMPLE_ASM)
88669 + continue;
88670 +
88671 + // ... clobbering r10
88672 + nclobbers = gimple_asm_nclobbers(asm_stmt);
88673 + while (nclobbers--) {
88674 + tree op = gimple_asm_clobber_op(asm_stmt, nclobbers);
88675 + if (strcmp(TREE_STRING_POINTER(TREE_VALUE(op)), "r10"))
88676 + continue;
88677 + kernexec_reload_fptr_mask(&gsi);
88678 +//print_gimple_stmt(stderr, asm_stmt, 0, TDF_LINENO);
88679 + break;
88680 + }
88681 + }
88682 + }
88683 +
88684 + return 0;
88685 +}
88686 +
88687 +/*
88688 + * add special KERNEXEC instrumentation: force MSB of fptr to 1, which will produce
88689 + * a non-canonical address from a userland ptr and will just trigger a GPF on dereference
88690 + */
88691 +static void kernexec_instrument_fptr_bts(gimple_stmt_iterator *gsi)
88692 +{
88693 + gimple assign_intptr, assign_new_fptr, call_stmt;
88694 + tree intptr, old_fptr, new_fptr, kernexec_mask;
88695 +
88696 + call_stmt = gsi_stmt(*gsi);
88697 + old_fptr = gimple_call_fn(call_stmt);
88698 +
88699 + // create temporary unsigned long variable used for bitops and cast fptr to it
88700 + intptr = create_tmp_var(long_unsigned_type_node, "kernexec_bts");
88701 + add_referenced_var(intptr);
88702 + mark_sym_for_renaming(intptr);
88703 + assign_intptr = gimple_build_assign(intptr, fold_convert(long_unsigned_type_node, old_fptr));
88704 + gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
88705 + update_stmt(assign_intptr);
88706 +
88707 + // apply logical or to temporary unsigned long and bitmask
88708 + kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0x8000000000000000LL);
88709 +// kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0xffffffff80000000LL);
88710 + assign_intptr = gimple_build_assign(intptr, fold_build2(BIT_IOR_EXPR, long_long_unsigned_type_node, intptr, kernexec_mask));
88711 + gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
88712 + update_stmt(assign_intptr);
88713 +
88714 + // cast temporary unsigned long back to a temporary fptr variable
88715 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec");
88716 + add_referenced_var(new_fptr);
88717 + mark_sym_for_renaming(new_fptr);
88718 + assign_new_fptr = gimple_build_assign(new_fptr, fold_convert(TREE_TYPE(old_fptr), intptr));
88719 + gsi_insert_before(gsi, assign_new_fptr, GSI_SAME_STMT);
88720 + update_stmt(assign_new_fptr);
88721 +
88722 + // replace call stmt fn with the new fptr
88723 + gimple_call_set_fn(call_stmt, new_fptr);
88724 + update_stmt(call_stmt);
88725 +}
88726 +
88727 +static void kernexec_instrument_fptr_or(gimple_stmt_iterator *gsi)
88728 +{
88729 + gimple asm_or_stmt, call_stmt;
88730 + tree old_fptr, new_fptr, input, output;
88731 + VEC(tree, gc) *inputs = NULL;
88732 + VEC(tree, gc) *outputs = NULL;
88733 +
88734 + call_stmt = gsi_stmt(*gsi);
88735 + old_fptr = gimple_call_fn(call_stmt);
88736 +
88737 + // create temporary fptr variable
88738 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_or");
88739 + add_referenced_var(new_fptr);
88740 + mark_sym_for_renaming(new_fptr);
88741 +
88742 + // build asm volatile("orq %%r10, %0\n\t" : "=r"(new_fptr) : "0"(old_fptr));
88743 + input = build_tree_list(NULL_TREE, build_string(2, "0"));
88744 + input = chainon(NULL_TREE, build_tree_list(input, old_fptr));
88745 + output = build_tree_list(NULL_TREE, build_string(3, "=r"));
88746 + output = chainon(NULL_TREE, build_tree_list(output, new_fptr));
88747 + VEC_safe_push(tree, gc, inputs, input);
88748 + VEC_safe_push(tree, gc, outputs, output);
88749 + asm_or_stmt = gimple_build_asm_vec("orq %%r10, %0\n\t", inputs, outputs, NULL, NULL);
88750 + gimple_asm_set_volatile(asm_or_stmt, true);
88751 + gsi_insert_before(gsi, asm_or_stmt, GSI_SAME_STMT);
88752 + update_stmt(asm_or_stmt);
88753 +
88754 + // replace call stmt fn with the new fptr
88755 + gimple_call_set_fn(call_stmt, new_fptr);
88756 + update_stmt(call_stmt);
88757 +}
88758 +
88759 +/*
88760 + * find all C level function pointer dereferences and forcibly set the highest bit of the pointer
88761 + */
88762 +static unsigned int execute_kernexec_fptr(void)
88763 +{
88764 + basic_block bb;
88765 +
88766 + // 1. loop through BBs and GIMPLE statements
88767 + FOR_EACH_BB(bb) {
88768 + gimple_stmt_iterator gsi;
88769 +
88770 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
88771 + // gimple match: h_1 = get_fptr (); D.2709_3 = h_1 (x_2(D));
88772 + tree fn;
88773 + gimple call_stmt;
88774 +
88775 + // is it a call ...
88776 + call_stmt = gsi_stmt(gsi);
88777 + if (!is_gimple_call(call_stmt))
88778 + continue;
88779 + fn = gimple_call_fn(call_stmt);
88780 + if (TREE_CODE(fn) == ADDR_EXPR)
88781 + continue;
88782 + if (TREE_CODE(fn) != SSA_NAME)
88783 + gcc_unreachable();
88784 +
88785 + // ... through a function pointer
88786 + fn = SSA_NAME_VAR(fn);
88787 + if (TREE_CODE(fn) != VAR_DECL && TREE_CODE(fn) != PARM_DECL)
88788 + continue;
88789 + fn = TREE_TYPE(fn);
88790 + if (TREE_CODE(fn) != POINTER_TYPE)
88791 + continue;
88792 + fn = TREE_TYPE(fn);
88793 + if (TREE_CODE(fn) != FUNCTION_TYPE)
88794 + continue;
88795 +
88796 + kernexec_instrument_fptr(&gsi);
88797 +
88798 +//debug_tree(gimple_call_fn(call_stmt));
88799 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
88800 + }
88801 + }
88802 +
88803 + return 0;
88804 +}
88805 +
88806 +// add special KERNEXEC instrumentation: btsq $63,(%rsp) just before retn
88807 +static void kernexec_instrument_retaddr_bts(rtx insn)
88808 +{
88809 + rtx btsq;
88810 + rtvec argvec, constraintvec, labelvec;
88811 + int line;
88812 +
88813 + // create asm volatile("btsq $63,(%%rsp)":::)
88814 + argvec = rtvec_alloc(0);
88815 + constraintvec = rtvec_alloc(0);
88816 + labelvec = rtvec_alloc(0);
88817 + line = expand_location(RTL_LOCATION(insn)).line;
88818 + btsq = gen_rtx_ASM_OPERANDS(VOIDmode, "btsq $63,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
88819 + MEM_VOLATILE_P(btsq) = 1;
88820 +// RTX_FRAME_RELATED_P(btsq) = 1; // not for ASM_OPERANDS
88821 + emit_insn_before(btsq, insn);
88822 +}
88823 +
88824 +// add special KERNEXEC instrumentation: orq %r10,(%rsp) just before retn
88825 +static void kernexec_instrument_retaddr_or(rtx insn)
88826 +{
88827 + rtx orq;
88828 + rtvec argvec, constraintvec, labelvec;
88829 + int line;
88830 +
88831 + // create asm volatile("orq %%r10,(%%rsp)":::)
88832 + argvec = rtvec_alloc(0);
88833 + constraintvec = rtvec_alloc(0);
88834 + labelvec = rtvec_alloc(0);
88835 + line = expand_location(RTL_LOCATION(insn)).line;
88836 + orq = gen_rtx_ASM_OPERANDS(VOIDmode, "orq %%r10,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
88837 + MEM_VOLATILE_P(orq) = 1;
88838 +// RTX_FRAME_RELATED_P(orq) = 1; // not for ASM_OPERANDS
88839 + emit_insn_before(orq, insn);
88840 +}
88841 +
88842 +/*
88843 + * find all asm level function returns and forcibly set the highest bit of the return address
88844 + */
88845 +static unsigned int execute_kernexec_retaddr(void)
88846 +{
88847 + rtx insn;
88848 +
88849 + // 1. find function returns
88850 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
88851 + // rtl match: (jump_insn 41 40 42 2 (return) fptr.c:42 634 {return_internal} (nil))
88852 + // (jump_insn 12 9 11 2 (parallel [ (return) (unspec [ (0) ] UNSPEC_REP) ]) fptr.c:46 635 {return_internal_long} (nil))
88853 + rtx body;
88854 +
88855 + // is it a retn
88856 + if (!JUMP_P(insn))
88857 + continue;
88858 + body = PATTERN(insn);
88859 + if (GET_CODE(body) == PARALLEL)
88860 + body = XVECEXP(body, 0, 0);
88861 + if (GET_CODE(body) != RETURN)
88862 + continue;
88863 + kernexec_instrument_retaddr(insn);
88864 + }
88865 +
88866 +// print_simple_rtl(stderr, get_insns());
88867 +// print_rtl(stderr, get_insns());
88868 +
88869 + return 0;
88870 +}
88871 +
88872 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
88873 +{
88874 + const char * const plugin_name = plugin_info->base_name;
88875 + const int argc = plugin_info->argc;
88876 + const struct plugin_argument * const argv = plugin_info->argv;
88877 + int i;
88878 + struct register_pass_info kernexec_reload_pass_info = {
88879 + .pass = &kernexec_reload_pass.pass,
88880 + .reference_pass_name = "ssa",
88881 + .ref_pass_instance_number = 0,
88882 + .pos_op = PASS_POS_INSERT_AFTER
88883 + };
88884 + struct register_pass_info kernexec_fptr_pass_info = {
88885 + .pass = &kernexec_fptr_pass.pass,
88886 + .reference_pass_name = "ssa",
88887 + .ref_pass_instance_number = 0,
88888 + .pos_op = PASS_POS_INSERT_AFTER
88889 + };
88890 + struct register_pass_info kernexec_retaddr_pass_info = {
88891 + .pass = &kernexec_retaddr_pass.pass,
88892 + .reference_pass_name = "pro_and_epilogue",
88893 + .ref_pass_instance_number = 0,
88894 + .pos_op = PASS_POS_INSERT_AFTER
88895 + };
88896 +
88897 + if (!plugin_default_version_check(version, &gcc_version)) {
88898 + error(G_("incompatible gcc/plugin versions"));
88899 + return 1;
88900 + }
88901 +
88902 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kernexec_plugin_info);
88903 +
88904 + if (TARGET_64BIT == 0)
88905 + return 0;
88906 +
88907 + for (i = 0; i < argc; ++i) {
88908 + if (!strcmp(argv[i].key, "method")) {
88909 + if (!argv[i].value) {
88910 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
88911 + continue;
88912 + }
88913 + if (!strcmp(argv[i].value, "bts")) {
88914 + kernexec_instrument_fptr = kernexec_instrument_fptr_bts;
88915 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_bts;
88916 + } else if (!strcmp(argv[i].value, "or")) {
88917 + kernexec_instrument_fptr = kernexec_instrument_fptr_or;
88918 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_or;
88919 + fix_register("r10", 1, 1);
88920 + } else
88921 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
88922 + continue;
88923 + }
88924 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
88925 + }
88926 + if (!kernexec_instrument_fptr || !kernexec_instrument_retaddr)
88927 + error(G_("no instrumentation method was selected via '-fplugin-arg-%s-method'"), plugin_name);
88928 +
88929 + if (kernexec_instrument_fptr == kernexec_instrument_fptr_or)
88930 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_reload_pass_info);
88931 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_fptr_pass_info);
88932 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_retaddr_pass_info);
88933 +
88934 + return 0;
88935 +}
88936 diff --git a/tools/gcc/stackleak_plugin.c b/tools/gcc/stackleak_plugin.c
88937 new file mode 100644
88938 index 0000000..4a9b187
88939 --- /dev/null
88940 +++ b/tools/gcc/stackleak_plugin.c
88941 @@ -0,0 +1,326 @@
88942 +/*
88943 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
88944 + * Licensed under the GPL v2
88945 + *
88946 + * Note: the choice of the license means that the compilation process is
88947 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
88948 + * but for the kernel it doesn't matter since it doesn't link against
88949 + * any of the gcc libraries
88950 + *
88951 + * gcc plugin to help implement various PaX features
88952 + *
88953 + * - track lowest stack pointer
88954 + *
88955 + * TODO:
88956 + * - initialize all local variables
88957 + *
88958 + * BUGS:
88959 + * - none known
88960 + */
88961 +#include "gcc-plugin.h"
88962 +#include "config.h"
88963 +#include "system.h"
88964 +#include "coretypes.h"
88965 +#include "tree.h"
88966 +#include "tree-pass.h"
88967 +#include "flags.h"
88968 +#include "intl.h"
88969 +#include "toplev.h"
88970 +#include "plugin.h"
88971 +//#include "expr.h" where are you...
88972 +#include "diagnostic.h"
88973 +#include "plugin-version.h"
88974 +#include "tm.h"
88975 +#include "function.h"
88976 +#include "basic-block.h"
88977 +#include "gimple.h"
88978 +#include "rtl.h"
88979 +#include "emit-rtl.h"
88980 +
88981 +extern void print_gimple_stmt(FILE *, gimple, int, int);
88982 +
88983 +int plugin_is_GPL_compatible;
88984 +
88985 +static int track_frame_size = -1;
88986 +static const char track_function[] = "pax_track_stack";
88987 +static const char check_function[] = "pax_check_alloca";
88988 +static tree pax_check_alloca_decl;
88989 +static tree pax_track_stack_decl;
88990 +static bool init_locals;
88991 +
88992 +static struct plugin_info stackleak_plugin_info = {
88993 + .version = "201203021600",
88994 + .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
88995 +// "initialize-locals\t\tforcibly initialize all stack frames\n"
88996 +};
88997 +
88998 +static bool gate_stackleak_track_stack(void);
88999 +static unsigned int execute_stackleak_tree_instrument(void);
89000 +static unsigned int execute_stackleak_final(void);
89001 +
89002 +static struct gimple_opt_pass stackleak_tree_instrument_pass = {
89003 + .pass = {
89004 + .type = GIMPLE_PASS,
89005 + .name = "stackleak_tree_instrument",
89006 + .gate = gate_stackleak_track_stack,
89007 + .execute = execute_stackleak_tree_instrument,
89008 + .sub = NULL,
89009 + .next = NULL,
89010 + .static_pass_number = 0,
89011 + .tv_id = TV_NONE,
89012 + .properties_required = PROP_gimple_leh | PROP_cfg,
89013 + .properties_provided = 0,
89014 + .properties_destroyed = 0,
89015 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
89016 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa
89017 + }
89018 +};
89019 +
89020 +static struct rtl_opt_pass stackleak_final_rtl_opt_pass = {
89021 + .pass = {
89022 + .type = RTL_PASS,
89023 + .name = "stackleak_final",
89024 + .gate = gate_stackleak_track_stack,
89025 + .execute = execute_stackleak_final,
89026 + .sub = NULL,
89027 + .next = NULL,
89028 + .static_pass_number = 0,
89029 + .tv_id = TV_NONE,
89030 + .properties_required = 0,
89031 + .properties_provided = 0,
89032 + .properties_destroyed = 0,
89033 + .todo_flags_start = 0,
89034 + .todo_flags_finish = TODO_dump_func
89035 + }
89036 +};
89037 +
89038 +static bool gate_stackleak_track_stack(void)
89039 +{
89040 + return track_frame_size >= 0;
89041 +}
89042 +
89043 +static void stackleak_check_alloca(gimple_stmt_iterator *gsi)
89044 +{
89045 + gimple check_alloca;
89046 + tree alloca_size;
89047 +
89048 + // insert call to void pax_check_alloca(unsigned long size)
89049 + alloca_size = gimple_call_arg(gsi_stmt(*gsi), 0);
89050 + check_alloca = gimple_build_call(pax_check_alloca_decl, 1, alloca_size);
89051 + gsi_insert_before(gsi, check_alloca, GSI_SAME_STMT);
89052 +}
89053 +
89054 +static void stackleak_add_instrumentation(gimple_stmt_iterator *gsi)
89055 +{
89056 + gimple track_stack;
89057 +
89058 + // insert call to void pax_track_stack(void)
89059 + track_stack = gimple_build_call(pax_track_stack_decl, 0);
89060 + gsi_insert_after(gsi, track_stack, GSI_CONTINUE_LINKING);
89061 +}
89062 +
89063 +#if BUILDING_GCC_VERSION == 4005
89064 +static bool gimple_call_builtin_p(gimple stmt, enum built_in_function code)
89065 +{
89066 + tree fndecl;
89067 +
89068 + if (!is_gimple_call(stmt))
89069 + return false;
89070 + fndecl = gimple_call_fndecl(stmt);
89071 + if (!fndecl)
89072 + return false;
89073 + if (DECL_BUILT_IN_CLASS(fndecl) != BUILT_IN_NORMAL)
89074 + return false;
89075 +// print_node(stderr, "pax", fndecl, 4);
89076 + return DECL_FUNCTION_CODE(fndecl) == code;
89077 +}
89078 +#endif
89079 +
89080 +static bool is_alloca(gimple stmt)
89081 +{
89082 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA))
89083 + return true;
89084 +
89085 +#if BUILDING_GCC_VERSION >= 4007
89086 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA_WITH_ALIGN))
89087 + return true;
89088 +#endif
89089 +
89090 + return false;
89091 +}
89092 +
89093 +static unsigned int execute_stackleak_tree_instrument(void)
89094 +{
89095 + basic_block bb, entry_bb;
89096 + bool prologue_instrumented = false, is_leaf = true;
89097 +
89098 + entry_bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
89099 +
89100 + // 1. loop through BBs and GIMPLE statements
89101 + FOR_EACH_BB(bb) {
89102 + gimple_stmt_iterator gsi;
89103 +
89104 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
89105 + gimple stmt;
89106 +
89107 + stmt = gsi_stmt(gsi);
89108 +
89109 + if (is_gimple_call(stmt))
89110 + is_leaf = false;
89111 +
89112 + // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
89113 + if (!is_alloca(stmt))
89114 + continue;
89115 +
89116 + // 2. insert stack overflow check before each __builtin_alloca call
89117 + stackleak_check_alloca(&gsi);
89118 +
89119 + // 3. insert track call after each __builtin_alloca call
89120 + stackleak_add_instrumentation(&gsi);
89121 + if (bb == entry_bb)
89122 + prologue_instrumented = true;
89123 + }
89124 + }
89125 +
89126 + // special case for some bad linux code: taking the address of static inline functions will materialize them
89127 + // but we mustn't instrument some of them as the resulting stack alignment required by the function call ABI
89128 + // will break other assumptions regarding the expected (but not otherwise enforced) register clobbering ABI.
89129 + // case in point: native_save_fl on amd64 when optimized for size clobbers rdx if it were instrumented here.
89130 + if (is_leaf && !TREE_PUBLIC(current_function_decl) && DECL_DECLARED_INLINE_P(current_function_decl))
89131 + return 0;
89132 +
89133 + // 4. insert track call at the beginning
89134 + if (!prologue_instrumented) {
89135 + gimple_stmt_iterator gsi;
89136 +
89137 + bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
89138 + if (dom_info_available_p(CDI_DOMINATORS))
89139 + set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR);
89140 + gsi = gsi_start_bb(bb);
89141 + stackleak_add_instrumentation(&gsi);
89142 + }
89143 +
89144 + return 0;
89145 +}
89146 +
89147 +static unsigned int execute_stackleak_final(void)
89148 +{
89149 + rtx insn;
89150 +
89151 + if (cfun->calls_alloca)
89152 + return 0;
89153 +
89154 + // keep calls only if function frame is big enough
89155 + if (get_frame_size() >= track_frame_size)
89156 + return 0;
89157 +
89158 + // 1. find pax_track_stack calls
89159 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
89160 + // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
89161 + rtx body;
89162 +
89163 + if (!CALL_P(insn))
89164 + continue;
89165 + body = PATTERN(insn);
89166 + if (GET_CODE(body) != CALL)
89167 + continue;
89168 + body = XEXP(body, 0);
89169 + if (GET_CODE(body) != MEM)
89170 + continue;
89171 + body = XEXP(body, 0);
89172 + if (GET_CODE(body) != SYMBOL_REF)
89173 + continue;
89174 + if (strcmp(XSTR(body, 0), track_function))
89175 + continue;
89176 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
89177 + // 2. delete call
89178 + insn = delete_insn_and_edges(insn);
89179 +#if BUILDING_GCC_VERSION >= 4007
89180 + if (GET_CODE(insn) == NOTE && NOTE_KIND(insn) == NOTE_INSN_CALL_ARG_LOCATION)
89181 + insn = delete_insn_and_edges(insn);
89182 +#endif
89183 + }
89184 +
89185 +// print_simple_rtl(stderr, get_insns());
89186 +// print_rtl(stderr, get_insns());
89187 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
89188 +
89189 + return 0;
89190 +}
89191 +
89192 +static void stackleak_start_unit(void *gcc_data, void *user_dat)
89193 +{
89194 + tree fntype;
89195 +
89196 + // declare void pax_check_alloca(unsigned long size)
89197 + fntype = build_function_type_list(void_type_node, long_unsigned_type_node, NULL_TREE);
89198 + pax_check_alloca_decl = build_fn_decl(check_function, fntype);
89199 + DECL_ASSEMBLER_NAME(pax_check_alloca_decl); // for LTO
89200 + TREE_PUBLIC(pax_check_alloca_decl) = 1;
89201 + DECL_EXTERNAL(pax_check_alloca_decl) = 1;
89202 + DECL_ARTIFICIAL(pax_check_alloca_decl) = 1;
89203 +
89204 + // declare void pax_track_stack(void)
89205 + fntype = build_function_type_list(void_type_node, NULL_TREE);
89206 + pax_track_stack_decl = build_fn_decl(track_function, fntype);
89207 + DECL_ASSEMBLER_NAME(pax_track_stack_decl); // for LTO
89208 + TREE_PUBLIC(pax_track_stack_decl) = 1;
89209 + DECL_EXTERNAL(pax_track_stack_decl) = 1;
89210 + DECL_ARTIFICIAL(pax_track_stack_decl) = 1;
89211 +}
89212 +
89213 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
89214 +{
89215 + const char * const plugin_name = plugin_info->base_name;
89216 + const int argc = plugin_info->argc;
89217 + const struct plugin_argument * const argv = plugin_info->argv;
89218 + int i;
89219 + struct register_pass_info stackleak_tree_instrument_pass_info = {
89220 + .pass = &stackleak_tree_instrument_pass.pass,
89221 +// .reference_pass_name = "tree_profile",
89222 + .reference_pass_name = "optimized",
89223 + .ref_pass_instance_number = 0,
89224 + .pos_op = PASS_POS_INSERT_BEFORE
89225 + };
89226 + struct register_pass_info stackleak_final_pass_info = {
89227 + .pass = &stackleak_final_rtl_opt_pass.pass,
89228 + .reference_pass_name = "final",
89229 + .ref_pass_instance_number = 0,
89230 + .pos_op = PASS_POS_INSERT_BEFORE
89231 + };
89232 +
89233 + if (!plugin_default_version_check(version, &gcc_version)) {
89234 + error(G_("incompatible gcc/plugin versions"));
89235 + return 1;
89236 + }
89237 +
89238 + register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info);
89239 +
89240 + for (i = 0; i < argc; ++i) {
89241 + if (!strcmp(argv[i].key, "track-lowest-sp")) {
89242 + if (!argv[i].value) {
89243 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
89244 + continue;
89245 + }
89246 + track_frame_size = atoi(argv[i].value);
89247 + if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
89248 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
89249 + continue;
89250 + }
89251 + if (!strcmp(argv[i].key, "initialize-locals")) {
89252 + if (argv[i].value) {
89253 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
89254 + continue;
89255 + }
89256 + init_locals = true;
89257 + continue;
89258 + }
89259 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
89260 + }
89261 +
89262 + register_callback("start_unit", PLUGIN_START_UNIT, &stackleak_start_unit, NULL);
89263 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
89264 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
89265 +
89266 + return 0;
89267 +}
89268 diff --git a/usr/gen_init_cpio.c b/usr/gen_init_cpio.c
89269 index 83b3dde..835bee7 100644
89270 --- a/usr/gen_init_cpio.c
89271 +++ b/usr/gen_init_cpio.c
89272 @@ -299,7 +299,7 @@ static int cpio_mkfile(const char *name, const char *location,
89273 int retval;
89274 int rc = -1;
89275 int namesize;
89276 - int i;
89277 + unsigned int i;
89278
89279 mode |= S_IFREG;
89280
89281 @@ -383,9 +383,10 @@ static char *cpio_replace_env(char *new_location)
89282 *env_var = *expanded = '\0';
89283 strncat(env_var, start + 2, end - start - 2);
89284 strncat(expanded, new_location, start - new_location);
89285 - strncat(expanded, getenv(env_var), PATH_MAX);
89286 - strncat(expanded, end + 1, PATH_MAX);
89287 + strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
89288 + strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
89289 strncpy(new_location, expanded, PATH_MAX);
89290 + new_location[PATH_MAX] = 0;
89291 } else
89292 break;
89293 }
89294 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
89295 index 4f3434f..fc63040 100644
89296 --- a/virt/kvm/kvm_main.c
89297 +++ b/virt/kvm/kvm_main.c
89298 @@ -43,6 +43,8 @@
89299 #include <linux/swap.h>
89300 #include <linux/bitops.h>
89301 #include <linux/spinlock.h>
89302 +#include <linux/namei.h>
89303 +#include <linux/fs.h>
89304
89305 #include <asm/processor.h>
89306 #include <asm/io.h>
89307 @@ -575,12 +577,73 @@ out:
89308 return r;
89309 }
89310
89311 +/*
89312 + * We want to test whether the caller has been granted permissions to
89313 + * use this device. To be able to configure and control the device,
89314 + * the user needs access to PCI configuration space and BAR resources.
89315 + * These are accessed through PCI sysfs. PCI config space is often
89316 + * passed to the process calling this ioctl via file descriptor, so we
89317 + * can't rely on access to that file. We can check for permissions
89318 + * on each of the BAR resource files, which is a pretty clear
89319 + * indicator that the user has been granted access to the device.
89320 + */
89321 +static int probe_sysfs_permissions(struct pci_dev *dev)
89322 +{
89323 +#ifdef CONFIG_SYSFS
89324 + int i;
89325 + bool bar_found = false;
89326 +
89327 + for (i = PCI_STD_RESOURCES; i <= PCI_STD_RESOURCE_END; i++) {
89328 + char *kpath, *syspath;
89329 + struct path path;
89330 + struct inode *inode;
89331 + int r;
89332 +
89333 + if (!pci_resource_len(dev, i))
89334 + continue;
89335 +
89336 + kpath = kobject_get_path(&dev->dev.kobj, GFP_KERNEL);
89337 + if (!kpath)
89338 + return -ENOMEM;
89339 +
89340 + /* Per sysfs-rules, sysfs is always at /sys */
89341 + syspath = kasprintf(GFP_KERNEL, "/sys%s/resource%d", kpath, i);
89342 + kfree(kpath);
89343 + if (!syspath)
89344 + return -ENOMEM;
89345 +
89346 + r = kern_path(syspath, LOOKUP_FOLLOW, &path);
89347 + kfree(syspath);
89348 + if (r)
89349 + return r;
89350 +
89351 + inode = path.dentry->d_inode;
89352 +
89353 + r = inode_permission(inode, MAY_READ | MAY_WRITE | MAY_ACCESS);
89354 + path_put(&path);
89355 + if (r)
89356 + return r;
89357 +
89358 + bar_found = true;
89359 + }
89360 +
89361 + /* If no resources, probably something special */
89362 + if (!bar_found)
89363 + return -EPERM;
89364 +
89365 + return 0;
89366 +#else
89367 + return -EINVAL; /* No way to control the device without sysfs */
89368 +#endif
89369 +}
89370 +
89371 static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
89372 struct kvm_assigned_pci_dev *assigned_dev)
89373 {
89374 int r = 0;
89375 struct kvm_assigned_dev_kernel *match;
89376 struct pci_dev *dev;
89377 + u8 header_type;
89378
89379 down_read(&kvm->slots_lock);
89380 mutex_lock(&kvm->lock);
89381 @@ -607,6 +670,18 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
89382 r = -EINVAL;
89383 goto out_free;
89384 }
89385 +
89386 + /* Don't allow bridges to be assigned */
89387 + pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
89388 + if ((header_type & PCI_HEADER_TYPE) != PCI_HEADER_TYPE_NORMAL) {
89389 + r = -EPERM;
89390 + goto out_put;
89391 + }
89392 +
89393 + r = probe_sysfs_permissions(dev);
89394 + if (r)
89395 + goto out_put;
89396 +
89397 if (pci_enable_device(dev)) {
89398 printk(KERN_INFO "%s: Could not enable PCI device\n", __func__);
89399 r = -EBUSY;
89400 @@ -2494,7 +2569,7 @@ asmlinkage void kvm_handle_fault_on_reboot(void)
89401 if (kvm_rebooting)
89402 /* spin while reset goes on */
89403 while (true)
89404 - ;
89405 + cpu_relax();
89406 /* Fault while not rebooting. We want the trace. */
89407 BUG();
89408 }
89409 @@ -2714,7 +2789,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
89410 kvm_arch_vcpu_put(vcpu);
89411 }
89412
89413 -int kvm_init(void *opaque, unsigned int vcpu_size,
89414 +int kvm_init(const void *opaque, unsigned int vcpu_size,
89415 struct module *module)
89416 {
89417 int r;
89418 @@ -2767,15 +2842,17 @@ int kvm_init(void *opaque, unsigned int vcpu_size,
89419 /* A kmem cache lets us meet the alignment requirements of fx_save. */
89420 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
89421 __alignof__(struct kvm_vcpu),
89422 - 0, NULL);
89423 + SLAB_USERCOPY, NULL);
89424 if (!kvm_vcpu_cache) {
89425 r = -ENOMEM;
89426 goto out_free_5;
89427 }
89428
89429 - kvm_chardev_ops.owner = module;
89430 - kvm_vm_fops.owner = module;
89431 - kvm_vcpu_fops.owner = module;
89432 + pax_open_kernel();
89433 + *(void **)&kvm_chardev_ops.owner = module;
89434 + *(void **)&kvm_vm_fops.owner = module;
89435 + *(void **)&kvm_vcpu_fops.owner = module;
89436 + pax_close_kernel();
89437
89438 r = misc_register(&kvm_dev);
89439 if (r) {